Dataset schema (each record below lists these fields in this order, pipe-separated; "nullable" marks columns that may be empty):

| column | dtype | length / value range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1-1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3-239 |
| max_stars_repo_name | string | length 5-130 |
| max_stars_repo_head_hexsha | string | length 40-78 |
| max_stars_repo_licenses | sequence | length 1-10 |
| max_stars_count | int64 | 1-191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3-239 |
| max_issues_repo_name | string | length 5-130 |
| max_issues_repo_head_hexsha | string | length 40-78 |
| max_issues_repo_licenses | sequence | length 1-10 |
| max_issues_count | int64 | 1-67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3-239 |
| max_forks_repo_name | string | length 5-130 |
| max_forks_repo_head_hexsha | string | length 40-78 |
| max_forks_repo_licenses | sequence | length 1-10 |
| max_forks_count | int64 | 1-105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1-1.03M |
| avg_line_length | float64 | 1-958k |
| max_line_length | int64 | 1-1.03M |
| alphanum_fraction | float64 | 0-1 |
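For orientation, here is a minimal sketch of how records with this schema could be streamed and filtered with the Hugging Face datasets library; the dataset identifier, the star threshold, and the alphanum cutoff are placeholders chosen for illustration, not values taken from this dump.

from datasets import load_dataset

# Placeholder dataset id; substitute the real dataset name when loading.
rows = load_dataset("some-org/python-source-dump", split="train", streaming=True)

for row in rows:
    # max_stars_count is nullable, so guard against None before comparing.
    if (row["max_stars_count"] or 0) >= 10 and row["alphanum_fraction"] > 0.5:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])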
7945881497d605a1b1152cefa26257db9ec10f7d | 1,938 | py | Python | tools/scrape_language_dataset.py | kmbrou/text-mining-class | 06160dfed64c5d2485d18c0c9dc719efe74a5521 | [
"MIT"
] | 43 | 2018-10-22T05:14:32.000Z | 2020-11-19T19:05:08.000Z | tools/scrape_language_dataset.py | kmbrou/text-mining-class | 06160dfed64c5d2485d18c0c9dc719efe74a5521 | [
"MIT"
] | 1 | 2018-11-27T18:01:30.000Z | 2018-11-27T18:01:30.000Z | tools/scrape_language_dataset.py | kmbrou/text-mining-class | 06160dfed64c5d2485d18c0c9dc719efe74a5521 | [
"MIT"
] | 25 | 2018-11-27T17:09:19.000Z | 2020-11-19T19:05:29.000Z |
from pathlib import Path
from urllib.parse import urlparse
from tmclass_solutions.scraping import SimpleWebScraper
from tmclass_solutions.scraping import WikipediaArticle
EN_BASE_URL = "https://en.wikipedia.org/wiki/"
english_articles = [
"Agriculture", "Architecture", "Art", "Biology", "Business",
"Cinematography", "Culture", "Economy", "Literature", "Music",
"Politics", "Religion", "Sport", "Science", "Technology", "Trade"
]
# Most represented languages for those seed articles in text size
# (number of unicode symbols):
hostnames = [
"fr.wikipedia.org",
"en.wikipedia.org",
"ar.wikipedia.org",
"ru.wikipedia.org",
"uk.wikipedia.org",
"fa.wikipedia.org",
"ca.wikipedia.org",
"sr.wikipedia.org",
"es.wikipedia.org",
"zh.wikipedia.org",
"it.wikipedia.org",
"de.wikipedia.org",
"gl.wikipedia.org",
"pt.wikipedia.org",
"vi.wikipedia.org",
"ta.wikipedia.org",
"ja.wikipedia.org",
"bg.wikipedia.org",
"kn.wikipedia.org",
"azb.wikipedia.or",
"id.wikipedia.org",
"el.wikipedia.org",
"eo.wikipedia.org",
"hy.wikipedia.org",
"hi.wikipedia.org",
"sv.wikipedia.org",
"he.wikipedia.org",
"tr.wikipedia.org",
"th.wikipedia.org",
"bn.wikipedia.org",
]
output_folder = Path("/tmp/wikipedia_scraping")
output_folder.mkdir(exist_ok=True, parents=True)
scraper = SimpleWebScraper(output_folder)
whitelist = set(hostnames)
for article_name in english_articles:
article_url = EN_BASE_URL + article_name
folder = scraper.fetch_and_save(article_url)
print(f"Fetched {folder}")
article = WikipediaArticle((folder / "body").read_bytes())
language_links = article.get_language_links()
for language_link in language_links:
if urlparse(language_link).hostname not in whitelist:
continue
folder = scraper.fetch_and_save(language_link)
print(f"Fetched {folder}")
| 29.363636 | 69 | 0.681115 |
7945881f64d97fb04ff2e532d34808708931b8fa | 14,599 | py | Python | pymap/parsing/primitives.py | chenliangomc/pymap | 42581712631e9e9787e9dd094a22f5cc607f804d | [
"MIT"
] | null | null | null | pymap/parsing/primitives.py | chenliangomc/pymap | 42581712631e9e9787e9dd094a22f5cc607f804d | [
"MIT"
] | null | null | null | pymap/parsing/primitives.py | chenliangomc/pymap | 42581712631e9e9787e9dd094a22f5cc607f804d | [
"MIT"
] | null | null | null |
"""Primitive parseable objects in the IMAP protocol."""
import re
from abc import abstractmethod, ABCMeta
from collections.abc import Sequence as SequenceABC
from functools import total_ordering
from io import BytesIO
from typing import cast, Type, Tuple, List, Union, Iterable, Sequence, Match, \
Optional, Iterator, SupportsBytes
from . import Parseable, ExpectedParseable, NotParseable, Params
from .exceptions import RequiresContinuation
from ..bytes import rev, MaybeBytes, MaybeBytesT, BytesFormat, WriteStream, \
Writeable
__all__ = ['Nil', 'Number', 'Atom', 'ListP', 'String',
'QuotedString', 'LiteralString']
class Nil(Parseable[None]):
"""Represents a ``NIL`` object from an IMAP stream."""
_nil_pattern = rev.compile(b'^NIL$', re.I)
__slots__ = [] # type: ignore
def __init__(self) -> None:
super().__init__()
@property
def value(self) -> None:
"""Always returns ``None``."""
return None
def __bytes__(self) -> bytes:
return b'NIL'
def __hash__(self) -> int:
return hash(Nil)
def __eq__(self, other) -> bool:
if isinstance(other, Nil):
return True
return super().__eq__(other)
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> Tuple['Nil', memoryview]:
start = cls._whitespace_length(buf)
match = cls._atom_pattern.match(buf, start)
if not match:
raise NotParseable(buf)
atom = match.group(0)
if not cls._nil_pattern.match(atom):
raise NotParseable(buf)
return cls(), buf[match.end(0):]
@total_ordering
class Number(Parseable[int]):
"""Represents a number object from an IMAP stream.
Args:
num: The integer value.
"""
_num_pattern = rev.compile(br'^\d+$')
__slots__ = ['num', '_raw']
def __init__(self, num: int) -> None:
super().__init__()
self.num = num
self._raw = b'%d' % num
@property
def value(self) -> int:
"""The integer value."""
return self.num
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> Tuple['Number', memoryview]:
start = cls._whitespace_length(buf)
match = cls._atom_pattern.match(buf, start)
if not match:
raise NotParseable(buf)
atom = match.group(0)
if not cls._num_pattern.match(atom):
raise NotParseable(buf)
return cls(int(match.group(0))), buf[match.end(0):]
def __bytes__(self) -> bytes:
return self._raw
def __hash__(self) -> int:
return hash((Number, self.value))
def __eq__(self, other) -> bool:
if isinstance(other, Number):
return self.value == other.value
elif isinstance(other, int):
return self.value == other
return super().__eq__(other)
def __lt__(self, other) -> bool:
if isinstance(other, Number):
return self.value < other.value
elif isinstance(other, int):
return self.value < other
return NotImplemented
class Atom(Parseable[bytes]):
"""Represents an atom object from an IMAP stream.
Args:
value: The atom bytestring.
"""
__slots__ = ['_value']
def __init__(self, value: bytes) -> None:
super().__init__()
self._value = value
@property
def value(self) -> bytes:
"""The atom bytestring."""
return self._value
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> Tuple['Atom', memoryview]:
start = cls._whitespace_length(buf)
match = cls._atom_pattern.match(buf, start)
if not match:
raise NotParseable(buf[start:])
atom = match.group(0)
return cls(atom), buf[match.end(0):]
def __bytes__(self) -> bytes:
return self.value
def __hash__(self) -> int:
return hash((Atom, self.value))
def __eq__(self, other) -> bool:
if isinstance(other, Atom):
return self.value == other.value
return super().__eq__(other)
class String(Parseable[bytes], metaclass=ABCMeta):
"""Represents a string object from an IMAP string. This object may not be
instantiated directly, use one of its derivatives instead.
"""
_MAX_LEN = 4096
__slots__ = [] # type: ignore
@property
@abstractmethod
def binary(self) -> bool:
"""True if the string should be transmitted as binary."""
...
@property
@abstractmethod
def length(self) -> int:
"""The length of the string value."""
...
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> Tuple['String', memoryview]:
try:
return QuotedString.parse(buf, params)
except NotParseable:
pass
return LiteralString.parse(buf, params)
@classmethod
def build(cls, value: object, binary: bool = False,
fallback: object = None) -> Union[Nil, 'String']:
"""Produce either a :class:`QuotedString` or :class:`LiteralString`
based on the contents of ``data``. This is useful to improve
readability of response data.
Args:
value: The string to serialize.
binary: True if the string should be transmitted as binary.
fallback: The default value to use if ``value`` is None.
"""
if value is None:
if fallback is None:
return Nil()
else:
return cls.build(fallback, binary)
elif not value:
return QuotedString(b'')
elif isinstance(value, bytes):
ascii_ = value
elif isinstance(value, memoryview):
ascii_ = bytes(value)
elif hasattr(value, '__bytes__'):
ascii_ = bytes(cast(SupportsBytes, value))
elif isinstance(value, str) or hasattr(value, '__str__'):
value = str(value)
try:
ascii_ = bytes(value, 'ascii')
except UnicodeEncodeError:
ascii_ = bytes(value, 'utf-8', 'replace')
return LiteralString(ascii_, binary)
else:
raise TypeError(value)
if not binary and len(ascii_) < 64 \
and b'\n' not in ascii_ \
and b'\x00' not in ascii_:
return QuotedString(ascii_)
else:
return LiteralString(ascii_, binary)
def __bytes__(self) -> bytes:
raise NotImplementedError
def __hash__(self) -> int:
return hash((String, self.value))
def __eq__(self, other) -> bool:
if isinstance(other, String):
return self.value == other.value
return super().__eq__(other)
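# Illustrative sketch of String.build behavior (comments only, not executed):
#
#     String.build(None)                  # -> Nil()
#     String.build(None, fallback='n/a')  # -> built from the fallback value
#     String.build(b'hi')                 # -> QuotedString(b'hi')
#     String.build(b'x' * 100)            # -> LiteralString (too long to quote)
#     String.build(b'data', binary=True)  # -> LiteralString flagged as binary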
class QuotedString(String):
"""Represents a string object from an IMAP stream that was encased in
double-quotes.
Args:
string: The string value.
"""
_quoted_pattern = rev.compile(br'(?:\r|\n|\\.|\")')
_quoted_specials_pattern = rev.compile(br'[\"\\]')
__slots__ = ['_string', '_raw']
def __init__(self, string: bytes, raw: bytes = None) -> None:
super().__init__()
self._string = string
self._raw = raw
@property
def value(self) -> bytes:
"""The string value."""
return self._string
@property
def binary(self) -> bool:
return False
@property
def length(self) -> int:
return len(self._string)
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> Tuple['QuotedString', memoryview]:
start = cls._whitespace_length(buf)
if buf[start:start + 1] != b'"':
raise NotParseable(buf)
marker = start + 1
unquoted = bytearray()
for match in cls._quoted_pattern.finditer(buf, marker):
unquoted += buf[marker:match.start(0)]
match_group = match.group(0)
if match_group in (b'\r', b'\n'):
raise NotParseable(buf)
elif match_group.startswith(b'\\'):
escape_char = match_group[-1:]
if escape_char in (b'\\', b'"'):
unquoted += escape_char
else:
raise NotParseable(buf)
marker = match.end(0)
else:
end = match.end(0)
quoted = buf[start:end + 1]
return cls(bytes(unquoted), bytes(quoted)), buf[end:]
raise NotParseable(buf)
@classmethod
def _escape_quoted_specials(cls, match: Match) -> bytes:
return b'\\' + match.group(0)
def __bytes__(self) -> bytes:
if self._raw is not None:
return bytes(self._raw)
pat = self._quoted_specials_pattern
quoted_string = pat.sub(self._escape_quoted_specials, self.value)
self._raw = BytesFormat(b'"%b"') % (quoted_string, )
return self._raw
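# Illustrative sketch (comments only, not executed): bytes(QuotedString(b'hi'))
# gives b'"hi"', and bytes(QuotedString(b'a"b')) gives b'"a\\"b"', since the
# quoted-specials pattern backslash-escapes embedded double quotes and backslashes.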
class LiteralString(String):
"""Represents a string object from an IMAP stream that used the literal
syntax.
Args:
string: The string value parts.
"""
_literal_pattern = rev.compile(br'(~?){(\d+)(\+?)}\r?\n')
__slots__ = ['_string', '_length', '_binary', '_raw']
def __init__(self, string: Union[bytes, Writeable],
binary: bool = False) -> None:
super().__init__()
self._string = string
self._length = len(string)
self._binary = binary
self._raw: Optional[bytes] = None
@property
def value(self) -> bytes:
return bytes(self._string)
@property
def binary(self) -> bool:
return self._binary
@property
def length(self) -> int:
return self._length
@property
def _prefix(self) -> bytes:
binary_prefix = b'~' if self.binary else b''
return b'%b{%d}\r\n' % (binary_prefix, self.length)
@classmethod
def _check_too_big(cls, params: Params, length: int) -> bool:
if params.command_name == b'APPEND':
max_len = params.max_append_len
else:
max_len = cls._MAX_LEN
return max_len is not None and length > max_len
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> Tuple['LiteralString', memoryview]:
start = cls._whitespace_length(buf)
match = cls._literal_pattern.match(buf, start)
if not match:
raise NotParseable(buf)
binary = match.group(1) == b'~'
literal_length = int(match.group(2))
if cls._check_too_big(params, literal_length):
raise NotParseable(buf, b'TOOBIG')
elif match.group(3) == b'+':
buf = buf[match.end(0):]
literal = bytes(buf[0:literal_length])
elif len(buf) > match.end(0):
raise NotParseable(buf[match.end(0):])
elif params.continuations:
buf = params.continuations.pop(0)
literal = bytes(buf[0:literal_length])
else:
raise RequiresContinuation(b'Literal string', literal_length)
if len(literal) != literal_length:
raise NotParseable(buf)
return cls(literal, binary), buf[literal_length:]
def write(self, writer: WriteStream) -> None:
writer.write(self._prefix)
if isinstance(self._string, Writeable):
self._string.write(writer)
else:
writer.write(self._string)
def __len__(self) -> int:
return len(self._prefix) + self.length
def __bytes__(self) -> bytes:
if self._raw is None:
out = BytesIO()
self.write(out)
self._raw = out.getvalue()
return self._raw
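# Illustrative sketch (comments only, not executed): bytes(LiteralString(b'hi'))
# gives b'{2}\r\nhi', and bytes(LiteralString(b'hi', binary=True)) gives
# b'~{2}\r\nhi', i.e. the length prefix from _prefix followed by the raw payload.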
class ListP(Parseable[Sequence[MaybeBytes]]):
"""Represents a list of :class:`Parseable` objects from an IMAP stream.
Args:
items: The list of parsed objects.
sort: If True, the list of items is sorted.
"""
_end_pattern = rev.compile(br' *\)')
__slots__ = ['items']
def __init__(self, items: Iterable[MaybeBytes],
sort: bool = False) -> None:
super().__init__()
if sort:
items_list = sorted(items)
else:
items_list = list(items)
self.items: Sequence[MaybeBytes] = items_list
@property
def value(self) -> Sequence[MaybeBytes]:
"""The list of parsed objects."""
return self.items
def get_as(self, cls: Type[MaybeBytesT]) -> Sequence[MaybeBytesT]:
"""Return the list of parsed objects."""
_ = cls # noqa
return cast(Sequence[MaybeBytesT], self.items)
def __iter__(self) -> Iterator[MaybeBytes]:
return iter(self.value)
def __len__(self) -> int:
return len(self.value)
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> Tuple['ListP', memoryview]:
start = cls._whitespace_length(buf)
if buf[start:start + 1] != b'(':
raise NotParseable(buf)
items: List[Parseable] = []
buf = buf[start + 1:]
while True:
match = cls._end_pattern.match(buf)
if match:
return cls(items), buf[match.end(0):]
elif items and not cls._whitespace_length(buf):
raise NotParseable(buf)
params_copy = params.copy(expected=params.list_expected)
item, buf = ExpectedParseable.parse(buf, params_copy)
items.append(item)
def write(self, writer: WriteStream) -> None:
writer.write(b'(')
is_first = True
for i, item in enumerate(self.items):
if is_first:
is_first = False
else:
writer.write(b' ')
if isinstance(item, Writeable):
item.write(writer)
else:
writer.write(bytes(item))
writer.write(b')')
def __bytes__(self) -> bytes:
raw_items = BytesFormat(b' ').join(self.items)
return b'(%b)' % raw_items
def __hash__(self) -> int:
return hash((ListP, self.value))
def __eq__(self, other) -> bool:
if isinstance(other, ListP):
return self.__eq__(other.value)
elif isinstance(other, SequenceABC):
if len(self.value) != len(other):
return False
for i, val in enumerate(self.value):
if val != other[i]:
return False
return True
return super().__eq__(other)
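# Illustrative sketch (comments only, not executed): bytes(ListP([Number(12), Atom(b'FLAGS')]))
# gives b'(12 FLAGS)'; nested ListP values serialize recursively inside the parentheses.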
| 29.854806 | 79 | 0.574971 |
794588dcebfe0b2347712ada878cbfb377a233d3 | 16,969 | py | Python | test/dialect/mssql/test_reflection.py | AllanDaemon/sqlalchemy | e57d63ab96bccac77c549771ab60fecd6d1bb770 | [
"MIT"
] | 2 | 2020-02-19T17:50:50.000Z | 2021-02-10T02:52:41.000Z | test/dialect/mssql/test_reflection.py | AllanDaemon/sqlalchemy | e57d63ab96bccac77c549771ab60fecd6d1bb770 | [
"MIT"
] | null | null | null | test/dialect/mssql/test_reflection.py | AllanDaemon/sqlalchemy | e57d63ab96bccac77c549771ab60fecd6d1bb770 | [
"MIT"
] | 1 | 2021-06-13T01:55:35.000Z | 2021-06-13T01:55:35.000Z |
# -*- encoding: utf-8
from sqlalchemy import Column
from sqlalchemy import DDL
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import types
from sqlalchemy import util
from sqlalchemy.databases import mssql
from sqlalchemy.dialects.mssql import base
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode
from sqlalchemy.dialects.mssql.information_schema import tables
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import ComparesTables
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
__only_on__ = "mssql"
__backend__ = True
@testing.provide_metadata
def test_basic_reflection(self):
meta = self.metadata
users = Table(
"engine_users",
meta,
Column("user_id", types.INT, primary_key=True),
Column("user_name", types.VARCHAR(20), nullable=False),
Column("test1", types.CHAR(5), nullable=False),
Column("test2", types.Float(5), nullable=False),
Column("test2.5", types.Float(), nullable=False),
Column("test3", types.Text()),
Column("test4", types.Numeric, nullable=False),
Column("test4.5", types.Numeric(10, 2), nullable=False),
Column("test5", types.DateTime),
Column(
"parent_user_id",
types.Integer,
ForeignKey("engine_users.user_id"),
),
Column("test6", types.DateTime, nullable=False),
Column("test7", types.Text()),
Column("test8", types.LargeBinary()),
Column("test_passivedefault2", types.Integer, server_default="5"),
Column("test9", types.BINARY(100)),
Column("test_numeric", types.Numeric()),
)
addresses = Table(
"engine_email_addresses",
meta,
Column("address_id", types.Integer, primary_key=True),
Column(
"remote_user_id", types.Integer, ForeignKey(users.c.user_id)
),
Column("email_address", types.String(20)),
)
meta.create_all()
meta2 = MetaData()
reflected_users = Table(
"engine_users", meta2, autoload=True, autoload_with=testing.db
)
reflected_addresses = Table(
"engine_email_addresses",
meta2,
autoload=True,
autoload_with=testing.db,
)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
@testing.provide_metadata
def _test_specific_type(self, type_obj, ddl):
metadata = self.metadata
table = Table("type_test", metadata, Column("col1", type_obj))
table.create()
m2 = MetaData()
table2 = Table("type_test", m2, autoload_with=testing.db)
self.assert_compile(
schema.CreateTable(table2),
"CREATE TABLE type_test (col1 %s NULL)" % ddl,
)
def test_xml_type(self):
self._test_specific_type(mssql.XML, "XML")
def test_image_type(self):
self._test_specific_type(mssql.IMAGE, "IMAGE")
def test_money_type(self):
self._test_specific_type(mssql.MONEY, "MONEY")
def test_numeric_prec_scale(self):
self._test_specific_type(mssql.NUMERIC(10, 2), "NUMERIC(10, 2)")
def test_float(self):
self._test_specific_type(mssql.FLOAT, "FLOAT(53)")
def test_real(self):
self._test_specific_type(mssql.REAL, "REAL")
def test_float_as_real(self):
# FLOAT(5) comes back as REAL
self._test_specific_type(mssql.FLOAT(5), "REAL")
@testing.provide_metadata
def test_identity(self):
metadata = self.metadata
table = Table(
"identity_test",
metadata,
Column(
"col1",
Integer,
mssql_identity_start=2,
mssql_identity_increment=3,
primary_key=True,
),
)
table.create()
meta2 = MetaData(testing.db)
table2 = Table("identity_test", meta2, autoload=True)
eq_(table2.c["col1"].dialect_options["mssql"]["identity_start"], 2)
eq_(table2.c["col1"].dialect_options["mssql"]["identity_increment"], 3)
@testing.emits_warning("Did not recognize")
@testing.provide_metadata
def test_skip_types(self):
metadata = self.metadata
testing.db.execute(
"""
create table foo (id integer primary key, data xml)
"""
)
with mock.patch.object(
testing.db.dialect, "ischema_names", {"int": mssql.INTEGER}
):
t1 = Table("foo", metadata, autoload=True)
assert isinstance(t1.c.id.type, Integer)
assert isinstance(t1.c.data.type, types.NullType)
@testing.provide_metadata
def test_cross_schema_fk_pk_name_overlaps(self):
# test for issue #4228
metadata = self.metadata
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema,
)
Table(
"referrer",
metadata,
Column("id", Integer, primary_key=True),
Column(
"sid",
ForeignKey(
"%s.subject.id" % testing.config.test_schema,
name="fk_subject",
),
),
schema=testing.config.test_schema,
)
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema_2,
)
metadata.create_all()
insp = inspect(testing.db)
eq_(
insp.get_foreign_keys("referrer", testing.config.test_schema),
[
{
"name": "fk_subject",
"constrained_columns": ["sid"],
"referred_schema": "test_schema",
"referred_table": "subject",
"referred_columns": ["id"],
}
],
)
@testing.provide_metadata
def test_table_name_that_is_greater_than_16_chars(self):
metadata = self.metadata
Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Index("foo_idx", "foo"),
)
metadata.create_all()
t = Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ", MetaData(), autoload_with=testing.db
)
eq_(t.name, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
@testing.provide_metadata
def test_db_qualified_items(self):
metadata = self.metadata
Table("foo", metadata, Column("id", Integer, primary_key=True))
Table(
"bar",
metadata,
Column("id", Integer, primary_key=True),
Column("foo_id", Integer, ForeignKey("foo.id", name="fkfoo")),
)
metadata.create_all()
dbname = testing.db.scalar("select db_name()")
owner = testing.db.scalar("SELECT user_name()")
referred_schema = "%(dbname)s.%(owner)s" % {
"dbname": dbname,
"owner": owner,
}
inspector = inspect(testing.db)
bar_via_db = inspector.get_foreign_keys("bar", schema=referred_schema)
eq_(
bar_via_db,
[
{
"referred_table": "foo",
"referred_columns": ["id"],
"referred_schema": referred_schema,
"name": "fkfoo",
"constrained_columns": ["foo_id"],
}
],
)
assert inspect(testing.db).has_table("bar", schema=referred_schema)
m2 = MetaData()
Table(
"bar",
m2,
schema=referred_schema,
autoload=True,
autoload_with=testing.db,
)
eq_(m2.tables["%s.foo" % referred_schema].schema, referred_schema)
@testing.provide_metadata
def test_indexes_cols(self):
metadata = self.metadata
t1 = Table("t", metadata, Column("x", Integer), Column("y", Integer))
Index("foo", t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x"], t2.c.y]))
@testing.provide_metadata
def test_indexes_cols_with_commas(self):
metadata = self.metadata
t1 = Table(
"t",
metadata,
Column("x, col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x, col"], t2.c.y]))
@testing.provide_metadata
def test_indexes_cols_with_spaces(self):
metadata = self.metadata
t1 = Table(
"t",
metadata,
Column("x col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x col"], t2.c.y]))
@testing.provide_metadata
def test_max_ident_in_varchar_not_present(self):
"""test [ticket:3504].
Here we are testing not just that the "max" token comes back
as None, but also that these types accept "max" as the value
of "length" on construction, which isn't a directly documented
pattern however is likely in common use.
"""
metadata = self.metadata
Table(
"t",
metadata,
Column("t1", types.String),
Column("t2", types.Text("max")),
Column("t3", types.Text("max")),
Column("t4", types.LargeBinary("max")),
Column("t5", types.VARBINARY("max")),
)
metadata.create_all()
for col in inspect(testing.db).get_columns("t"):
is_(col["type"].length, None)
in_("max", str(col["type"].compile(dialect=testing.db.dialect)))
class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_info_unicode_coercion(self):
dialect = mssql.dialect()
value = CoerceUnicode().bind_processor(dialect)("a string")
assert isinstance(value, util.text_type)
def test_info_unicode_cast_no_2000(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2000_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = :table_name_1",
dialect=dialect,
)
def test_info_unicode_cast(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2005_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = "
"CAST(:table_name_1 AS NVARCHAR(max))",
dialect=dialect,
)
class ReflectHugeViewTest(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
# crashes on freetds 0.91, not worth it
__skip_if__ = (lambda: testing.requires.mssql_freetds.enabled,)
def setup(self):
self.col_num = 150
self.metadata = MetaData(testing.db)
t = Table(
"base_table",
self.metadata,
*[
Column("long_named_column_number_%d" % i, Integer)
for i in range(self.col_num)
]
)
self.view_str = view_str = (
"CREATE VIEW huge_named_view AS SELECT %s FROM base_table"
% (
",".join(
"long_named_column_number_%d" % i
for i in range(self.col_num)
)
)
)
assert len(view_str) > 4000
event.listen(t, "after_create", DDL(view_str))
event.listen(t, "before_drop", DDL("DROP VIEW huge_named_view"))
self.metadata.create_all()
def teardown(self):
self.metadata.drop_all()
def test_inspect_view_definition(self):
inspector = Inspector.from_engine(testing.db)
view_def = inspector.get_view_definition("huge_named_view")
eq_(view_def, self.view_str)
class OwnerPlusDBTest(fixtures.TestBase):
def test_default_schema_name_not_interpreted_as_tokenized(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2014_VERSION
mock_connection = mock.Mock(scalar=lambda sql: "Jonah.The.Whale")
schema_name = dialect._get_default_schema_name(mock_connection)
eq_(schema_name, "Jonah.The.Whale")
eq_(
base._owner_plus_db(dialect, schema_name),
(None, "Jonah.The.Whale"),
)
def test_owner_database_pairs_dont_use_for_same_db(self):
dialect = mssql.dialect()
identifier = "my_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect, scalar=mock.Mock(return_value="my_db")
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
eq_(mock_connection.mock_calls, [mock.call.scalar("select db_name()")])
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
def test_owner_database_pairs_switch_for_different_db(self):
dialect = mssql.dialect()
identifier = "my_other_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect, scalar=mock.Mock(return_value="my_db")
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
eq_(
mock_connection.mock_calls,
[
mock.call.scalar("select db_name()"),
mock.call.execute("use my_other_db"),
mock.call.execute("use my_db"),
],
)
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
def test_owner_database_pairs(self):
dialect = mssql.dialect()
for identifier, expected_schema, expected_owner, use_stmt in [
("foo", None, "foo", "use foo"),
("foo.bar", "foo", "bar", "use foo"),
("Foo.Bar", "Foo", "Bar", "use [Foo]"),
("[Foo.Bar]", None, "Foo.Bar", "use [Foo].[Bar]"),
("[Foo.Bar].[bat]", "Foo.Bar", "bat", "use [Foo].[Bar]"),
(
"[foo].]do something; select [foo",
"foo",
"do something; select foo",
"use foo",
),
(
"something; select [foo].bar",
"something; select foo",
"bar",
"use [something; select foo]",
),
]:
schema, owner = base._owner_plus_db(dialect, identifier)
eq_(owner, expected_owner)
eq_(schema, expected_schema)
mock_connection = mock.Mock(
dialect=dialect,
scalar=mock.Mock(return_value="Some ] Database"),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
if schema is None:
eq_(mock_connection.mock_calls, [])
else:
eq_(
mock_connection.mock_calls,
[
mock.call.scalar("select db_name()"),
mock.call.execute(use_stmt),
mock.call.execute("use [Some Database]"),
],
)
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
| 32.885659 | 79 | 0.568802 |
79458a9d16b03a79afefafd688813cec460082f1 | 15,431 | py | Python | test/unit/test_multipart.py | qupengcheng/py-udfs-api | 182de801c0ca52e5960019cd0cbfef10d5a386f0 | [
"MIT"
] | 1 | 2018-11-12T02:59:41.000Z | 2018-11-12T02:59:41.000Z | test/unit/test_multipart.py | qupengcheng/py-udfs-api | 182de801c0ca52e5960019cd0cbfef10d5a386f0 | [
"MIT"
] | null | null | null | test/unit/test_multipart.py | qupengcheng/py-udfs-api | 182de801c0ca52e5960019cd0cbfef10d5a386f0 | [
"MIT"
] | null | null | null |
"""Test the file multipart.py
Classes:
TestContentHelpers -- test the three content-header helper functions
TestBodyGenerator -- test the BodyGenerator helper class
TestBufferedGenerator -- test the BufferedGenerator helper class
TestFileStream -- test the FileStream generator class
TestDirectoryStream -- test the DirectoryStream generator class
TestTextStream -- test the TextStream generator class
TestStreamHelpers -- unimplemented
"""
import unittest
import os
import re
import pytest
import six
import udfsapi.multipart
class TestContentHelpers(unittest.TestCase):
"""Tests the functionality of the three content-oriented helper functions.
Public methods:
test_content_disposition -- check the content_disposition defaults
test_content_disposition_with_type -- check that content_disposition
handles given disposition type
test_content_type -- check the content_type guessing functionality
test_multipart_content_type -- check multipart_content_type functionality
"""
def test_content_disposition(self):
"""Check that content_disposition defaults properly"""
expected = {'Content-Disposition': 'file; filename="example.txt"'}
actual = udfsapi.multipart.content_disposition('example.txt')
assert expected == actual
def test_content_disposition_with_type(self):
"""Check that content_disposition handles given disposition type"""
expected = {'Content-Disposition':
'attachment; filename="example.txt"'}
actual = udfsapi.multipart.content_disposition('example.txt',
'attachment')
assert expected == actual
def test_content_type(self):
"""Check the content_type guessing functionality."""
actual = udfsapi.multipart.content_type('example.txt')
expected = {'Content-Type': 'text/plain'}
assert expected == actual
actual = udfsapi.multipart.content_type('example.jpeg')
expected = {'Content-Type': 'image/jpeg'}
assert expected == actual
actual = udfsapi.multipart.content_type('example')
expected = {'Content-Type': 'application/octet-stream'}
assert expected == actual
def test_multipart_content_type(self):
"""Check test_multipart_content_type functionality."""
actual = udfsapi.multipart.multipart_content_type(
'8K5rNKlLQVyreRNncxOTeg')
expected = {'Content-Type':
'multipart/mixed; boundary="8K5rNKlLQVyreRNncxOTeg"'}
assert expected == actual
actual = udfsapi.multipart.multipart_content_type(
'8K5rNKlLQVyreRNncxOTeg', 'alt')
expected = {'Content-Type':
'multipart/alt; boundary="8K5rNKlLQVyreRNncxOTeg"'}
assert expected == actual
class TestBodyGenerator(unittest.TestCase):
"""Tests the functionality of the BodyGenerator class.
Public methods:
test_init_defaults -- tests the constructor and its behavior with only the
required argument
test_init_with_all -- tests the constructor when all arguments are set
explicitly
test_write_headers -- tests write_headers function against example output
test_open -- tests open function against example output
test_file_open -- test file_open function against example output
test_file_close -- test file_close function against example output
test_close -- test close function against example output
"""
def test_init_defaults(self):
"""Test the __init__ function for default parameter values."""
name = "test_name"
expected_disposition = 'file; filename="test_name"'
expected_type = 'multipart/mixed; boundary="\S*"'
expected_boundary_pattern = '\S*'
generator = udfsapi.multipart.BodyGenerator(name)
assert generator.headers['Content-Disposition'] == expected_disposition
assert re.search(expected_type, generator.headers['Content-Type'])
assert re.search(expected_boundary_pattern, generator.boundary)
def test_init_with_all(self):
"""Test the __init__ function for explicitly set parameter values."""
name = "test_name"
disptype = "test_disp"
subtype = "test_subtype"
boundary = "test_boundary"
generator = udfsapi.multipart.BodyGenerator(name, disptype,
subtype, boundary)
assert generator.headers == {
'Content-Disposition': 'test_disp; filename="test_name"',
'Content-Type':
'multipart/test_subtype; boundary="test_boundary"'}
assert generator.boundary == boundary
def test_write_headers(self):
"""Test the write_headers function against sample output."""
expected = 'Content-Disposition: test_disp; filename="test_name"' \
+ '\r\nContent-Type: multipart/test_subtype; ' \
+ 'boundary="test_boundary"\r\n\r\n'
name = "test_name"
disptype = "test_disp"
subtype = "test_subtype"
boundary = "test_boundary"
generator = udfsapi.multipart.BodyGenerator(name, disptype,
subtype, boundary)
headers = ""
for chunk in generator.write_headers():
if type(chunk) is not str:
chunk = chunk.decode()
headers += chunk
assert headers == expected
def test_open(self):
"""Test the open function against sample output."""
expected = '--test_boundary\r\n'
name = "test_name"
disptype = "test_disp"
subtype = "test_subtype"
boundary = "test_boundary"
generator = udfsapi.multipart.BodyGenerator(name, disptype,
subtype, boundary)
headers = ""
for chunk in generator.open():
if type(chunk) is not str:
chunk = chunk.decode()
headers += chunk
assert headers == expected
def test_file_open(self):
"""Test the file_open function against sample output."""
expected = '--test_boundary\r\nContent-Disposition: file; '\
+ 'filename="test_name"\r\nContent-Type: '\
+ 'application/octet-stream\r\n\r\n'
name = "test_name"
disptype = "test_disp"
subtype = "test_subtype"
boundary = "test_boundary"
generator = udfsapi.multipart.BodyGenerator(name, disptype,
subtype, boundary)
headers = ""
for chunk in generator.file_open(name):
if type(chunk) is not str:
chunk = chunk.decode()
headers += chunk
assert headers == expected
def test_file_close(self):
"""Test the file_close function against sample output."""
expected = '\r\n'
name = "test_name"
disptype = "test_disp"
subtype = "test_subtype"
boundary = "test_boundary"
generator = udfsapi.multipart.BodyGenerator(name, disptype,
subtype, boundary)
headers = ""
for chunk in generator.file_close():
if type(chunk) is not str:
chunk = chunk.decode()
headers += chunk
assert headers == expected
def test_close(self):
"""Test the close function against sample output."""
expected = '--test_boundary--\r\n'
name = "test_name"
disptype = "test_disp"
subtype = "test_subtype"
boundary = "test_boundary"
generator = udfsapi.multipart.BodyGenerator(name, disptype,
subtype, boundary)
headers = ""
for chunk in generator.close():
if type(chunk) is not str:
chunk = chunk.decode()
headers += chunk
assert headers == expected
def _generate_test_chunks(chunk_size, iterations):
    """Generates strings of chunk_size length until out of iterations."""
    for i in range(iterations):
output = b""
for j in range(chunk_size):
output += b"z"
yield output
class TestBufferedGenerator(unittest.TestCase):
"""Test the BufferedGenerator class.
Public methods:
test_init -- test the default arguments of the constructor
test_file_chunks -- test the file_chunks function against example output
test_gen_chunks -- test the gen_chunks function against example output
test_body -- verify that body is unimplemented
test_close -- test the close function against example output
"""
def test_init(self):
"""Test the __init__ function for default parameter values."""
name = "test_name"
instance = udfsapi.multipart.BufferedGenerator(name)
assert instance.name == name
def test_file_chunks(self):
"""Test the file_chunks function against example output.
Warning: This test depends on the contents of
test/functional/fake_dir/fsdfgh
Changing that file could break the test.
"""
name = "fsdfgh"
chunk_size = 2
path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
"functional", "fake_dir", "fsdfgh")
instance = udfsapi.multipart.BufferedGenerator(name, chunk_size)
expected = 'dsadsad\n'
output = ""
open_file = open(path)
for emitted in instance.file_chunks(open_file):
if type(emitted) is not str:
emitted = emitted.decode()
assert len(emitted) <= chunk_size
output += emitted
open_file.close()
assert output == expected
def test_gen_chunks(self):
"""Test the gen_chunks function against example output."""
name = "fsdfgh"
chunk_size = 2
instance = udfsapi.multipart.BufferedGenerator(name, chunk_size)
for i in instance.gen_chunks(_generate_test_chunks(5, 5)):
assert len(i) <= chunk_size
def test_body(self):
"""Ensure that body throws a NotImplemented exception."""
instance = udfsapi.multipart.BufferedGenerator("name")
with pytest.raises(NotImplementedError):
instance.body()
def test_close(self):
"""Test the close function against example output."""
name = "fsdfgh"
chunk_size = 2
instance = udfsapi.multipart.BufferedGenerator(name, chunk_size)
expected = '--\S+--\r\n'
actual = ''
for i in instance.close():
if type(i) is not str and type(i) is not memoryview:
i = i.decode()
elif six.PY3 and type(i) is memoryview:
i = i.tobytes().decode()
assert len(i) <= chunk_size
actual += i
assert re.search(expected, actual)
class TestFileStream(unittest.TestCase):
"""Test the FileStream class
Public methods:
test_body -- check file stream body for proper structure
"""
def test_body(self):
"""Test the body function against expected output.
Warning: This test depends on the contents of
test/functional/fake_dir
Changing that directory or its contents could break the test.
"""
# Get OS-agnostic path to test files
path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
"functional", "fake_dir")
# Collect absolute paths to all test files
filenames_list = []
for (dirpath, _, filenames) in os.walk(path):
temp_list = [os.path.join(dirpath, name) for name in filenames]
filenames_list.extend(temp_list)
# Convert absolute paths to relative
relative_paths_list = [os.path.relpath(cur_path, os.getcwd())
for cur_path in filenames_list]
instance = udfsapi.multipart.FileStream(relative_paths_list)
expected = "(--\S+\r\nContent-Disposition: file; filename=\"\S+\""\
+ "\r\nContent-Type: application/\S+\r\n"\
+ "\r\n(.|\n)*\r\n)+--\S+--\r\n"
actual = ""
for i in instance.body():
if type(i) is not str and type(i) is not memoryview:
i = i.decode()
elif six.PY3 and type(i) is memoryview:
i = i.tobytes().decode()
actual += i
assert re.search(expected, actual)
class TestDirectoryStream(unittest.TestCase):
"""Test the DirectoryStream class.
Public methods:
test_body -- check that the HTTP body for the directory is correct
test_body_recursive -- check body structure when recursive directory
is uploaded
"""
def test_body(self):
"""Check the multipart HTTP body for the streamed directory."""
# Get OS-agnostic path to test files
path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
"functional", "fake_dir")
instance = udfsapi.multipart.DirectoryStream(path)
expected = b"^(--\S+\r\nContent-Disposition: form-data; name=\"\S+\"; filename=\"\S+\""\
+ b"\r\nContent-Type: application/\S+\r\n\r\n(.|\n)*"\
+ b"\r\n)+--\S+--\r\n$"
actual = instance.body()
"""
for i in instance.body():
if type(i) is not str and type(i) is not memoryview:
i = i.decode()
elif six.PY3 and type(i) is memoryview:
i = i.tobytes().decode()
actual += i
"""
assert re.search(expected, actual)
class TestTextStream(unittest.TestCase):
"""Test the TextStream class.
Public methods:
test_body -- check that the HTTP body for the text is correct
"""
def test_body(self):
"""Check the multipart HTTP body for the streamed directory."""
# Get OS-agnostic path to test files
text = "Here is some text for this test."
instance = udfsapi.multipart.BytesStream(text)
expected = "(--\S+\r\nContent-Disposition: file; filename=\"\S+\""\
+ "\r\nContent-Type: application/\S+\r\n"\
+ "\r\n(.|\n)*\r\n)+--\S+--\r\n"
actual = ""
for i in instance.body():
if type(i) is not str and type(i) is not memoryview:
i = i.decode()
elif six.PY3 and type(i) is memoryview:
i = i.tobytes().decode()
actual += i
assert re.search(expected, actual)
class TestStreamHelpers(unittest.TestCase):
"""Test stream_files, stream_directory, and stream_text.
TODO: These functions are just wrappers around other,
already-tested functions. Maybe they should be tested,
but it is unclear how.
Public Methods:
test_stream_files -- unimplemented
test_stream_directory -- unimplemented
test_stream_text -- unimplemented
"""
def test_stream_files(self):
"""Test the stream_files function."""
pass
def test_stream_directory(self):
"""Test the stream_directory function."""
pass
def test_stream_text(self):
"""Test the stream_text function."""
pass
| 38.290323 | 96 | 0.609747 |
79458b4563ae8c6b9038a7848e6cb82e364615b4 | 450 | py | Python | wazimap_ng/datasets/migrations/0003_profile_indicators.py | arghyaiitb/wazimap-ng | 2a77860526d865b8fd0c22a2204f121fdb3b28a0 | [
"Apache-2.0"
] | 11 | 2019-12-31T20:27:22.000Z | 2022-03-10T03:55:38.000Z | wazimap_ng/datasets/migrations/0003_profile_indicators.py | arghyaiitb/wazimap-ng | 2a77860526d865b8fd0c22a2204f121fdb3b28a0 | [
"Apache-2.0"
] | 164 | 2020-02-06T15:02:22.000Z | 2022-03-30T22:42:00.000Z | wazimap_ng/datasets/migrations/0003_profile_indicators.py | arghyaiitb/wazimap-ng | 2a77860526d865b8fd0c22a2204f121fdb3b28a0 | [
"Apache-2.0"
] | 16 | 2020-01-03T20:30:24.000Z | 2022-01-11T11:05:15.000Z |
# Generated by Django 2.2.9 on 2019-12-20 13:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('datasets', '0002_remove_profile_indicators'),
]
operations = [
migrations.AddField(
model_name='profile',
name='indicators',
field=models.ManyToManyField(through='datasets.ProfileIndicator', to='datasets.Indicator'),
),
]
| 23.684211 | 103 | 0.635556 |
79458b51a643895734aecc3010f2d61b0961da6b | 100 | py | Python | macropodus/network/service/__init__.py | leileixiao/Macropodus | 9de38c06d332bd26e704fd4afd8f44678de7f44f | [
"MIT"
] | 485 | 2019-12-31T16:53:28.000Z | 2022-03-31T08:01:30.000Z | macropodus/network/service/__init__.py | Pull-Qutter/Macropodus | eafe3c00e1e4286ee672ccfada54122e9f86b534 | [
"MIT"
] | 14 | 2020-03-07T04:17:47.000Z | 2022-03-14T01:08:23.000Z | macropodus/network/service/__init__.py | Pull-Qutter/Macropodus | eafe3c00e1e4286ee672ccfada54122e9f86b534 | [
"MIT"
] | 85 | 2020-01-16T05:03:07.000Z | 2022-03-03T11:42:07.000Z |
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/1/16 22:01
# @author : Mo
# @function: | 20 | 28 | 0.53 |
79458bc0f3191cfa698c20f04e99c35b68e8d903 | 2,314 | py | Python | tests/test_regressor.py | UBC-MDS/simplefit | ba022b38a4479efe11261a292bacfa4bf441a5fa | [
"MIT"
] | null | null | null | tests/test_regressor.py | UBC-MDS/simplefit | ba022b38a4479efe11261a292bacfa4bf441a5fa | [
"MIT"
] | 41 | 2022-01-13T05:36:48.000Z | 2022-03-06T08:56:12.000Z | tests/test_regressor.py | UBC-MDS/simplefit | ba022b38a4479efe11261a292bacfa4bf441a5fa | [
"MIT"
] | 3 | 2022-01-13T05:19:09.000Z | 2022-03-07T03:20:38.000Z |
from simplefit.regressor import regressor
import pandas as pd
from sklearn.model_selection import (train_test_split,)
import pytest
def test_regressor():
"""Test regrssor function outputs with SpotifyFeatures.csv file."""
spotify_df = pd.read_csv("tests/data/SpotifyFeatures.csv")
train_df, test_df = train_test_split(spotify_df, test_size=0.97, random_state=123)
regressor_df = regressor(train_df, target_col="popularity",
numeric_feats=['acousticness', 'danceability', 'duration_ms'],
categorical_feats=['genre'])
actual_test_score = regressor_df.loc["test_score"].tolist()
actual_test_score = [round(num, 2) for num in actual_test_score]
expected_test_score = [-0.00, 0.73, 0.73, 0.73]
actual_train_score = regressor_df.loc["train_score"].tolist()
actual_train_score = [round(num, 2) for num in actual_train_score]
expected_train_score = [0.0, 0.73, 0.73, 0.73]
assert actual_test_score == expected_test_score, "regressor modeled incorrectly test scores are not equal to what they should be!"
assert actual_train_score == expected_train_score, "regressor modeled incorrectly train scores are not equal to what they should be!"
def test_regressor_error() :
"""
    Test edge cases
4 tests in total.
"""
spotify_df = pd.read_csv("tests/data/SpotifyFeatures.csv")
with pytest.raises(TypeError) as e:
regressor(1, target_col = "popularity", numeric_feats=['acousticness'])
assert str(e.value) == "train_df must be a pandas dataframe. Please pass a pd.core.frame.DataFrame train_df."
with pytest.raises(TypeError) as e:
regressor(spotify_df, target_col = 1, numeric_feats=['acousticness'])
assert str(e.value) == "target_col must be a str. Please pass target column in str object."
with pytest.raises(TypeError) as e:
regressor(spotify_df, target_col = "popularity", numeric_feats=1)
assert str(e.value) == "numeric_feats must be a list. Please pass a list of numeric columns."
with pytest.raises(TypeError) as e:
regressor(spotify_df, target_col = "popularity", numeric_feats=['acousticness'],categorical_feats=1)
assert str(e.value) == "categorical_feats must be a list. Please pass a list of categorical columns."
| 49.234043 | 137 | 0.71478 |
79458c4f5d32e13d722ca7677cfec3226cf8ded5 | 1,944 | py | Python | tools/validation_solo.py | WanxinT/Balanced-RODNet | f6c9c5b4696b697254698cce65a97ec2d92c7a3c | [
"MIT"
] | null | null | null | tools/validation_solo.py | WanxinT/Balanced-RODNet | f6c9c5b4696b697254698cce65a97ec2d92c7a3c | [
"MIT"
] | null | null | null | tools/validation_solo.py | WanxinT/Balanced-RODNet | f6c9c5b4696b697254698cce65a97ec2d92c7a3c | [
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
@author:Zehui Yu
@file: validation_rod2021.py
@time: 2021/01/31
"""
import sys
import os
from cruw import CRUW
from cruw.eval import evaluate_rod2021, evaluate_rod2021_APAR
import argparse
"python tools/validation_rod2021.py --config configs/my_config_rodnet_hg1_win16_lovasz_bs16_lr1e5_2020_2_11.py " \
" --checkpoint_name rodnet-hg1-win16-wobg-lovasz_bs16_lr1e5_2020_2_11-20210211-103511"
def parse_args():
parser = argparse.ArgumentParser(description='Test RODNet.')
parser.add_argument('--config', type=str, help='choose rodnet model configurations')
parser.add_argument('--checkpoint_name', type=str, default='./data/', help='directory to the prepared data')
args = parser.parse_args()
return args
def eval_rod2021_batch(config_file, checkpoint_name):
data_root = "/nfs/volume-95-8/ROD_Challenge/src_dataset"
dataset = CRUW(data_root=data_root, sensor_config_name='sensor_config_rod2021')
submit_dir = '/nfs/volume-95-8/tianwanxin/RODNet/valid_results/%s' % checkpoint_name
truth_dir = '/nfs/volume-95-8/aocheng/RODNeto/data/scene_ave/for_valid'
AP, AR = evaluate_rod2021_APAR(submit_dir, truth_dir, dataset)
print('AP: %.4f, AR: %.4f' % (AP, AR))
with open('/nfs/volume-95-8/tianwanxin/RODNet/valid_res/%s/valid_res.txt' % checkpoint_name, 'a') as f:
f.write('AP: %.4f, AR: %.4f\n' % (AP, AR))
if __name__ == '__main__':
# data_root = "/nfs/volume-95-8/ROD_Challenge/src_dataset"
# dataset = CRUW(data_root=data_root, sensor_config_name='sensor_config_rod2021')
# submit_dir = '/nfs/volume-95-8/ROD_Challenge/RODNet/tools/valid_results/rodnet-hg1-win16-wobg-20210206-124028'
# truth_dir = '/nfs/volume-95-8/ROD_Challenge/RODNet/for_validation/gt_zixiang_split'
# ap, ar = evaluate_rod2021_APAR(submit_dir, truth_dir, dataset)
# print(ap, ar)
args = parse_args()
eval_rod2021_batch(args.config, args.checkpoint_name)
| 43.2 | 116 | 0.739198 |
79458d0e4a827f83588cead3bb0c7b2ab7a52ae4 | 4,521 | py | Python | workspace/riva_quickstart_v1.8.0-beta/examples/talk_stream.py | shahin-trunk/NeMo | a10ac29a6deb05bcfc672ad287f4a8279c1f9289 | [
"Apache-2.0"
] | null | null | null | workspace/riva_quickstart_v1.8.0-beta/examples/talk_stream.py | shahin-trunk/NeMo | a10ac29a6deb05bcfc672ad287f4a8279c1f9289 | [
"Apache-2.0"
] | null | null | null | workspace/riva_quickstart_v1.8.0-beta/examples/talk_stream.py | shahin-trunk/NeMo | a10ac29a6deb05bcfc672ad287f4a8279c1f9289 | [
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import argparse
import sys
import time
import wave
import grpc
import numpy as np
import pyaudio
import riva_api.riva_audio_pb2 as ra
import riva_api.riva_tts_pb2 as rtts
import riva_api.riva_tts_pb2_grpc as rtts_srv
def get_args():
parser = argparse.ArgumentParser(description="Streaming transcription via Riva AI Services")
parser.add_argument("--server", default="77.242.240.151:5001", type=str, help="URI to GRPC server endpoint")
parser.add_argument("--voice", type=str, help="voice name to use", default="ljspeech")
parser.add_argument("-o", "--output", default=None, type=str, help="Output file to write last utterance")
parser.add_argument("--list-devices", action="store_true", help="list output devices indices")
parser.add_argument("--output-device", type=int, help="Output device to use")
return parser.parse_args()
def main():
args = get_args()
channel = grpc.insecure_channel(args.server)
tts_client = rtts_srv.RivaSpeechSynthesisStub(channel)
audio_handle = pyaudio.PyAudio()
if args.list_devices:
for i in range(audio_handle.get_device_count()):
info = audio_handle.get_device_info_by_index(i)
if info['maxOutputChannels'] < 1:
continue
print(f"{info['index']}: {info['name']}")
sys.exit(0)
print("Connecting...")
# print("Example query:")
# print(
# " Hello, My name is Linda"
# + ", and I am demonstrating streaming speech synthesis with Riva {@EY2}.I. services, running on NVIDIA {@JH}{@IY1}_{@P}{@IY}_{@Y}{@UW0}s."
# )
req = rtts.SynthesizeSpeechRequest()
req.text = "Hello"
req.language_code = "en-US"
req.encoding = ra.AudioEncoding.LINEAR_PCM
req.sample_rate_hz = 22050
req.voice_name = args.voice
stream = audio_handle.open(
format=pyaudio.paFloat32, output_device_index=args.output_device, channels=1, rate=22050, output=True
)
while True:
print("Speak: ", end='')
req.text = str(input())
if args.output:
wav = wave.open(args.output, 'wb')
wav.setnchannels(1)
wav.setsampwidth(2)
wav.setframerate(req.sample_rate_hz)
print("Generating audio for request...")
print(f" > '{req.text}': ", end='')
start = time.time()
responses = tts_client.SynthesizeOnline(req)
stop = time.time()
first = True
for resp in responses:
stop = time.time()
if first:
print(f"Time to first audio: {(stop-start):.3f}s")
first = False
stream.write(resp.audio)
if args.output:
f32_output = (np.frombuffer(resp.audio, dtype=np.float32) * 32767).astype(np.int16)
wav.writeframesraw(f32_output)
if args.output:
wav.close()
stream.stop_stream()
stream.close()
if __name__ == '__main__':
main()
| 39.657895 | 148 | 0.680602 |
79458dbaac4f101bebff22a5151fa53e54f6d294 | 25,985 | py | Python | qa/rpc-tests/test_framework/script.py | koba24/tcoin | 04b9caaca587fa1bc928c81940d7ba3d2754083b | [
"MIT"
] | 2 | 2018-06-24T19:51:25.000Z | 2019-06-11T14:00:16.000Z | qa/rpc-tests/test_framework/script.py | koba24/tcoin | 04b9caaca587fa1bc928c81940d7ba3d2754083b | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/script.py | koba24/tcoin | 04b9caaca587fa1bc928c81940d7ba3d2754083b | [
"MIT"
] | 2 | 2018-09-13T22:54:32.000Z | 2019-02-20T02:04:25.000Z |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Tcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# script.py
#
# This file is modified from python-tcoinlib.
#
"""Scripts
Functionality to build scripts, as well as SignatureHash().
"""
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import struct
from .bignum import bn2vch
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff+1):
CScriptOp(n)
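# Illustrative sketch (comments only, not executed):
#
#     CScriptOp.encode_op_n(2)                 # -> OP_2 (0x52)
#     OP_2.decode_op_n()                       # -> 2
#     CScriptOp.encode_op_pushdata(b'abc')     # -> b'\x03abc' (single length byte)
#     CScriptOp.encode_op_pushdata(b'a' * 80)  # -> OP_PUSHDATA1-prefixed push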
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
VALID_OPCODES = {
OP_1NEGATE,
OP_RESERVED,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_13,
OP_14,
OP_15,
OP_16,
OP_NOP,
OP_VER,
OP_IF,
OP_NOTIF,
OP_VERIF,
OP_VERNOTIF,
OP_ELSE,
OP_ENDIF,
OP_VERIFY,
OP_RETURN,
OP_TOALTSTACK,
OP_FROMALTSTACK,
OP_2DROP,
OP_2DUP,
OP_3DUP,
OP_2OVER,
OP_2ROT,
OP_2SWAP,
OP_IFDUP,
OP_DEPTH,
OP_DROP,
OP_DUP,
OP_NIP,
OP_OVER,
OP_PICK,
OP_ROLL,
OP_ROT,
OP_SWAP,
OP_TUCK,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_SIZE,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_EQUAL,
OP_EQUALVERIFY,
OP_RESERVED1,
OP_RESERVED2,
OP_1ADD,
OP_1SUB,
OP_2MUL,
OP_2DIV,
OP_NEGATE,
OP_ABS,
OP_NOT,
OP_0NOTEQUAL,
OP_ADD,
OP_SUB,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT,
OP_BOOLAND,
OP_BOOLOR,
OP_NUMEQUAL,
OP_NUMEQUALVERIFY,
OP_NUMNOTEQUAL,
OP_LESSTHAN,
OP_GREATERTHAN,
OP_LESSTHANOREQUAL,
OP_GREATERTHANOREQUAL,
OP_MIN,
OP_MAX,
OP_WITHIN,
OP_RIPEMD160,
OP_SHA1,
OP_SHA256,
OP_HASH160,
OP_HASH256,
OP_CODESEPARATOR,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_NOP1,
OP_CHECKLOCKTIMEVERIFY,
OP_CHECKSEQUENCEVERIFY,
OP_NOP4,
OP_NOP5,
OP_NOP6,
OP_NOP7,
OP_NOP8,
OP_NOP9,
OP_NOP10,
OP_SMALLINTEGER,
OP_PUBKEYS,
OP_PUBKEYHASH,
OP_PUBKEY,
}
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
OPCODES_BY_NAME = {
'OP_0' : OP_0,
'OP_PUSHDATA1' : OP_PUSHDATA1,
'OP_PUSHDATA2' : OP_PUSHDATA2,
'OP_PUSHDATA4' : OP_PUSHDATA4,
'OP_1NEGATE' : OP_1NEGATE,
'OP_RESERVED' : OP_RESERVED,
'OP_1' : OP_1,
'OP_2' : OP_2,
'OP_3' : OP_3,
'OP_4' : OP_4,
'OP_5' : OP_5,
'OP_6' : OP_6,
'OP_7' : OP_7,
'OP_8' : OP_8,
'OP_9' : OP_9,
'OP_10' : OP_10,
'OP_11' : OP_11,
'OP_12' : OP_12,
'OP_13' : OP_13,
'OP_14' : OP_14,
'OP_15' : OP_15,
'OP_16' : OP_16,
'OP_NOP' : OP_NOP,
'OP_VER' : OP_VER,
'OP_IF' : OP_IF,
'OP_NOTIF' : OP_NOTIF,
'OP_VERIF' : OP_VERIF,
'OP_VERNOTIF' : OP_VERNOTIF,
'OP_ELSE' : OP_ELSE,
'OP_ENDIF' : OP_ENDIF,
'OP_VERIFY' : OP_VERIFY,
'OP_RETURN' : OP_RETURN,
'OP_TOALTSTACK' : OP_TOALTSTACK,
'OP_FROMALTSTACK' : OP_FROMALTSTACK,
'OP_2DROP' : OP_2DROP,
'OP_2DUP' : OP_2DUP,
'OP_3DUP' : OP_3DUP,
'OP_2OVER' : OP_2OVER,
'OP_2ROT' : OP_2ROT,
'OP_2SWAP' : OP_2SWAP,
'OP_IFDUP' : OP_IFDUP,
'OP_DEPTH' : OP_DEPTH,
'OP_DROP' : OP_DROP,
'OP_DUP' : OP_DUP,
'OP_NIP' : OP_NIP,
'OP_OVER' : OP_OVER,
'OP_PICK' : OP_PICK,
'OP_ROLL' : OP_ROLL,
'OP_ROT' : OP_ROT,
'OP_SWAP' : OP_SWAP,
'OP_TUCK' : OP_TUCK,
'OP_CAT' : OP_CAT,
'OP_SUBSTR' : OP_SUBSTR,
'OP_LEFT' : OP_LEFT,
'OP_RIGHT' : OP_RIGHT,
'OP_SIZE' : OP_SIZE,
'OP_INVERT' : OP_INVERT,
'OP_AND' : OP_AND,
'OP_OR' : OP_OR,
'OP_XOR' : OP_XOR,
'OP_EQUAL' : OP_EQUAL,
'OP_EQUALVERIFY' : OP_EQUALVERIFY,
'OP_RESERVED1' : OP_RESERVED1,
'OP_RESERVED2' : OP_RESERVED2,
'OP_1ADD' : OP_1ADD,
'OP_1SUB' : OP_1SUB,
'OP_2MUL' : OP_2MUL,
'OP_2DIV' : OP_2DIV,
'OP_NEGATE' : OP_NEGATE,
'OP_ABS' : OP_ABS,
'OP_NOT' : OP_NOT,
'OP_0NOTEQUAL' : OP_0NOTEQUAL,
'OP_ADD' : OP_ADD,
'OP_SUB' : OP_SUB,
'OP_MUL' : OP_MUL,
'OP_DIV' : OP_DIV,
'OP_MOD' : OP_MOD,
'OP_LSHIFT' : OP_LSHIFT,
'OP_RSHIFT' : OP_RSHIFT,
'OP_BOOLAND' : OP_BOOLAND,
'OP_BOOLOR' : OP_BOOLOR,
'OP_NUMEQUAL' : OP_NUMEQUAL,
'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
'OP_LESSTHAN' : OP_LESSTHAN,
'OP_GREATERTHAN' : OP_GREATERTHAN,
'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
'OP_MIN' : OP_MIN,
'OP_MAX' : OP_MAX,
'OP_WITHIN' : OP_WITHIN,
'OP_RIPEMD160' : OP_RIPEMD160,
'OP_SHA1' : OP_SHA1,
'OP_SHA256' : OP_SHA256,
'OP_HASH160' : OP_HASH160,
'OP_HASH256' : OP_HASH256,
'OP_CODESEPARATOR' : OP_CODESEPARATOR,
'OP_CHECKSIG' : OP_CHECKSIG,
'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
'OP_NOP1' : OP_NOP1,
'OP_CHECKLOCKTIMEVERIFY' : OP_CHECKLOCKTIMEVERIFY,
'OP_CHECKSEQUENCEVERIFY' : OP_CHECKSEQUENCEVERIFY,
'OP_NOP4' : OP_NOP4,
'OP_NOP5' : OP_NOP5,
'OP_NOP6' : OP_NOP6,
'OP_NOP7' : OP_NOP7,
'OP_NOP8' : OP_NOP8,
'OP_NOP9' : OP_NOP9,
'OP_NOP10' : OP_NOP10,
'OP_SMALLINTEGER' : OP_SMALLINTEGER,
'OP_PUBKEYS' : OP_PUBKEYS,
'OP_PUBKEYHASH' : OP_PUBKEYHASH,
'OP_PUBKEY' : OP_PUBKEY,
}
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum(object):
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes(bchr(len(r)) + r)
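# Hedged sketch (not part of the original module): a few encodings produced by
# CScriptNum.encode() above -- length-prefixed, little-endian, minimally encoded,
# with the sign carried in the top bit of the last byte.
def _example_scriptnum_encoding():
    assert CScriptNum.encode(CScriptNum(0)) == b''                 # zero is the empty vector
    assert CScriptNum.encode(CScriptNum(1)) == b'\x01\x01'
    assert CScriptNum.encode(CScriptNum(-1)) == b'\x01\x81'        # sign bit set
    assert CScriptNum.encode(CScriptNum(128)) == b'\x02\x80\x00'   # padding byte keeps the value positive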
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
Note that this means that indexing does *not* work - you'll get an index by
byte rather than opcode. This format was chosen for efficiency so that the
general case would not require creating a lot of little CScriptOP objects.
iter(script) however does iterate by opcode.
"""
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bchr(other)
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bchr(CScriptOp(OP_0))
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes(bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# Do the coercion outside of the try block so that errors in it are
# noticed.
other = self.__coerce_instance(other)
try:
# bytes.__add__ always returns bytes instances unfortunately
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
Yields tuples of (opcode, data, sop_idx) so that the different possible
PUSHDATA encodings can be accurately distinguished, as well as
determining the exact opcode byte indexes. (sop_idx)
"""
i = 0
while i < len(self):
sop_idx = i
opcode = bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
Returns either a CScriptOP instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
# For Python3 compatibility add b before strings so testcases don't
# need to change
def _repr(o):
if isinstance(o, bytes):
return b"x('%s')" % hexlify(o).decode('ascii')
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
                    n += lastOpcode.decode_op_n()  # BIP16 accurate count decodes the preceding OP_N
else:
n += 20
lastOpcode = opcode
return n
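# Hedged usage sketch (not part of the original module): building a P2PKH-style
# script from Python objects and iterating it back. The 20-byte hash is a dummy
# placeholder, not a real public key hash.
def _example_script_roundtrip():
    dummy_hash = b'\x00' * 20
    script = CScript([OP_DUP, OP_HASH160, dummy_hash, OP_EQUALVERIFY, OP_CHECKSIG])
    # 'Cooked' iteration yields opcodes for operations and bytes for pushed data.
    assert list(script) == [OP_DUP, OP_HASH160, dummy_hash, OP_EQUALVERIFY, OP_CHECKSIG]
    assert script.GetSigOpCount(fAccurate=True) == 1
    return script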
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def SignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
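# Hedged sketch (not part of the original module): sighash types combine bitwise;
# the low five bits select the output-commitment mode tested via (hashtype & 0x1f)
# in SignatureHash() above.
def _example_hashtype_flags():
    hashtype = SIGHASH_SINGLE | SIGHASH_ANYONECANPAY
    assert hashtype == 0x83
    assert hashtype & 0x1f == SIGHASH_SINGLE
    return hashtype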
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(hash256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return hash256(ss)
| 27.410338 | 146 | 0.612007 |
79458e476952c9ebedc6d8ed82636f69567cc2fe | 1,218 | py | Python | app/cers/migrations/0001_initial.py | greenlaver/okulab-cers | 47b432563180ef605af89fa17d8dfcf73696e58d | [
"MIT"
] | null | null | null | app/cers/migrations/0001_initial.py | greenlaver/okulab-cers | 47b432563180ef605af89fa17d8dfcf73696e58d | [
"MIT"
] | null | null | null | app/cers/migrations/0001_initial.py | greenlaver/okulab-cers | 47b432563180ef605af89fa17d8dfcf73696e58d | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2020-06-02 19:22
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, validators=[django.core.validators.RegexValidator(message='Only full-width katakana characters are valid', regex='^[ァ-ヶ \u3000]+$')])),
('student_number', models.CharField(max_length=15)),
],
),
migrations.CreateModel(
name='Attendance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_entry', models.BooleanField()),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cers.User')),
],
),
]
| 34.8 | 159 | 0.607553 |
79458e760b22bd183a077d723124cd824deb0fde | 26 | py | Python | pynqhls/io/__init__.py | 3togo/PYNQ-HLS | fc3bcf4b6afe8cfcf411035ac50f34089b2ab3bf | [
"BSD-3-Clause"
] | 87 | 2018-02-14T18:49:48.000Z | 2022-03-03T14:43:59.000Z | pynqhls/io/__init__.py | xupsh/PYNQ-HLS | 60cfd98362632ea4417309a3d73ff60885eba1ab | [
"BSD-3-Clause"
] | 3 | 2018-12-11T06:30:25.000Z | 2020-12-30T04:54:21.000Z | pynqhls/io/__init__.py | xupsh/PYNQ-HLS | 60cfd98362632ea4417309a3d73ff60885eba1ab | [
"BSD-3-Clause"
] | 25 | 2018-08-08T13:52:48.000Z | 2022-03-29T18:24:04.000Z | from .io import ioOverlay
| 13 | 25 | 0.807692 |
79458e835245a1428d7b2a4c5155731f69665339 | 2,051 | py | Python | lccs_db/models/base.py | raphaelrpl/lccs-db | 4db4666bb8ea7e18527b6530e48896002a841cb4 | [
"MIT"
] | null | null | null | lccs_db/models/base.py | raphaelrpl/lccs-db | 4db4666bb8ea7e18527b6530e48896002a841cb4 | [
"MIT"
] | null | null | null | lccs_db/models/base.py | raphaelrpl/lccs-db | 4db4666bb8ea7e18527b6530e48896002a841cb4 | [
"MIT"
] | null | null | null | #
# This file is part of Land Cover Classification System Database Model.
# Copyright (C) 2019-2020 INPE.
#
# Land Cover Classification System Database Model is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Model Configuration."""
from datetime import datetime
from bdc_db.db import db
from sqlalchemy import Column, DateTime
from ..config import Config
class BaseModel(db.Model):
"""Abstract class for ORM models."""
__abstract__ = True
    # pass the callables so the timestamps are evaluated per row, not once at import
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow,
                        onupdate=datetime.utcnow)
def save(self, commit=True):
"""Save and persists object in database."""
db.session.add(self)
if not commit:
return
try:
db.session.commit()
except Exception as e:
db.session.rollback()
raise e
def delete(self):
"""Delete object from database."""
try:
db.session.delete(self)
db.session.commit()
except Exception as e:
db.session.rollback()
raise e
@classmethod
def _filter(cls, **properties):
"""Filter abstraction."""
return db.session.query(cls).filter_by(**properties)
@classmethod
def filter(cls, **properties):
"""Filter data set rows following the provided restrictions.
Provides a wrapper of SQLAlchemy session query.
:param properties: List of properties to filter of.
:type properties: dict.
"""
return cls._filter(**properties).all()
@classmethod
def get(cls, **restrictions):
"""Get one data set from database.
Throws exception **NoResultFound** when the filter does not match any result.
:param properties: List of properties to filter of.
:type properties: dict.
"""
return cls._filter(**restrictions).one()
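# Hedged usage sketch (not part of the original module): how a concrete model built
# on BaseModel is typically persisted and queried. ``ExampleClass`` and its columns
# are hypothetical; only save()/filter()/get() come from the class above.
def _base_model_usage_example():
    from sqlalchemy import Integer, String

    class ExampleClass(BaseModel):
        __tablename__ = 'example_class'
        id = Column(Integer, primary_key=True)
        name = Column(String(64), nullable=False)

    ExampleClass(name='forest').save()              # INSERT + COMMIT
    matches = ExampleClass.filter(name='forest')    # list of matching rows
    single = ExampleClass.get(name='forest')        # exactly one row, else NoResultFound
    return matches, single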
| 27.716216 | 108 | 0.628961 |
794590802b96ba431f0521d56d0a8d4f9971531b | 601 | py | Python | config.py | etherisc/arc2-server | ba2a59382fa7dd09eed71780318eed243bd52543 | [
"Apache-2.0"
] | null | null | null | config.py | etherisc/arc2-server | ba2a59382fa7dd09eed71780318eed243bd52543 | [
"Apache-2.0"
] | null | null | null | config.py | etherisc/arc2-server | ba2a59382fa7dd09eed71780318eed243bd52543 | [
"Apache-2.0"
] | null | null | null | import logging
from logging.config import dictConfig
# logging config from https://flask.palletsprojects.com/en/2.0.x/logging/
LOGGING_CONFIG = {
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s %(module)s: %(message)s',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://flask.logging.wsgi_errors_stream',
'formatter': 'default'
}},
'root': {
'level': 'INFO',
'handlers': ['wsgi']
}
}
def configure_logging():
dictConfig(LOGGING_CONFIG)
| 26.130435 | 74 | 0.569052 |
7945910277533d30330a5444dcbecbcec879ba1c | 2,712 | py | Python | tests/common_tests.py | agorinenko/drf-toolkit | 2c0bc2137ab7ae7de085c03e2bdfcac13431ba0d | [
"MIT"
] | 2 | 2019-12-16T12:17:16.000Z | 2020-02-05T10:56:48.000Z | tests/common_tests.py | agorinenko/drf-toolkit | 2c0bc2137ab7ae7de085c03e2bdfcac13431ba0d | [
"MIT"
] | 8 | 2020-03-05T14:35:05.000Z | 2022-02-10T09:58:52.000Z | tests/common_tests.py | agorinenko/drf-toolkit | 2c0bc2137ab7ae7de085c03e2bdfcac13431ba0d | [
"MIT"
] | null | null | null | import unittest
from drf_toolkit.drf_utils import DrfUtils
class CommonTests(unittest.TestCase):
def test_transform_list_parameters(self):
schema = [
{'name': 'type_of_message', 'type': 'list',
'items': {'type': 'enum', 'choices': ['text_message', 'issue', 'solution', 'solution_without_request']},
             'required': False, 'unique': True, 'description': 'Message type'},
{'name': 'limit', 'type': 'int', 'required': False, 'default_value': 20,
             'description': 'Number of items to retrieve'},
{'name': 'offset', 'type': 'int', 'required': False, 'default_value': 0,
             'description': 'Starting position for retrieving items'}]
context = {'type_of_message': 'solution,solution_without_request'}
context = DrfUtils.transform_list_parameters(context, schema)
self.assertIsInstance(context['type_of_message'], list)
self.assertEqual(context['type_of_message'].sort(), ['solution', 'solution_without_request'].sort())
def test_transform_list_parameters_2(self):
schema = [
{'name': 'limit', 'type': 'int', 'required': False, 'default_value': 20,
             'description': 'Number of items to retrieve'},
{'name': 'offset', 'type': 'int', 'required': False, 'default_value': 0,
             'description': 'Starting position for retrieving items'}]
context = {'limit': 1, 'offset': 0}
context_2 = DrfUtils.transform_list_parameters(context, schema)
self.assertEqual(context, context_2)
def test_transform_list_parameters_3(self):
schema = [
{'name': 'type_of_message', 'type': 'list',
'items': {'type': 'enum', 'choices': ['text_message', 'issue', 'solution', 'solution_without_request']},
'required': False, 'unique': True, 'description': 'Тип сообщения'}]
context = {'type_of_message': ''}
context = DrfUtils.transform_list_parameters(context, schema)
self.assertIsInstance(context['type_of_message'], list)
self.assertEqual(context['type_of_message'], [])
def test_transform_list_parameters_4(self):
schema = [
{'name': 'type_of_message', 'type': 'list',
'items': {'type': 'enum', 'choices': ['text_message', 'issue', 'solution', 'solution_without_request']},
             'required': False, 'unique': True, 'description': 'Message type'}]
context = {'type_of_message': '1,'}
context = DrfUtils.transform_list_parameters(context, schema)
self.assertIsInstance(context['type_of_message'], list)
self.assertEqual(context['type_of_message'], ['1'])
| 46.758621 | 117 | 0.626106 |
794591cefed3ef67e7f482f2d6ba79fe06564625 | 8,531 | py | Python | python/tvm/meta_schedule/integration.py | prateek9623/tvm | 891fd0132b7bbdd9e6b75935db4a4a688fef6afc | [
"Apache-2.0"
] | null | null | null | python/tvm/meta_schedule/integration.py | prateek9623/tvm | 891fd0132b7bbdd9e6b75935db4a4a688fef6afc | [
"Apache-2.0"
] | null | null | null | python/tvm/meta_schedule/integration.py | prateek9623/tvm | 891fd0132b7bbdd9e6b75935db4a4a688fef6afc | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta schedule integration with high-level IR"""
from contextlib import contextmanager
from typing import Callable, Dict, List, Optional, Union
from tvm._ffi import register_object
from tvm.ir import IRModule, transform
from tvm.relay import Any, Function as RelayFunc, vm
from tvm.runtime import NDArray, Object
from tvm.target import Target
from tvm.tir import PrimFunc
from .database import Database
from . import _ffi_api
@register_object("meta_schedule.ExtractedTask")
class ExtractedTask(Object):
"""A tuning task extracted from the high-level IR
Parameters
----------
task_name : str
The name of the task extracted
mod : IRModule
The high-level IR
dispatched : List[IRModule]
A list of low-level IRs that the high-level IR could potentially dispatch to
"""
task_name: str
mod: IRModule
dispatched: List[IRModule]
def __init__(
self,
task_name: str,
mod: IRModule,
dispatched: List[IRModule],
) -> None:
self.__init_handle_by_constructor__(
_ffi_api.ExtractedTask, # type: ignore # pylint: disable=no-member
task_name,
mod,
dispatched,
)
@register_object("meta_schedule.MetaScheduleContext")
class MetaScheduleContext(Object):
"""A context manager interface for the integration"""
def query(
self,
task_name: str,
mod: IRModule,
dispatched: Optional[List[IRModule]],
) -> Union[IRModule, RelayFunc, PrimFunc, None]:
"""The entry point of the integration
Parameters
----------
task_name : str
The name of the task extracted
mod : IRModule
The high-level IR
dispatched : Optional[List[IRModule]]
A list of low-level IRs that the high-level IR could potentially dispatch to
Returns
-------
result : Union[IRModule, RelayFunc, PrimFunc, None]
There are different types of the output:
1) NullOpt if there is no feedback hint;
2) tir::PrimFunc if `mod` should be lowered to a PrimFunc;
3) relay::Function if `mod` should be dispatched to BYOC workflow;
4) IRModule for unified dispatch
"""
return _ffi_api.MetaScheduleContextQuery( # type: ignore # pylint: disable=no-member
self,
task_name,
mod,
dispatched,
)
@staticmethod
def current() -> Optional["MetaScheduleContext"]:
"""The context manager in the current scope
Returns
-------
ctx : Optional[MetaScheduleContext]
The MetaScheduleContext in the current scope.
NullOpt if it's currently not under any MetaScheduleContext.
"""
return _ffi_api.MetaScheduleContextCurrent() # type: ignore # pylint: disable=no-member
@staticmethod
def query_inside_with_scope(
task_name: str,
mod: IRModule,
dispatched: Optional[List[IRModule]],
) -> Union[IRModule, RelayFunc, PrimFunc, None]:
"""The entry point of the integration workflow. The compilation process of the high-level
IR should call this method for task extraction and for feedback hints
Basically, this method is equivalent to:
.. code-block:: python
def query_inside_with_scope(task_name, mod, dispatched):
ctx = MetaScheduleContext.current()
assert ctx is not None
ctx.query(task_name, mod, dispatched)
Parameters
----------
task_name : str
The name of the task
mod : IRModule
The high-level IR
dispatched : Optional[List[IRModule]]
A list of low-level IRs that the high-level IR could potentially dispatch to
Returns
-------
result : Union[IRModule, RelayFunc, PrimFunc, None]
There are different types of the output:
1) NullOpt if there is no feedback hint;
2) tir::PrimFunc if `mod` should be lowered to a PrimFunc;
3) relay::Function if `mod` should be dispatched to BYOC workflow;
4) IRModule for unified dispatch
"""
return _ffi_api.MetaScheduleContextQueryInsideWithScope( # type: ignore # pylint: disable=no-member
task_name,
mod,
dispatched,
)
def __enter__(self) -> "MetaScheduleContext":
"""Entering the scope of the context manager"""
_ffi_api.MetaScheduleContextEnterScope(self) # type: ignore # pylint: disable=no-member
return self
def __exit__(self, ptype, value, trace) -> None:
"""Exiting the scope of the context manager"""
_ffi_api.MetaScheduleContextExitScope(self) # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.TaskExtraction")
class TaskExtraction(MetaScheduleContext):
"""An integration context for task extraction"""
tasks: List[ExtractedTask]
"""The extracted tasks"""
def __init__(self) -> None:
self.__init_handle_by_constructor__(_ffi_api.TaskExtraction) # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.ApplyHistoryBest")
class ApplyHistoryBest(MetaScheduleContext):
"""An integration context that allows application of historically best record from database"""
database: Database
""" The database to be queried from"""
def __init__(self, database) -> None:
self.__init_handle_by_constructor__(_ffi_api.ApplyHistoryBest, database) # type: ignore # pylint: disable=no-member
def extract_task(
mod: Union[IRModule, RelayFunc],
target: Target,
params: Optional[Dict[str, NDArray]] = None,
*,
opt_level: int = 3,
pass_config: Dict[str, Any] = {
"relay.backend.use_meta_schedule": True,
},
disabled_pass: List[str] = [],
) -> List[ExtractedTask]:
"""Extract tuning tasks from a relay program.
Parameters
----------
mod : Union[tvm.IRModule, tvm.relay.Function]
The module or function to tune
target : tvm.target.Target
The compilation target
params : Optional[Dict[str, tvm.runtime.NDArray]]
The associated parameters of the program
opt_level : int
The optimization level of the compiler
pass_config : Dict[str, Any]
The pass config of the compiler
disabled_pass : List[str]
The list of disabled passes of the compiler
Returns
-------
tasks: List[ExtractedTask]
The tasks extracted from this network
"""
@contextmanager
def _autotvm_silencer():
from tvm import autotvm # pylint: disable=import-outside-toplevel
silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
try:
yield
finally:
autotvm.GLOBAL_SCOPE.silent = silent
def _thread_run(func: Callable[[], None]) -> None:
import threading # pylint: disable=import-outside-toplevel
thread = threading.Thread(target=func)
thread.start()
thread.join()
env = TaskExtraction()
if isinstance(mod, RelayFunc):
mod = IRModule.from_expr(mod)
if not isinstance(target, Target):
target = Target(target)
def _func():
with env, _autotvm_silencer(), transform.PassContext(
config=pass_config,
disabled_pass=disabled_pass,
opt_level=opt_level,
):
compiler = vm.VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target)
_thread_run(_func)
return env.tasks
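# Hedged usage sketch (the Relay module, its parameters and the database paths are
# assumptions of this example, not part of the module above): extract tuning tasks
# from a network, then lower it inside an ApplyHistoryBest scope so that tuned
# records are queried back from the database.
def _example_integration(relay_mod: IRModule, relay_params: Dict[str, NDArray], work_dir: str):
    from tvm.meta_schedule.database import JSONDatabase  # assumed import path

    target = Target("llvm")
    tasks = extract_task(relay_mod, target, relay_params)
    for task in tasks:
        print(task.task_name, len(task.dispatched))  # one dispatch candidate per low-level IR

    database = JSONDatabase(
        path_workload=work_dir + "/database_workload.json",
        path_tuning_record=work_dir + "/database_tuning_record.json",
    )
    with ApplyHistoryBest(database):
        # Inside this scope MetaScheduleContext.current() is the ApplyHistoryBest
        # instance, so the compiler can look up historically best schedules.
        with transform.PassContext(opt_level=3, config={"relay.backend.use_meta_schedule": True}):
            compiler = vm.VMCompiler()
            compiler.set_params(relay_params)
            compiler.lower(relay_mod, target)
    return tasks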
| 33.065891 | 124 | 0.647286 |
794592a3c51632137e116650e2d26a67a3543578 | 1,766 | py | Python | PCA/plot.py | ZechangSun/VisHW | 7dc1bed84a67f2cdd523e7e4799a1ce31405de38 | [
"MIT"
] | null | null | null | PCA/plot.py | ZechangSun/VisHW | 7dc1bed84a67f2cdd523e7e4799a1ce31405de38 | [
"MIT"
] | null | null | null | PCA/plot.py | ZechangSun/VisHW | 7dc1bed84a67f2cdd523e7e4799a1ce31405de38 | [
"MIT"
] | null | null | null | from pyecharts import Scatter, Scatter3D
from pyecharts import Page
import pyecharts
import numpy as np
import pandas as pd
if __name__ == '__main__':
data = pd.read_csv('img2d.csv', sep=',', names=['x', 'y'])
pyecharts.configure(global_theme='shine')
label = np.load('../data/sampled_label.npy')
page = Page(page_title='PCA visualization')
scatter2d = Scatter(title='PCA with 2 components', width=1400, height=720, title_pos='center')
for i in range(10):
scatter2d.add('%i' % i, data['x'][label == i], data['y'][label == i], legend_orient='vertical',
                      legend_pos='5%', legend_top='center', yaxis_pos='right', is_datazoom_show=True, datazoom_type='both', label_formatter='{a}')
page.add_chart(scatter2d)
data3d = pd.read_csv('img3d.csv', sep=',', names=['x', 'y', 'z'])
scatter3d = Scatter(title='PCA with 3 components', width=1400, height=720, title_pos='center')
for i in range(10):
t = list(data3d['z'][label == i])
scatter3d.add('%i' % i, data3d['x'][label == i], data3d['y'][label == i], extra_data=list(data3d['z'][label == i]), is_visualmap=True, visual_type='size', visual_range_size=[5, 15], visual_range=[min(t), max(t)], legend_orient='vertical',
                      legend_pos='5%', legend_top='center', yaxis_pos='right', is_datazoom_show=True, datazoom_type='both', label_formatter='{a}')
page.add_chart(scatter3d)
scatter3D = Scatter3D('PCA with 3 components (3D)', width=1400, height=720, title_pos='center')
for i in range(10):
scatter3D.add('%i'%i, data3d.values[label == i], legend_pos='5%', legend_orient='vertical', legend_top='center')
page.add_chart(scatter3D)
page.render('test.html')
| 58.866667 | 246 | 0.656285 |
794592ed4e8e8dec10076ba8588095d07710d493 | 108,813 | py | Python | nematus/nmt.py | cindyxinyiwang/nematus | 30fa0bd4524e9f8b2811b04e1c541e9745131ebb | [
"BSD-3-Clause"
] | null | null | null | nematus/nmt.py | cindyxinyiwang/nematus | 30fa0bd4524e9f8b2811b04e1c541e9745131ebb | [
"BSD-3-Clause"
] | null | null | null | nematus/nmt.py | cindyxinyiwang/nematus | 30fa0bd4524e9f8b2811b04e1c541e9745131ebb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Build a neural machine translation model with soft attention
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import json
import numpy
import copy
import argparse
import os
import sys
import time
import logging
import itertools
from subprocess import Popen
from collections import OrderedDict
import util
from settings import TranslationSettings
import subprocess
profile = False
from data_iterator import TextIterator, MultiSrcTextIterator
from training_progress import TrainingProgress
from util import *
from theano_util import *
from alignment_util import *
from raml_distributions import *
from layers import *
from initializers import *
from optimizers import *
from metrics.scorer_provider import ScorerProvider
from domain_interpolation_data_iterator import DomainInterpolatorTextIterator
# batch preparation
def prepare_data_multi_src(seqs_x1, seqs_x2, seqs_y, weights=None, maxlen=None, n_words_src=30000,
n_words=30000, n_factors=1):
# x: a list of sentences
lengths_x1 = [len(s) for s in seqs_x1]
lengths_x2 = [len(s) for s in seqs_x2]
lengths_y = [len(s) for s in seqs_y]
if maxlen is not None:
new_seqs_x1 = []
new_seqs_x2 = []
new_seqs_y = []
new_lengths_x1 = []
new_lengths_x2 = []
new_lengths_y = []
new_weights = []
if weights is None:
weights = [None] * len(seqs_y) # to make the zip easier
        for l_x1, s_x1, l_x2, s_x2, l_y, s_y, w in zip(lengths_x1, seqs_x1, lengths_x2, seqs_x2, lengths_y, seqs_y, weights):
            if l_x1 < maxlen and l_x2 < maxlen and l_y < maxlen:
new_seqs_x1.append(s_x1)
new_seqs_x2.append(s_x2)
new_lengths_x1.append(l_x1)
new_lengths_x2.append(l_x2)
new_seqs_y.append(s_y)
new_lengths_y.append(l_y)
new_weights.append(w)
lengths_x1 = new_lengths_x1
seqs_x1 = new_seqs_x1
lengths_x2 = new_lengths_x2
seqs_x2 = new_seqs_x2
lengths_y = new_lengths_y
seqs_y = new_seqs_y
weights = new_weights
if len(lengths_x1) < 1 or len(lengths_x2) < 1 or len(lengths_y) < 1:
if weights is not None:
return None, None, None, None, None
else:
return None, None, None, None
    n_samples = len(seqs_x1)
maxlen_x1 = numpy.max(lengths_x1) + 1
maxlen_x2 = numpy.max(lengths_x2) + 1
maxlen_y = numpy.max(lengths_y) + 1
x1 = numpy.zeros((n_factors, maxlen_x1, n_samples)).astype('int64')
x2 = numpy.zeros((n_factors, maxlen_x2, n_samples)).astype('int64')
y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
x1_mask = numpy.zeros((maxlen_x1, n_samples)).astype(floatX)
x2_mask = numpy.zeros((maxlen_x2, n_samples)).astype(floatX)
y_mask = numpy.zeros((maxlen_y, n_samples)).astype(floatX)
for idx, [s_x1, s_x2, s_y] in enumerate(zip(seqs_x1, seqs_x2, seqs_y)):
x1[:, :lengths_x1[idx], idx] = zip(*s_x1)
x1_mask[:lengths_x1[idx]+1, idx] = 1.
        x2[:, :lengths_x2[idx], idx] = zip(*s_x2)
x2_mask[:lengths_x2[idx]+1, idx] = 1.
y[:lengths_y[idx], idx] = s_y
y_mask[:lengths_y[idx]+1, idx] = 1.
if weights is not None:
return x1, x1_mask, x2, x2_mask, y, y_mask, weights
else:
return x1, x1_mask, x2, x2_mask, y, y_mask
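# Hedged sketch (not part of the original module): expected shapes for a factored
# multi-source batch -- each source side is padded to its own maximum length plus
# one extra position that carries the trailing 1 in the mask.
def _prepare_data_multi_src_example():
    seqs_x1 = [[(4,), (5,)], [(6,), (7,), (8,)]]   # each token is a tuple of factors
    seqs_x2 = [[(9,)], [(10,), (11,)]]
    seqs_y = [[4, 5], [6, 7, 8]]
    x1, x1_mask, x2, x2_mask, y, y_mask = prepare_data_multi_src(seqs_x1, seqs_x2, seqs_y)
    assert x1.shape == (1, 4, 2) and x2.shape == (1, 3, 2) and y.shape == (4, 2)
    return x1, x2, y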
def prepare_data(seqs_x, seqs_y, weights=None, maxlen=None, n_words_src=30000,
n_words=30000, n_factors=1):
# x: a list of sentences
lengths_x = [len(s) for s in seqs_x]
lengths_y = [len(s) for s in seqs_y]
if maxlen is not None:
new_seqs_x = []
new_seqs_y = []
new_lengths_x = []
new_lengths_y = []
new_weights = []
if weights is None:
weights = [None] * len(seqs_y) # to make the zip easier
for l_x, s_x, l_y, s_y, w in zip(lengths_x, seqs_x, lengths_y, seqs_y, weights):
if l_x < maxlen and l_y < maxlen:
new_seqs_x.append(s_x)
new_lengths_x.append(l_x)
new_seqs_y.append(s_y)
new_lengths_y.append(l_y)
new_weights.append(w)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
lengths_y = new_lengths_y
seqs_y = new_seqs_y
weights = new_weights
if len(lengths_x) < 1 or len(lengths_y) < 1:
if weights is not None:
return None, None, None, None, None
else:
return None, None, None, None
n_samples = len(seqs_x)
maxlen_x = numpy.max(lengths_x) + 1
maxlen_y = numpy.max(lengths_y) + 1
x = numpy.zeros((n_factors, maxlen_x, n_samples)).astype('int64')
y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
x_mask = numpy.zeros((maxlen_x, n_samples)).astype(floatX)
y_mask = numpy.zeros((maxlen_y, n_samples)).astype(floatX)
for idx, [s_x, s_y] in enumerate(zip(seqs_x, seqs_y)):
x[:, :lengths_x[idx], idx] = zip(*s_x)
x_mask[:lengths_x[idx]+1, idx] = 1.
y[:lengths_y[idx], idx] = s_y
y_mask[:lengths_y[idx]+1, idx] = 1.
if weights is not None:
return x, x_mask, y, y_mask, weights
else:
return x, x_mask, y, y_mask
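# Hedged sketch (not part of the original module): the single-source variant pads to
# the longest sentence plus one position, and the mask marks real tokens plus one
# trailing end-of-sentence slot per sample.
def _prepare_data_example():
    seqs_x = [[(4,), (5,)], [(6,), (7,), (8,)]]    # factors=1, so one-element tuples
    seqs_y = [[4, 5, 6], [7, 8]]
    x, x_mask, y, y_mask = prepare_data(seqs_x, seqs_y)
    assert x.shape == (1, 4, 2) and x_mask.shape == (4, 2) and y.shape == (4, 2)
    assert x_mask[:, 0].sum() == 3                 # two tokens + one trailing position
    return x, x_mask, y, y_mask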
# initialize all parameters
def init_params(options):
params = OrderedDict()
# embedding
params = get_layer_param('embedding')(options, params, options['n_words_src'], options['dim_per_factor'], options['factors'], suffix='')
if not options['tie_encoder_decoder_embeddings']:
params = get_layer_param('embedding')(options, params, options['n_words'], options['dim_word'], suffix='_dec')
# encoder: bidirectional RNN
params = get_layer_param(options['encoder'])(options, params,
prefix='encoder',
nin=options['dim_word'],
dim=options['dim'],
recurrence_transition_depth=options['enc_recurrence_transition_depth'])
params = get_layer_param(options['encoder'])(options, params,
prefix='encoder_r',
nin=options['dim_word'],
dim=options['dim'],
recurrence_transition_depth=options['enc_recurrence_transition_depth'])
if options['enc_depth'] > 1:
for level in range(2, options['enc_depth'] + 1):
prefix_f = pp('encoder', level)
prefix_r = pp('encoder_r', level)
if level <= options['enc_depth_bidirectional']:
params = get_layer_param(options['encoder'])(options, params,
prefix=prefix_f,
nin=options['dim'],
dim=options['dim'],
recurrence_transition_depth=options['enc_recurrence_transition_depth'])
params = get_layer_param(options['encoder'])(options, params,
prefix=prefix_r,
nin=options['dim'],
dim=options['dim'],
recurrence_transition_depth=options['enc_recurrence_transition_depth'])
else:
params = get_layer_param(options['encoder'])(options, params,
prefix=prefix_f,
nin=options['dim'] * 2,
dim=options['dim'] * 2,
recurrence_transition_depth=options['enc_recurrence_transition_depth'])
ctxdim = 2 * options['dim']
dec_state = options['dim']
if options['decoder'].startswith('lstm'):
dec_state *= 2
# init_state, init_cell
params = get_layer_param('ff')(options, params, prefix='ff_state',
nin=ctxdim, nout=dec_state)
# decoder
params = get_layer_param(options['decoder'])(options, params,
prefix='decoder',
nin=options['dim_word'],
dim=options['dim'],
dimctx=ctxdim,
recurrence_transition_depth=options['dec_base_recurrence_transition_depth'])
# deeper layers of the decoder
if options['dec_depth'] > 1:
if options['dec_deep_context']:
input_dim = options['dim'] + ctxdim
else:
input_dim = options['dim']
for level in range(2, options['dec_depth'] + 1):
params = get_layer_param(options['decoder_deep'])(options, params,
prefix=pp('decoder', level),
nin=input_dim,
dim=options['dim'],
dimctx=ctxdim,
recurrence_transition_depth=options['dec_high_recurrence_transition_depth'])
# readout
if options['deep_fusion_lm'] and options['concatenate_lm_decoder']:
params = get_layer_param('ff')(options, params, prefix='ff_logit_lstm',
nin=(options['dim']+options['lm_dim']), nout=options['dim_word'],
ortho=False)
else:
params = get_layer_param('ff')(options, params, prefix='ff_logit_lstm',
nin=options['dim'], nout=options['dim_word'],
ortho=False)
params = get_layer_param('ff')(options, params, prefix='ff_logit_prev',
nin=options['dim_word'],
nout=options['dim_word'], ortho=False)
params = get_layer_param('ff')(options, params, prefix='ff_logit_ctx',
nin=ctxdim, nout=options['dim_word'],
ortho=False)
params = get_layer_param('ff')(options, params, prefix='ff_logit',
nin=options['dim_word'],
nout=options['n_words'],
weight_matrix = not options['tie_decoder_embeddings'],
followed_by_softmax=True)
return params
# initialize LM parameters (deep fusion)
def init_params_lm(options, params):
# LM controller mechanism
prefix = 'fusion_lm'
v_g = norm_weight(options['lm_dim'], 1)
params[pp(prefix, 'v_g')] = v_g
# bias initialization
b_g = -1 * numpy.ones((1,)).astype(floatX)
#b_g = numpy.zeros((1,)).astype(floatX)
params[pp(prefix, 'b_g')] = b_g
# readout for LM
if not options['concatenate_lm_decoder']:
params = get_layer_param('ff')(options, params, prefix='ff_logit_lm',
nin=options['lm_dim'],
nout=options['dim_word'], ortho=False)
return params
# bidirectional RNN encoder: take input x (optionally with mask), and produce sequence of context vectors (ctx)
def build_encoder(tparams, options, dropout, x_mask=None, sampling=False):
x = tensor.tensor3('x', dtype='int64')
# source text; factors 1; length 5; batch size 10
x.tag.test_value = (numpy.random.rand(1, 5, 10)*100).astype('int64')
# for the backward rnn, we just need to invert x
xr = x[:,::-1]
if x_mask is None:
xr_mask = None
else:
xr_mask = x_mask[::-1]
n_timesteps = x.shape[1]
n_samples = x.shape[2]
# word embedding for forward rnn (source)
emb = get_layer_constr('embedding')(tparams, x, suffix='', factors= options['factors'])
# word embedding for backward rnn (source)
embr = get_layer_constr('embedding')(tparams, xr, suffix='', factors= options['factors'])
if options['use_dropout']:
source_dropout = dropout((n_timesteps, n_samples, 1), options['dropout_source'])
if not sampling:
source_dropout = tensor.tile(source_dropout, (1,1,options['dim_word']))
emb *= source_dropout
if sampling:
embr *= source_dropout
else:
# we drop out the same words in both directions
embr *= source_dropout[::-1]
## level 1
proj = get_layer_constr(options['encoder'])(tparams, emb, options, dropout,
prefix='encoder',
mask=x_mask,
dropout_probability_below=options['dropout_embedding'],
dropout_probability_rec=options['dropout_hidden'],
recurrence_transition_depth=options['enc_recurrence_transition_depth'],
truncate_gradient=options['encoder_truncate_gradient'],
profile=profile)
projr = get_layer_constr(options['encoder'])(tparams, embr, options, dropout,
prefix='encoder_r',
mask=xr_mask,
dropout_probability_below=options['dropout_embedding'],
dropout_probability_rec=options['dropout_hidden'],
recurrence_transition_depth=options['enc_recurrence_transition_depth'],
truncate_gradient=options['encoder_truncate_gradient'],
profile=profile)
# discard LSTM cell state
if options['encoder'].startswith('lstm'):
proj[0] = get_slice(proj[0], 0, options['dim'])
projr[0] = get_slice(projr[0], 0, options['dim'])
## bidirectional levels before merge
for level in range(2, options['enc_depth_bidirectional'] + 1):
prefix_f = pp('encoder', level)
prefix_r = pp('encoder_r', level)
# run forward on previous backward and backward on previous forward
input_f = projr[0][::-1]
input_r = proj[0][::-1]
proj = get_layer_constr(options['encoder'])(tparams, input_f, options, dropout,
prefix=prefix_f,
mask=x_mask,
dropout_probability_below=options['dropout_hidden'],
dropout_probability_rec=options['dropout_hidden'],
recurrence_transition_depth=options['enc_recurrence_transition_depth'],
truncate_gradient=options['encoder_truncate_gradient'],
profile=profile)
projr = get_layer_constr(options['encoder'])(tparams, input_r, options, dropout,
prefix=prefix_r,
mask=xr_mask,
dropout_probability_below=options['dropout_hidden'],
dropout_probability_rec=options['dropout_hidden'],
recurrence_transition_depth=options['enc_recurrence_transition_depth'],
truncate_gradient=options['encoder_truncate_gradient'],
profile=profile)
# discard LSTM cell state
if options['encoder'].startswith('lstm'):
proj[0] = get_slice(proj[0], 0, options['dim'])
projr[0] = get_slice(projr[0], 0, options['dim'])
# residual connections
if level > 1:
proj[0] += input_f
projr[0] += input_r
# context will be the concatenation of forward and backward rnns
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim-1)
## forward encoder layers after bidirectional layers are concatenated
for level in range(options['enc_depth_bidirectional'] + 1, options['enc_depth'] + 1):
ctx += get_layer_constr(options['encoder'])(tparams, ctx, options, dropout,
prefix=pp('encoder', level),
mask=x_mask,
dropout_probability_below=options['dropout_hidden'],
dropout_probability_rec=options['dropout_hidden'],
recurrence_transition_depth=options['enc_recurrence_transition_depth'],
truncate_gradient=options['encoder_truncate_gradient'],
profile=profile)[0]
return x, ctx
# RNN decoder (including embedding and feedforward layer before output)
def build_decoder(tparams, options, y, ctx, init_state, dropout, x_mask=None, y_mask=None, sampling=False, pctx_=None, shared_vars=None, lm_init_state=None):
opt_ret = dict()
# tell RNN whether to advance just one step at a time (for sampling),
# or loop through sequence (for training)
if sampling:
one_step=True
else:
one_step=False
if options['use_dropout']:
if sampling:
target_dropout = dropout(dropout_probability=options['dropout_target'])
else:
n_timesteps_trg = y.shape[0]
n_samples = y.shape[1]
target_dropout = dropout((n_timesteps_trg, n_samples, 1), options['dropout_target'])
target_dropout = tensor.tile(target_dropout, (1, 1, options['dim_word']))
# word embedding (target), we will shift the target sequence one time step
# to the right. This is done because of the bi-gram connections in the
# readout and decoder rnn. The first target will be all zeros and we will
# not condition on the last output.
decoder_embedding_suffix = '' if options['tie_encoder_decoder_embeddings'] else '_dec'
emb = get_layer_constr('embedding')(tparams, y, suffix=decoder_embedding_suffix)
if options['use_dropout']:
emb *= target_dropout
if sampling:
emb = tensor.switch(y[:, None] < 0,
tensor.zeros((1, options['dim_word'])),
emb)
else:
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
# decoder - pass through the decoder conditional gru with attention
proj = get_layer_constr(options['decoder'])(tparams, emb, options, dropout,
prefix='decoder',
mask=y_mask, context=ctx,
context_mask=x_mask,
pctx_=pctx_,
one_step=one_step,
init_state=init_state[0],
recurrence_transition_depth=options['dec_base_recurrence_transition_depth'],
dropout_probability_below=options['dropout_embedding'],
dropout_probability_ctx=options['dropout_hidden'],
dropout_probability_rec=options['dropout_hidden'],
truncate_gradient=options['decoder_truncate_gradient'],
profile=profile)
# hidden states of the decoder gru
next_state = proj[0]
# weighted averages of context, generated by attention module
ctxs = proj[1]
# weights (alignment matrix)
opt_ret['dec_alphas'] = proj[2]
# we return state of each layer
if sampling:
ret_state = [next_state.reshape((1, next_state.shape[0], next_state.shape[1]))]
else:
ret_state = None
if options['dec_depth'] > 1:
for level in range(2, options['dec_depth'] + 1):
# don't pass LSTM cell state to next layer
if options['decoder'].startswith('lstm'):
next_state = get_slice(next_state, 0, options['dim'])
if options['dec_deep_context']:
if sampling:
axis=1
else:
axis=2
input_ = tensor.concatenate([next_state, ctxs], axis=axis)
else:
input_ = next_state
out_state = get_layer_constr(options['decoder_deep'])(tparams, input_, options, dropout,
prefix=pp('decoder', level),
mask=y_mask,
context=ctx,
context_mask=x_mask,
pctx_=None, #TODO: we can speed up sampler by precomputing this
one_step=one_step,
init_state=init_state[level-1],
dropout_probability_below=options['dropout_hidden'],
dropout_probability_rec=options['dropout_hidden'],
recurrence_transition_depth=options['dec_high_recurrence_transition_depth'],
truncate_gradient=options['decoder_truncate_gradient'],
profile=profile)[0]
if sampling:
ret_state.append(out_state.reshape((1, proj[0].shape[0], proj[0].shape[1])))
# don't pass LSTM cell state to next layer
if options['decoder'].startswith('lstm'):
out_state = get_slice(out_state, 0, options['dim'])
# residual connection
next_state += out_state
# don't pass LSTM cell state to next layer
elif options['decoder'].startswith('lstm'):
next_state = get_slice(next_state, 0, options['dim'])
if sampling:
if options['dec_depth'] > 1:
ret_state = tensor.concatenate(ret_state, axis=0)
else:
ret_state = ret_state[0]
# language model encoder (deep fusion)
lm_ret_state = None
if options['deep_fusion_lm']:
lm_emb = get_layer_constr('embedding')(tparams, y, prefix='lm_')
if sampling:
lm_emb = tensor.switch(y[:, None] < 0,
tensor.zeros((1, options['dim_word'])),
lm_emb)
if not lm_init_state:
lm_init_state = tensor.zeros((1, options['lm_dim']))
else:
lm_emb_shifted = tensor.zeros_like(lm_emb)
lm_emb_shifted = tensor.set_subtensor(lm_emb_shifted[1:], lm_emb[:-1])
lm_emb = lm_emb_shifted
lm_dropout = dropout_constr(options={'use_dropout':False}, use_noise=False, trng=None, sampling=False)
lm_proj = get_layer_constr(options['lm_encoder'])(tparams, lm_emb, options, lm_dropout,
prefix='lm_encoder',
mask=y_mask,
one_step=one_step,
init_state=lm_init_state,
profile=profile)
lm_next_state = lm_proj[0]
lm_ret_state = lm_proj[0]
# don't pass LSTM cell state to next layer
if options['lm_encoder'].startswith('lstm'):
lm_next_state = get_slice(lm_next_state, 0, options['lm_dim'])
# controller mechanism
prefix = 'fusion_lm'
lm_gate = tensor.dot(lm_next_state, tparams[pp(prefix, 'v_g')])+tparams[pp(prefix, 'b_g')]
lm_gate = tensor.nnet.sigmoid(lm_gate)
if one_step:
lm_gate = tensor.tile(lm_gate, (1, options['lm_dim']))
else:
lm_gate = tensor.tile(lm_gate, (1, 1, options['lm_dim']))
lm_next_state = lm_next_state * lm_gate
# hidden layer taking RNN state, previous word embedding and context vector as input
# (this counts as the first layer in our deep output, which is always on)
if options['deep_fusion_lm'] and options['concatenate_lm_decoder']:
next_state = concatenate([lm_next_state, next_state], axis=next_state.ndim-1)
logit_lstm = get_layer_constr('ff')(tparams, next_state, options, dropout,
dropout_probability=options['dropout_hidden'],
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer_constr('ff')(tparams, emb, options, dropout,
dropout_probability=options['dropout_embedding'],
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer_constr('ff')(tparams, ctxs, options, dropout,
dropout_probability=options['dropout_hidden'],
prefix='ff_logit_ctx', activ='linear')
if options['deep_fusion_lm'] and not options['concatenate_lm_decoder']:
# add current lm encoder state to last layer
logit_lm = get_layer_constr('ff')(tparams, lm_next_state, options, dropout,
dropout_probability=options['dropout_hidden'],
prefix='ff_logit_lm', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx+logit_lm)
else:
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
# last layer
logit_W = tparams['Wemb' + decoder_embedding_suffix].T if options['tie_decoder_embeddings'] else None
logit = get_layer_constr('ff')(tparams, logit, options, dropout,
dropout_probability=options['dropout_hidden'],
prefix='ff_logit', activ='linear', W=logit_W, followed_by_softmax=True)
return logit, opt_ret, ret_state, lm_ret_state
# build a training model
def build_model(tparams, options):
trng = RandomStreams(1234)
use_noise = theano.shared(numpy_floatX(0.))
dropout = dropout_constr(options, use_noise, trng, sampling=False)
x_mask = tensor.matrix('x_mask', dtype=floatX)
y = tensor.matrix('y', dtype='int64')
y_mask = tensor.matrix('y_mask', dtype=floatX)
# source text length 5; batch size 10
x_mask.tag.test_value = numpy.ones(shape=(5, 10)).astype(floatX)
# target text length 8; batch size 10
y.tag.test_value = (numpy.random.rand(8, 10)*100).astype('int64')
y_mask.tag.test_value = numpy.ones(shape=(8, 10)).astype(floatX)
x, ctx = build_encoder(tparams, options, dropout, x_mask, sampling=False)
n_samples = x.shape[2]
# mean of the context (across time) will be used to initialize decoder rnn
ctx_mean = (ctx * x_mask[:, :, None]).sum(0) / x_mask.sum(0)[:, None]
# or you can use the last state of forward + backward encoder rnns
# ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)
# initial decoder state
init_state = get_layer_constr('ff')(tparams, ctx_mean, options, dropout,
dropout_probability=options['dropout_hidden'],
prefix='ff_state', activ='tanh')
# every decoder RNN layer gets its own copy of the init state
init_state = init_state.reshape([1, init_state.shape[0], init_state.shape[1]])
if options['dec_depth'] > 1:
init_state = tensor.tile(init_state, (options['dec_depth'], 1, 1))
logit, opt_ret, _, _ = build_decoder(tparams, options, y, ctx, init_state, dropout, x_mask=x_mask, y_mask=y_mask, sampling=False)
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1],
logit_shp[2]]))
# cost
y_flat = y.flatten()
y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
cost = -tensor.log(probs.flatten()[y_flat_idx])
cost = cost.reshape([y.shape[0], y.shape[1]])
cost = (cost * y_mask).sum(0)
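    # Indexing trick: probs has shape (timesteps*batch, n_words), so entry
    # i*n_words + y_flat[i] of its flattened view is the probability the model
    # assigns to the reference word at position i. The cost is therefore the
    # masked per-sentence negative log-likelihood sum_t y_mask[t] * (-log p(y_t)).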
#print "Print out in build_model()"
#print opt_ret
return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, use_noise, trng, return_alignment=False):
dropout = dropout_constr(options, use_noise, trng, sampling=True)
x, ctx = build_encoder(tparams, options, dropout, x_mask=None, sampling=True)
n_samples = x.shape[2]
# get the input for decoder rnn initializer mlp
ctx_mean = ctx.mean(0)
# ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)
init_state = get_layer_constr('ff')(tparams, ctx_mean, options, dropout,
dropout_probability=options['dropout_hidden'],
prefix='ff_state', activ='tanh')
# every decoder RNN layer gets its own copy of the init state
init_state = init_state.reshape([1, init_state.shape[0], init_state.shape[1]])
if options['dec_depth'] > 1:
init_state = tensor.tile(init_state, (options['dec_depth'], 1, 1))
logging.info('Building f_init...')
outs = [init_state, ctx]
f_init = theano.function([x], outs, name='f_init', profile=profile)
logging.info('Done')
# x: 1 x 1
y = tensor.vector('y_sampler', dtype='int64')
y.tag.test_value = -1 * numpy.ones((10,)).astype('int64')
init_state_old = init_state
init_state = tensor.tensor3('init_state', dtype=floatX)
if theano.config.compute_test_value != 'off':
init_state.tag.test_value = numpy.random.rand(*init_state_old.tag.test_value.shape).astype(floatX)
lm_init_state = None
if options['deep_fusion_lm']:
lm_init_state = tensor.matrix('lm_init_state', dtype=floatX)
logit, opt_ret, ret_state, lm_ret_state = build_decoder(tparams, options, y, ctx, init_state, dropout, x_mask=None, y_mask=None, sampling=True, lm_init_state=lm_init_state)
# compute the softmax probability
next_probs = tensor.nnet.softmax(logit)
# sample from softmax distribution to get the sample
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# compile a function to do the whole thing above, next word probability,
# sampled word for the next target, next hidden state to be used
logging.info('Building f_next..')
if options['deep_fusion_lm']:
inps = [y, ctx, init_state, lm_init_state]
outs = [next_probs, next_sample, ret_state, lm_ret_state]
else:
inps = [y, ctx, init_state]
outs = [next_probs, next_sample, ret_state]
if return_alignment:
outs.append(opt_ret['dec_alphas'])
f_next = theano.function(inps, outs, name='f_next', profile=profile)
logging.info('Done')
return f_init, f_next
# minimum risk cost
# assumes cost is the negative sentence-level log probability
# and each sentence in the minibatch is a sample of the same source sentence
def mrt_cost(cost, y_mask, options):
loss = tensor.vector('loss', dtype=floatX)
alpha = theano.shared(numpy_floatX(options['mrt_alpha']))
if options['mrt_ml_mix'] > 0:
ml_cost = cost[0]
# remove reference for MRT objective unless enabled
if not options['mrt_reference']:
cost = cost[1:]
cost *= alpha
#get normalized probability
cost = tensor.nnet.softmax(-cost)[0]
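    # At this point cost holds alpha * (-log p(y|x)) per sample, so softmax(-cost)
    # is the renormalized distribution Q(y|x) proportional to p(y|x)^alpha over the
    # sampled subset, with mrt_alpha controlling its sharpness; the expected risk
    # below is then sum_y Q(y|x) * loss(y).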
# risk: expected loss
if options['mrt_ml_mix'] > 0 and not options['mrt_reference']:
cost *= loss[1:]
else:
cost *= loss
cost = cost.sum()
if options['mrt_ml_mix'] > 0:
#normalize ML by length (because MRT is length-invariant)
ml_cost /= y_mask[:,0].sum(0)
ml_cost *= options['mrt_ml_mix']
cost += ml_cost
return cost, loss
# build a sampler that produces samples in one theano function
def build_full_sampler(tparams, options, use_noise, trng, greedy=False):
dropout = dropout_constr(options, use_noise, trng, sampling=True)
if greedy:
x_mask = tensor.matrix('x_mask', dtype=floatX)
x_mask.tag.test_value = numpy.ones(shape=(5, 10)).astype(floatX)
else:
x_mask = None
x, ctx = build_encoder(tparams, options, dropout, x_mask, sampling=True)
n_samples = x.shape[2]
    if x_mask is not None:
ctx_mean = (ctx * x_mask[:, :, None]).sum(0) / x_mask.sum(0)[:, None]
else:
ctx_mean = ctx.mean(0)
init_state = get_layer_constr('ff')(tparams, ctx_mean, options, dropout,
dropout_probability=options['dropout_hidden'],
prefix='ff_state', activ='tanh')
# every decoder RNN layer gets its own copy of the init state
init_state = init_state.reshape([1, init_state.shape[0], init_state.shape[1]])
if options['dec_depth'] > 1:
init_state = tensor.tile(init_state, (options['dec_depth'], 1, 1))
if greedy:
init_w = tensor.alloc(numpy.int64(-1), n_samples)
else:
k = tensor.iscalar("k")
k.tag.test_value = 12
init_w = tensor.alloc(numpy.int64(-1), k*n_samples)
ctx = tensor.tile(ctx, [k, 1])
init_state = tensor.tile(init_state, [1, k, 1])
# projected context
assert ctx.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(ctx*dropout(dropout_probability=options['dropout_hidden']), tparams[pp('decoder', 'Wc_att')]) +\
tparams[pp('decoder', 'b_att')]
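    # The attention 'key' projection of the (tiled) context is precomputed once here,
    # outside the scan loop, so each call to decoder_step only has to add the
    # state-dependent part of the attention scores.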
def decoder_step(y, init_state, ctx, pctx_, *shared_vars):
logit, opt_ret, ret_state, _ = build_decoder(tparams, options, y, ctx, init_state, dropout, x_mask=x_mask, y_mask=None, sampling=True, pctx_=pctx_, shared_vars=shared_vars)
# compute the softmax probability
next_probs = tensor.nnet.softmax(logit)
if greedy:
next_sample = next_probs.argmax(1)
else:
# sample from softmax distribution to get the sample
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# do not produce words after EOS
next_sample = tensor.switch(
tensor.eq(y,0),
0,
next_sample)
return [next_sample, ret_state, next_probs[:, next_sample].diagonal()], \
theano.scan_module.until(tensor.all(tensor.eq(next_sample, 0))) # stop when all outputs are 0 (EOS)
shared_vars = []
n_steps = tensor.iscalar("n_steps")
n_steps.tag.test_value = 50
(sample, state, probs), updates = theano.scan(decoder_step,
outputs_info=[init_w, init_state, None],
non_sequences=[ctx, pctx_]+shared_vars,
n_steps=n_steps, truncate_gradient=options['decoder_truncate_gradient'])
logging.info('Building f_sample...')
if greedy:
inps = [x, x_mask, n_steps]
else:
inps = [x, k, n_steps]
outs = [sample, probs]
f_sample = theano.function(inps, outs, name='f_sample', updates=updates, profile=profile)
logging.info('Done')
return f_sample
# generate sample, either with stochastic sampling or beam search. Note that
# this function iteratively calls the f_init and f_next functions.
def gen_sample(f_init, f_next, x, model_options=[None], trng=None, k=1, maxlen=30,
stochastic=True, argmax=False, return_alignment=False, suppress_unk=False,
return_hyp_graph=False):
# k is the beam size we have
if k > 1 and argmax:
assert not stochastic, \
'Beam search does not support stochastic sampling with argmax'
sample = []
sample_score = []
sample_word_probs = []
alignment = []
hyp_graph = None
if stochastic:
if argmax:
sample_score = 0
live_k=k
else:
live_k = 1
if return_hyp_graph:
from hypgraph import HypGraph
hyp_graph = HypGraph()
dead_k = 0
hyp_samples=[ [] for i in xrange(live_k) ]
word_probs=[ [] for i in xrange(live_k) ]
hyp_scores = numpy.zeros(live_k).astype(floatX)
hyp_states = []
if return_alignment:
hyp_alignment = [[] for _ in xrange(live_k)]
# for ensemble decoding, we keep track of states and probability distribution
# for each model in the ensemble
num_models = len(f_init)
next_state = [None]*num_models
lm_next_state = [None]*num_models
ctx0 = [None]*num_models
next_p = [None]*num_models
dec_alphas = [None]*num_models
# get initial state of decoder rnn and encoder context
for i in xrange(num_models):
ret = f_init[i](x)
# to more easily manipulate batch size, go from (layers, batch_size, dim) to (batch_size, layers, dim)
ret[0] = numpy.transpose(ret[0], (1,0,2))
next_state[i] = numpy.tile( ret[0] , (live_k, 1, 1))
if 'deep_fusion_lm' in model_options and model_options['deep_fusion_lm']:
lm_dim = model_options['lm_dim']
lm_next_state[i] = numpy.tile( numpy.zeros((1, lm_dim)).astype(floatX) , (live_k, 1))
ctx0[i] = ret[1]
next_w = -1 * numpy.ones((live_k,)).astype('int64') # bos indicator
# x is a sequence of word ids followed by 0, eos id
for ii in xrange(maxlen):
for i in xrange(num_models):
ctx = numpy.tile(ctx0[i], [live_k, 1])
# for theano function, go from (batch_size, layers, dim) to (layers, batch_size, dim)
next_state[i] = numpy.transpose(next_state[i], (1,0,2))
if 'deep_fusion_lm' in model_options and model_options['deep_fusion_lm']:
inps = [next_w, ctx, next_state[i], lm_next_state[i]]
ret = f_next[i](*inps)
# dimension of dec_alpha (k-beam-size, number-of-input-hidden-units)
next_p[i], next_w_tmp, next_state[i], lm_next_state[i] = ret[0], ret[1], ret[2], ret[3]
else:
inps = [next_w, ctx, next_state[i]]
ret = f_next[i](*inps)
# dimension of dec_alpha (k-beam-size, number-of-input-hidden-units)
next_p[i], next_w_tmp, next_state[i] = ret[0], ret[1], ret[2]
# dummy LM states
lm_next_state[i] = [None]*live_k
if return_alignment:
dec_alphas[i] = ret[3]
# to more easily manipulate batch size, go from (layers, batch_size, dim) to (batch_size, layers, dim)
next_state[i] = numpy.transpose(next_state[i], (1,0,2))
if suppress_unk:
next_p[i][:,1] = -numpy.inf
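                # suppress_unk assumes word id 1 is UNK (the convention used by the
                # dictionaries in this codebase); forcing its probability to -inf keeps
                # the search from ever selecting UNK.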
if stochastic:
#batches are not supported with argmax: output data structure is different
if argmax:
nw = sum(next_p)[0].argmax()
sample.append(nw)
sample_score += numpy.log(next_p[0][0, nw])
if nw == 0:
break
else:
#FIXME: sampling is currently performed according to the last model only
nws = next_w_tmp
cand_scores = numpy.array(hyp_scores)[:, None] - numpy.log(next_p[-1])
probs = next_p[-1]
for idx,nw in enumerate(nws):
hyp_samples[idx].append(nw)
hyp_states=[]
hyp_lm_states=[]
for ti in xrange(live_k):
hyp_states.append([copy.copy(next_state[i][ti]) for i in xrange(num_models)])
hyp_lm_states.append([copy.copy(lm_next_state[i][ti]) for i in xrange(num_models)])
hyp_scores[ti]=cand_scores[ti][nws[ti]]
word_probs[ti].append(probs[ti][nws[ti]])
new_hyp_states=[]
new_hyp_lm_states=[]
new_hyp_samples=[]
new_hyp_scores=[]
new_word_probs=[]
for hyp_sample, hyp_state, hyp_lm_state, hyp_score, hyp_word_prob in zip(hyp_samples, hyp_states, hyp_lm_states ,hyp_scores, word_probs):
if hyp_sample[-1] > 0:
new_hyp_samples.append(copy.copy(hyp_sample))
new_hyp_states.append(copy.copy(hyp_state))
new_hyp_lm_states.append(copy.copy(hyp_lm_state))
new_hyp_scores.append(hyp_score)
new_word_probs.append(hyp_word_prob)
else:
sample.append(copy.copy(hyp_sample))
sample_score.append(hyp_score)
sample_word_probs.append(hyp_word_prob)
hyp_samples=new_hyp_samples
hyp_states=new_hyp_states
hyp_lm_states=new_hyp_lm_states
hyp_scores=new_hyp_scores
word_probs=new_word_probs
live_k=len(hyp_samples)
if live_k < 1:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = [numpy.array(state) for state in zip(*hyp_states)]
lm_next_state = [numpy.array(state) for state in zip(*hyp_lm_states)]
else:
cand_scores = hyp_scores[:, None] - sum(numpy.log(next_p))
probs = sum(next_p)/num_models
cand_flat = cand_scores.flatten()
probs_flat = probs.flatten()
ranks_flat = cand_flat.argpartition(k-dead_k-1)[:(k-dead_k)]
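            # cand_scores is (live_k, vocab): the cumulative negative log-probability of
            # every one-word extension of every live hypothesis (ensemble scores are
            # combined by adding log-probabilities across models). argpartition picks the
            # k-dead_k cheapest cells without a full sort; trans_indices/word_indices
            # below recover which hypothesis and which word each selected cell belongs to.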
            # averaging the attention weights across models
if return_alignment:
mean_alignment = sum(dec_alphas)/num_models
voc_size = next_p[0].shape[1]
# index of each k-best hypothesis
            trans_indices = ranks_flat // voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype(floatX)
new_word_probs = []
new_hyp_states = []
new_hyp_lm_states = []
if return_alignment:
# holds the history of attention weights for each time step for each of the surviving hypothesis
                # dimensions (live_k * target_words * source_hidden_units)
# at each time step we append the attention weights corresponding to the current target word
new_hyp_alignment = [[] for _ in xrange(k-dead_k)]
# ti -> index of k-best hypothesis
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_word_probs.append(word_probs[ti] + [probs_flat[ranks_flat[idx]].tolist()])
new_hyp_scores[idx] = copy.copy(costs[idx])
new_hyp_states.append([copy.copy(next_state[i][ti]) for i in xrange(num_models)])
new_hyp_lm_states.append([copy.copy(lm_next_state[i][ti]) for i in xrange(num_models)])
if return_alignment:
# get history of attention weights for the current hypothesis
new_hyp_alignment[idx] = copy.copy(hyp_alignment[ti])
# extend the history with current attention weights
new_hyp_alignment[idx].append(mean_alignment[ti])
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
hyp_lm_states = []
word_probs = []
if return_alignment:
hyp_alignment = []
# sample and sample_score hold the k-best translations and their scores
for idx in xrange(len(new_hyp_samples)):
if return_hyp_graph:
word, history = new_hyp_samples[idx][-1], new_hyp_samples[idx][:-1]
score = new_hyp_scores[idx]
word_prob = new_word_probs[idx][-1]
hyp_graph.add(word, history, word_prob=word_prob, cost=score)
if new_hyp_samples[idx][-1] == 0:
sample.append(copy.copy(new_hyp_samples[idx]))
sample_score.append(new_hyp_scores[idx])
sample_word_probs.append(new_word_probs[idx])
if return_alignment:
alignment.append(new_hyp_alignment[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(copy.copy(new_hyp_samples[idx]))
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(copy.copy(new_hyp_states[idx]))
hyp_lm_states.append(copy.copy(new_hyp_lm_states[idx]))
word_probs.append(new_word_probs[idx])
if return_alignment:
hyp_alignment.append(new_hyp_alignment[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = [numpy.array(state) for state in zip(*hyp_states)]
lm_next_state = [numpy.array(state) for state in zip(*hyp_lm_states)]
# dump every remaining one
if not argmax and live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
sample_word_probs.append(word_probs[idx])
if return_alignment:
alignment.append(hyp_alignment[idx])
if not return_alignment:
alignment = [None for i in range(len(sample))]
return sample, sample_score, sample_word_probs, alignment, hyp_graph
# calculate the log probabilities on a given corpus using the translation model
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True, normalization_alpha=0.0, alignweights=False):
probs = []
n_done = 0
alignments_json = []
for x, y in iterator:
#ensure consistency in number of factors
if len(x[0][0]) != options['factors']:
logging.error('Mismatch between number of factors in settings ({0}), and number in validation corpus ({1})\n'.format(options['factors'], len(x[0][0])))
sys.exit(1)
n_done += len(x)
x, x_mask, y, y_mask = prepare_data(x, y,
n_words_src=options['n_words_src'],
n_words=options['n_words'],
n_factors=options['factors'])
        ### optionally also compute and save attention weights
if alignweights:
pprobs, attention = f_log_probs(x, x_mask, y, y_mask)
for jdata in get_alignments(attention, x_mask, y_mask):
alignments_json.append(jdata)
else:
pprobs = f_log_probs(x, x_mask, y, y_mask)
# normalize scores according to output length
if normalization_alpha:
adjusted_lengths = numpy.array([numpy.count_nonzero(s) ** normalization_alpha for s in y_mask.T])
pprobs /= adjusted_lengths
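            # i.e. the sentence cost becomes -log p(y|x) / |y|^alpha: alpha=0 leaves the
            # scores untouched, alpha=1 gives a per-word average, and values in between
            # interpolate between the two.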
for pp in pprobs:
probs.append(pp)
logging.debug('%d samples computed' % (n_done))
return numpy.array(probs), alignments_json
def get_translation(f_init, f_next, options, datasets, dictionaries, trng):
translations = []
n_done = 0
#assert options['valid_batch_size'] == 1
source = datasets[0]
input1 = open(source, 'r')
for l1 in input1:
x1 = []
for w in l1.split():
            w = [dictionaries[i][f] if f in dictionaries[i] else 1 for (i, f) in enumerate(w.split('|'))]
x1.append(w)
x1 += [[0]*options['factors']]
x1 = numpy.array(x1).T.reshape([len(x1[0]), len(x1), 1])
sample, score, sample_word_probs, alignment, hyp_graph = gen_sample([f_init], [f_next],
x1,
options,
trng=trng, k=12,
maxlen=50,
stochastic=False,
argmax=False,
suppress_unk=False,
return_hyp_graph=False)
#score = score / numpy.array([len(s) for s in sample])
idx = numpy.argmin(score)
ss = sample[idx]
translations.append(ss)
input1.close()
return translations
def augment_raml_data(x, y, tgt_worddict, options):
    # augment data with copies whose targets will be perturbed
aug_x = []
aug_y = []
sample_weights = numpy.empty((0), dtype=floatX)
vocab = range(1, options['n_words']) # vocabulary for perturbation
vocab.remove(tgt_worddict['eos'])
vocab.remove(tgt_worddict['UNK'])
bleu_scorer = ScorerProvider().get("SENTENCEBLEU n=4")
for x_s, y_s in zip(x, y):
y_sample_weights = []
bleu_scorer.set_reference(y_s)
for s in xrange(options['raml_samples']):
y_c = copy.copy(y_s)
if options['raml_reward'] in ["hamming_distance", "bleu"]:
#sampling based on bleu is done by sampling based on hamming
#distance followed by importance corrections
#based on https://github.com/pcyin/pytorch_nmt
q = hamming_distance_distribution(sentence_length=len(y_c), vocab_size=options['n_words'], tau=options['raml_tau'])
#sample distance from exponentiated payoff distribution
edits = numpy.random.choice(range(len(y_c)), p=q)
if len(y_c) > 1:
#choose random position except last (usually period or question mark etc)
positions = numpy.random.choice(range(len(y_c) - 1), size=edits, replace=False)
else:
#for single word sentence
positions = [0]
for position in positions:
y_c[position] = numpy.random.choice(vocab)
if options['raml_reward'] == "bleu":
#importance correction on the weights
y_bleu = bleu_scorer.score(y_c)
y_sample_weights.append(numpy.exp(y_bleu / options['raml_tau']) / numpy.exp(-edits / options['raml_tau']))
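                    # Importance weight w = exp(BLEU/tau) / exp(-d_Hamming/tau): the sample
                    # was drawn from the Hamming-distance payoff distribution (the proposal),
                    # so weighting by the ratio of the BLEU payoff to the Hamming payoff
                    # approximates sampling from the BLEU-based exponentiated payoff
                    # distribution; the weights are normalized per sentence further below.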
else:
y_sample_weights = [1.0] * options['raml_samples']
elif options['raml_reward'] == "edit_distance":
q = edit_distance_distribution(sentence_length=len(y_c), vocab_size=options['n_words'], tau=options['raml_tau'])
edits = numpy.random.choice(range(len(y_c)), p=q)
for e in range(edits):
if numpy.random.choice(["insertion", "substitution"]) == "insertion":
if len(y_c) > 1:
#insert before last period/question mark
position = numpy.random.choice(range(len(y_c)))
else:
#insert before or after single word
position = numpy.random.choice([0, 1])
y_c.insert(position, numpy.random.choice(vocab))
else:
if len(y_c) > 1:
position = numpy.random.choice(range(len(y_c)))
else:
position = 0
if position == len(y_c) - 1:
#using choice of last position to mean deletion of random word instead
del y_c[numpy.random.choice(range(len(y_c) - 1))]
else:
y_c[position] = numpy.random.choice(vocab)
y_sample_weights = [1.0] * options['raml_samples']
aug_y.append(y_c)
aug_x.append(x_s)
y_sample_weights = numpy.array(y_sample_weights, dtype=floatX)
if options['raml_reward'] == "bleu":
#normalize importance weights
y_sample_weights /= numpy.sum(y_sample_weights)
sample_weights = numpy.concatenate([sample_weights, y_sample_weights])
return aug_x, aug_y, sample_weights
def train(dim_word=512, # word vector dimensionality
dim=1000, # the number of LSTM units
enc_depth=1, # number of layers in the encoder
dec_depth=1, # number of layers in the decoder
enc_recurrence_transition_depth=1, # number of GRU transition operations applied in the encoder. Minimum is 1. (Only applies to gru)
dec_base_recurrence_transition_depth=2, # number of GRU transition operations applied in the first layer of the decoder. Minimum is 2. (Only applies to gru_cond)
dec_high_recurrence_transition_depth=1, # number of GRU transition operations applied in the higher layers of the decoder. Minimum is 1. (Only applies to gru)
dec_deep_context=False, # include context vectors in deeper layers of the decoder
enc_depth_bidirectional=None, # first n encoder layers are bidirectional (default: all)
factors=1, # input factors
dim_per_factor=None, # list of word vector dimensionalities (one per factor): [250,200,50] for total dimensionality of 500
encoder='gru',
decoder='gru_cond',
decoder_deep='gru',
patience=10, # early stopping patience
max_epochs=5000,
finish_after=10000000, # finish after this many updates
dispFreq=1000,
decay_c=0., # L2 regularization penalty
map_decay_c=0., # L2 regularization penalty towards original weights
clip_c=-1., # gradient clipping threshold
lrate=0.0001, # learning rate
n_words_src=None, # source vocabulary size
n_words=None, # target vocabulary size
          maxlen=100,  # maximum sequence length
optimizer='adam',
batch_size=16,
valid_batch_size=16,
saveto='model.npz',
deep_fusion_lm=None,
concatenate_lm_decoder=False,
validFreq=10000,
saveFreq=30000, # save the parameters after every saveFreq updates
sampleFreq=10000, # generate some samples after every sampleFreq
datasets=[ # path to training datasets (source and target)
None,
None],
valid_datasets=[None, # path to validation datasets (source and target)
None],
dictionaries=[ # path to dictionaries (json file created with ../data/build_dictionary.py). One dictionary per input factor; last dictionary is target-side dictionary.
None,
None],
use_dropout=False,
dropout_embedding=0.2, # dropout for input embeddings (0: no dropout)
dropout_hidden=0.2, # dropout for hidden layers (0: no dropout)
dropout_source=0, # dropout source words (0: no dropout)
dropout_target=0, # dropout target words (0: no dropout)
reload_=False,
          reload_training_progress=True,  # reload training progress (only used if reload_ is True)
overwrite=False,
external_validation_script=None,
shuffle_each_epoch=True,
sort_by_length=True,
use_domain_interpolation=False, # interpolate between an out-domain training corpus and an in-domain training corpus
domain_interpolation_min=0.1, # minimum (initial) fraction of in-domain training data
domain_interpolation_max=1.0, # maximum fraction of in-domain training data
domain_interpolation_inc=0.1, # interpolation increment to be applied each time patience runs out, until maximum amount of interpolation is reached
domain_interpolation_indomain_datasets=[None, None], # in-domain parallel training corpus (source and target)
          anneal_restarts=0,  # when patience runs out, restart with annealed learning rate this many times before early stopping
anneal_decay=0.5, # decay learning rate by this amount on each restart
maxibatch_size=20, #How many minibatches to load at one time
objective="CE", #CE: cross-entropy; MRT: minimum risk training (see https://www.aclweb.org/anthology/P/P16/P16-1159.pdf) \
#RAML: reward-augmented maximum likelihood (see https://papers.nips.cc/paper/6547-reward-augmented-maximum-likelihood-for-neural-structured-prediction.pdf)
mrt_alpha=0.005,
mrt_samples=100,
mrt_samples_meanloss=10,
mrt_reference=False,
mrt_loss="SENTENCEBLEU n=4", # loss function for minimum risk training
mrt_ml_mix=0, # interpolate mrt loss with ML loss
raml_tau=0.85, # in (0,1] 0: becomes equivalent to ML
raml_samples=1,
raml_reward="hamming_distance",
model_version=0.1, #store version used for training for compatibility
prior_model=None, # Prior model file, used for MAP
tie_encoder_decoder_embeddings=False, # Tie the input embeddings of the encoder and the decoder (first factor only)
tie_decoder_embeddings=False, # Tie the input embeddings of the decoder with the softmax output embeddings
encoder_truncate_gradient=-1, # Truncate BPTT gradients in the encoder to this value. Use -1 for no truncation
decoder_truncate_gradient=-1, # Truncate BPTT gradients in the decoder to this value. Use -1 for no truncation
layer_normalisation=False, # layer normalisation https://arxiv.org/abs/1607.06450
weight_normalisation=False, # normalize weights
multi_src=0,
bleu=False,
postprocess=None,
valid_ref=None
):
# Model options
model_options = OrderedDict(sorted(locals().copy().items()))
# load LM options (deep fusion LM)
if model_options['concatenate_lm_decoder'] and not model_options['deep_fusion_lm']:
logging.error('Error: option \'concatenate_lm_decoder\' is enabled and no language model is provided.\n')
sys.exit(1)
if model_options['deep_fusion_lm']:
path = model_options['deep_fusion_lm']
try:
hp = pkl.load(open(path + '.pkl'))
except IOError:
hp = pkl.load(open(path + '.npz.pkl'))
model_options['lm_dim'] = hp['dim']
model_options['lm_dim_word'] = hp['dim_word']
model_options['lm_encoder'] = hp['encoder']
if model_options['dim_per_factor'] == None:
if factors == 1:
model_options['dim_per_factor'] = [model_options['dim_word']]
else:
logging.error('Error: if using factored input, you must specify \'dim_per_factor\'\n')
sys.exit(1)
#assert(len(dictionaries) == factors + 1) # one dictionary per source factor + 1 for target factor
#assert(len(model_options['dim_per_factor']) == factors) # each factor embedding has its own dimensionality
#assert(sum(model_options['dim_per_factor']) == model_options['dim_word']) # dimensionality of factor embeddings sums up to total dimensionality of input embedding vector
assert(prior_model != None and (os.path.exists(prior_model)) or (map_decay_c==0.0)) # MAP training requires a prior model file: Use command-line option --prior_model
assert(enc_recurrence_transition_depth >= 1) # enc recurrence transition depth must be at least 1.
assert(dec_base_recurrence_transition_depth >= 2) # dec base recurrence transition depth must be at least 2.
assert(dec_high_recurrence_transition_depth >= 1) # dec higher recurrence transition depth must be at least 1.
if model_options['enc_depth_bidirectional'] is None:
model_options['enc_depth_bidirectional'] = model_options['enc_depth']
# first layer is always bidirectional; make sure people don't forget to increase enc_depth as well
assert(model_options['enc_depth_bidirectional'] >= 1 and model_options['enc_depth_bidirectional'] <= model_options['enc_depth'])
if model_options['dec_depth'] > 1 and model_options['decoder'].startswith('lstm') != model_options['decoder_deep'].startswith('lstm'):
logging.error('cannot mix LSTM and GRU in decoder')
logging.error('decoder: {0}'.format(model_options['decoder']))
logging.error('decoder_deep: {0}'.format(model_options['decoder_deep']))
sys.exit(1)
# load dictionaries and invert them
worddicts = [None] * len(dictionaries)
worddicts_r = [None] * len(dictionaries)
for ii, dd in enumerate(dictionaries):
worddicts[ii] = load_dict(dd)
worddicts_r[ii] = dict()
for kk, vv in worddicts[ii].iteritems():
worddicts_r[ii][vv] = kk
if n_words_src is None:
n_words_src = max(worddicts[0].values()) + 1
model_options['n_words_src'] = n_words_src
if n_words is None:
n_words = max(worddicts[-1].values()) + 1
model_options['n_words'] = n_words
if tie_encoder_decoder_embeddings:
        assert (n_words_src == n_words), "When tying encoder and decoder embeddings, source and target vocabulary size must be the same"
if worddicts[0] != worddicts[1]:
logging.warning("Encoder-decoder embedding tying is enabled with different source and target dictionaries. This is usually a configuration error")
if model_options['objective'] == 'RAML' and model_options['raml_tau'] == 0:
#tau=0 is equivalent to CE training and causes division by zero error
#in the RAML code, so simply switch objectives if tau=0.
logging.warning("tau is set to 0. Switching to CE training")
model_options['objective'] = 'CE'
if model_options['objective'] == 'MRT':
# in CE mode parameters are updated once per batch; in MRT mode parameters are updated once
# per pair of train sentences (== per batch of samples), so we set batch_size to 1 to make
# model saving, validation, etc trigger after the same number of updates as before
logging.info('Running in MRT mode, minibatch size set to 1 sentence')
batch_size = 1
elif model_options['objective'] == 'RAML':
# in RAML mode, training examples are augmented with samples, causing the size of the
# batch to increase. Thus, divide batch_size by the number of samples to have approx.
# the same batch size for each update and thus prevent running out of memory.
batch_size = batch_size // model_options['raml_samples']
logging.info('Running in RAML mode, minibatch size divided by number of samples, set to %d' % batch_size)
# initialize training progress
training_progress = TrainingProgress()
best_p = None
best_opt_p = None
training_progress.bad_counter = 0
training_progress.anneal_restarts_done = 0
training_progress.uidx = 0
training_progress.eidx = 0
training_progress.estop = False
training_progress.history_errs = []
training_progress.domain_interpolation_cur = domain_interpolation_min if use_domain_interpolation else None
# reload training progress
training_progress_file = saveto + '.progress.json'
if reload_ and reload_training_progress and os.path.exists(training_progress_file):
logging.info('Reloading training progress')
training_progress.load_from_json(training_progress_file)
if (training_progress.estop == True) or (training_progress.eidx > max_epochs) or (training_progress.uidx >= finish_after):
logging.warning('Training is already complete. Disable reloading of training progress (--no_reload_training_progress) or remove or modify progress file (%s) to train anyway.' % training_progress_file)
return numpy.inf
# adjust learning rate if we resume process that has already entered annealing phase
if training_progress.anneal_restarts_done > 0:
lrate *= anneal_decay**training_progress.anneal_restarts_done
logging.info('Loading data')
if use_domain_interpolation:
logging.info('Using domain interpolation with initial ratio %s, final ratio %s, increase rate %s' % (training_progress.domain_interpolation_cur, domain_interpolation_max, domain_interpolation_inc))
train = DomainInterpolatorTextIterator(datasets[0], datasets[1],
                         dictionaries[:-1], dictionaries[-1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen,
skip_empty=True,
shuffle_each_epoch=shuffle_each_epoch,
sort_by_length=sort_by_length,
indomain_source=domain_interpolation_indomain_datasets[0],
indomain_target=domain_interpolation_indomain_datasets[1],
interpolation_rate=training_progress.domain_interpolation_cur,
use_factor=(factors > 1),
maxibatch_size=maxibatch_size)
else:
if multi_src:
train = MultiSrcTextIterator(datasets[0], datasets[1],
dictionaries[:-1], dictionaries[-1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen,
skip_empty=True,
shuffle_each_epoch=shuffle_each_epoch,
sort_by_length=sort_by_length,
use_factor=False,
maxibatch_size=maxibatch_size)
else:
train = TextIterator(datasets[0], datasets[1],
dictionaries[:-1], dictionaries[-1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen,
skip_empty=True,
shuffle_each_epoch=shuffle_each_epoch,
sort_by_length=sort_by_length,
use_factor=(factors > 1),
maxibatch_size=maxibatch_size)
if valid_datasets and validFreq:
if multi_src:
valid = MultiSrcTextIterator(valid_datasets[0], valid_datasets[1],
dictionaries[:-1], dictionaries[-1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
use_factor=False,
maxlen=maxlen)
else:
valid = TextIterator(valid_datasets[0], valid_datasets[1],
dictionaries[:-1], dictionaries[-1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
use_factor=(factors>1),
maxlen=maxlen)
else:
valid = None
comp_start = time.time()
logging.info('Building model')
params = init_params(model_options)
optimizer_params = {}
# prepare parameters
if reload_ and os.path.exists(saveto):
logging.info('Reloading model parameters')
params = load_params(saveto, params)
logging.info('Reloading optimizer parameters')
try:
logging.info('trying to load optimizer params from {0} or {1}'.format(saveto + '.gradinfo', saveto + '.gradinfo.npz'))
optimizer_params = load_optimizer_params(saveto + '.gradinfo', optimizer)
except IOError:
logging.warning('{0}(.npz) not found. Trying to load optimizer params from {1}(.npz)'.format(saveto + '.gradinfo', saveto))
optimizer_params = load_optimizer_params(saveto, optimizer)
elif prior_model:
logging.info('Initializing model parameters from prior')
params = load_params(prior_model, params)
# load prior model if specified
if prior_model:
logging.info('Loading prior model parameters')
params, model_options = load_params(prior_model, params, model_options, with_prefix='prior_')
# language model parameters and
# parameter initialization (deep fusion)
if deep_fusion_lm:
logging.info('Loading language model parameters')
#params, model_options = load_params_lm(model_options, params)
params = load_params_lm(model_options, params)
params = init_params_lm(model_options, params)
tparams = init_theano_params(params)
trng, use_noise, \
x, x_mask, y, y_mask, \
opt_ret, \
cost = \
build_model(tparams, model_options)
inps = [x, x_mask, y, y_mask]
if validFreq or sampleFreq:
logging.info('Building sampler')
f_init, f_next = build_sampler(tparams, model_options, use_noise, trng)
if model_options['objective'] == 'MRT':
logging.info('Building MRT sampler')
f_sampler = build_full_sampler(tparams, model_options, use_noise, trng)
# before any regularizer
logging.info('Building f_log_probs...')
f_log_probs = theano.function(inps, cost, profile=profile)
logging.info('Done')
if model_options['objective'] == 'CE':
cost = cost.mean()
elif model_options['objective'] == 'RAML':
        sample_weights = tensor.vector('sample_weights', dtype=floatX)
cost *= sample_weights
cost = cost.mean()
inps += [sample_weights]
elif model_options['objective'] == 'MRT':
#MRT objective function
cost, loss = mrt_cost(cost, y_mask, model_options)
inps += [loss]
else:
logging.error('Objective must be one of ["CE", "MRT", "RAML"]')
sys.exit(1)
# apply L2 regularization on weights
if decay_c > 0.:
decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
if kk.startswith('prior_'):
continue
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
# apply L2 regularisation to loaded model (map training)
if map_decay_c > 0:
map_decay_c = theano.shared(numpy_floatX(map_decay_c), name="map_decay_c")
weight_map_decay = 0.
for kk, vv in tparams.iteritems():
if kk.startswith('prior_'):
continue
init_value = tparams['prior_' + kk]
weight_map_decay += ((vv -init_value) ** 2).sum()
weight_map_decay *= map_decay_c
cost += weight_map_decay
updated_params = OrderedDict(tparams)
# don't update prior model parameters
if prior_model:
updated_params = OrderedDict([(key,value) for (key,value) in updated_params.iteritems() if not key.startswith('prior_')])
# don't update deep fusion LM parameters
if deep_fusion_lm:
updated_params = OrderedDict([(key,value) for (key,value) in updated_params.iteritems() if not key.startswith('lm_')])
logging.info('Computing gradient...')
grads = tensor.grad(cost, wrt=itemlist(updated_params))
logging.info('Done')
# apply gradient clipping here
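    # (global-norm clipping: if the L2 norm of the concatenated gradient exceeds
    # clip_c, every gradient is rescaled by clip_c / ||g||, which bounds the update
    # magnitude while preserving the gradient direction)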
if clip_c > 0.:
g2 = 0.
for g in grads:
g2 += (g**2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (clip_c**2),
g / tensor.sqrt(g2) * clip_c,
g))
grads = new_grads
# compile the optimizer, the actual computational graph is compiled here
lr = tensor.scalar(name='lr')
logging.info('Building optimizers...')
f_update, optimizer_tparams = eval(optimizer)(lr, updated_params,
grads, inps, cost,
profile=profile,
optimizer_params=optimizer_params)
logging.info('Done')
logging.info('Total compilation time: {0:.1f}s'.format(time.time() - comp_start))
if validFreq == -1 or saveFreq == -1 or sampleFreq == -1:
logging.info('Computing number of training batches')
num_batches = len(train)
logging.info('There are {} batches in the train set'.format(num_batches))
if validFreq == -1:
validFreq = num_batches
if saveFreq == -1:
saveFreq = num_batches
if sampleFreq == -1:
sampleFreq = num_batches
logging.info('Optimization')
#save model options
json.dump(model_options, open('%s.json' % saveto, 'wb'), indent=2)
valid_err = None
cost_sum = 0
cost_batches = 0
last_disp_samples = 0
last_words = 0
ud_start = time.time()
p_validation = None
for training_progress.eidx in xrange(training_progress.eidx, max_epochs):
n_samples = 0
for x, y in train:
training_progress.uidx += 1
use_noise.set_value(1.)
#ensure consistency in number of factors
#if len(x) and len(x[0]) and len(x[0][0]) != factors:
# logging.error('Mismatch between number of factors in settings ({0}), and number in training corpus ({1})\n'.format(factors, len(x[0][0])))
# sys.exit(1)
if model_options['objective'] in ['CE', 'RAML']:
if model_options['objective'] == 'RAML':
x, y, sample_weights = augment_raml_data(x, y, options=model_options,
tgt_worddict=worddicts[-1])
else:
sample_weights = [1.0] * len(y)
if multi_src:
xlen = len(x[0])
n_samples += xlen
                    x1, x1_mask, x2, x2_mask, y, y_mask, sample_weights = prepare_data_multi_src(x[0], x[1], y, weights=sample_weights,
maxlen=maxlen,
n_factors=factors,
n_words_src=n_words_src,
n_words=n_words)
else:
xlen = len(x)
n_samples += xlen
x, x_mask, y, y_mask, sample_weights = prepare_data(x, y, weights=sample_weights,
maxlen=maxlen,
n_factors=factors,
n_words_src=n_words_src,
n_words=n_words)
if x is None:
logging.warning('Minibatch with zero sample under length %d' % maxlen)
training_progress.uidx -= 1
continue
cost_batches += 1
last_disp_samples += xlen
if multi_src:
last_words += (numpy.sum(x1_mask) + numpy.sum(x2_mask) + numpy.sum(y_mask))/2.0
else:
last_words += (numpy.sum(x_mask) + numpy.sum(y_mask))/2.0
# compute cost, grads and update parameters
if model_options['objective'] == 'RAML':
cost = f_update(lrate, x, x_mask, y, y_mask, sample_weights)
else:
if multi_src:
cost = f_update(lrate, x1, x1_mask, y, y_mask)
#cost = f_update(lrate, x1, x1_mask, x2, x2_mask, y, y_mask)
else:
cost = f_update(lrate, x, x_mask, y, y_mask)
cost_sum += cost
elif model_options['objective'] == 'MRT':
xlen = len(x)
n_samples += xlen
assert maxlen is not None and maxlen > 0
xy_pairs = [(x_i, y_i) for (x_i, y_i) in zip(x, y) if len(x_i) < maxlen and len(y_i) < maxlen]
if not xy_pairs:
training_progress.uidx -= 1
continue
for x_s, y_s in xy_pairs:
# add EOS and prepare factored data
x, _, _, _ = prepare_data([x_s], [y_s], maxlen=None,
n_factors=factors,
n_words_src=n_words_src, n_words=n_words)
# draw independent samples to compute mean reward
if model_options['mrt_samples_meanloss']:
use_noise.set_value(0.)
samples, _ = f_sampler(x, model_options['mrt_samples_meanloss'], maxlen)
use_noise.set_value(1.)
samples = [numpy.trim_zeros(item) for item in zip(*samples)]
# map integers to words (for character-level metrics)
samples = [seqs2words(sample, worddicts_r[-1]) for sample in samples]
ref = seqs2words(y_s, worddicts_r[-1])
#scorers expect tokenized hypotheses/references
ref = ref.split(" ")
samples = [sample.split(" ") for sample in samples]
# get negative smoothed BLEU for samples
scorer = ScorerProvider().get(model_options['mrt_loss'])
scorer.set_reference(ref)
mean_loss = numpy.array(scorer.score_matrix(samples), dtype=floatX).mean()
else:
mean_loss = 0.
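                    # mean_loss is used as a per-sentence baseline: the loss handed to the
                    # MRT update further below is (mean_loss - per-sample score), so each
                    # candidate is scored relative to the average of these independent
                    # samples rather than on an absolute scale.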
# create k samples
use_noise.set_value(0.)
samples, _ = f_sampler(x, model_options['mrt_samples'], maxlen)
use_noise.set_value(1.)
samples = [numpy.trim_zeros(item) for item in zip(*samples)]
# remove duplicate samples
samples.sort()
samples = [s for s, _ in itertools.groupby(samples)]
# add gold translation [always in first position]
if model_options['mrt_reference'] or model_options['mrt_ml_mix']:
samples = [y_s] + [s for s in samples if s != y_s]
# create mini-batch with masking
x, x_mask, y, y_mask = prepare_data([x_s for _ in xrange(len(samples))], samples,
maxlen=None,
n_factors=factors,
n_words_src=n_words_src,
n_words=n_words)
cost_batches += 1
last_disp_samples += xlen
last_words += (numpy.sum(x_mask) + numpy.sum(y_mask))/2.0
# map integers to words (for character-level metrics)
samples = [seqs2words(sample, worddicts_r[-1]) for sample in samples]
y_s = seqs2words(y_s, worddicts_r[-1])
#scorers expect tokenized hypotheses/references
y_s = y_s.split(" ")
samples = [sample.split(" ") for sample in samples]
# get negative smoothed BLEU for samples
scorer = ScorerProvider().get(model_options['mrt_loss'])
scorer.set_reference(y_s)
loss = mean_loss - numpy.array(scorer.score_matrix(samples), dtype=floatX)
# compute cost, grads and update parameters
cost = f_update(lrate, x, x_mask, y, y_mask, loss)
cost_sum += cost
# check for bad numbers, usually we remove non-finite elements
# and continue training - but not done here
if numpy.isnan(cost) or numpy.isinf(cost):
logging.warning('NaN detected')
return 1., 1., 1.
# verbose
if numpy.mod(training_progress.uidx, dispFreq) == 0:
ud = time.time() - ud_start
sps = last_disp_samples / float(ud)
wps = last_words / float(ud)
cost_avg = cost_sum / float(cost_batches)
logging.info(
'Epoch {epoch} Update {update} Cost {cost} UD {ud} {sps} {wps}'.format(
epoch=training_progress.eidx,
update=training_progress.uidx,
cost=cost_avg,
ud=ud,
sps="{0:.2f} sents/s".format(sps),
wps="{0:.2f} words/s".format(wps)
)
)
ud_start = time.time()
cost_batches = 0
last_disp_samples = 0
last_words = 0
cost_sum = 0
# save the best model so far, in addition, save the latest model
# into a separate file with the iteration number for external eval
if numpy.mod(training_progress.uidx, saveFreq) == 0:
logging.info('Saving the best model...')
if best_p is not None:
params = best_p
optimizer_params = best_opt_p
else:
params = unzip_from_theano(tparams, excluding_prefix='prior_')
optimizer_params = unzip_from_theano(optimizer_tparams, excluding_prefix='prior_')
save(params, optimizer_params, training_progress, saveto)
logging.info('Done')
# save with uidx
if not overwrite:
logging.info('Saving the model at iteration {}...'.format(training_progress.uidx))
saveto_uidx = '{}.iter{}.npz'.format(
os.path.splitext(saveto)[0], training_progress.uidx)
params = unzip_from_theano(tparams, excluding_prefix='prior_')
optimizer_params = unzip_from_theano(optimizer_tparams, excluding_prefix='prior_')
save(params, optimizer_params, training_progress, saveto_uidx)
logging.info('Done')
# generate some samples with the model and display them
if sampleFreq and numpy.mod(training_progress.uidx, sampleFreq) == 0:
# FIXME: random selection?
for jj in xrange(numpy.minimum(5, x.shape[2])):
stochastic = True
x_current = x[:, :, jj][:, :, None]
# remove padding
x_current = x_current[:,:x_mask.astype('int64')[:, jj].sum(),:]
sample, score, sample_word_probs, alignment, hyp_graph = gen_sample([f_init], [f_next],
x_current,
model_options,
trng=trng, k=1,
maxlen=30,
stochastic=stochastic,
argmax=False,
suppress_unk=False,
return_hyp_graph=False)
print 'Source ', jj, ': ',
for pos in range(x.shape[1]):
if x[0, pos, jj] == 0:
break
for factor in range(factors):
vv = x[factor, pos, jj]
if vv in worddicts_r[factor]:
sys.stdout.write(worddicts_r[factor][vv])
else:
sys.stdout.write('UNK')
if factor+1 < factors:
sys.stdout.write('|')
else:
sys.stdout.write(' ')
print
print 'Truth ', jj, ' : ',
for vv in y[:, jj]:
if vv == 0:
break
if vv in worddicts_r[-1]:
print worddicts_r[-1][vv],
else:
print 'UNK',
print
print 'Sample ', jj, ': ',
if stochastic:
ss = sample[0]
else:
score = score / numpy.array([len(s) for s in sample])
ss = sample[score.argmin()]
for vv in ss:
if vv == 0:
break
if vv in worddicts_r[-1]:
print worddicts_r[-1][vv],
else:
print 'UNK',
print
# validate model on validation set and early stop if necessary
if valid is not None and validFreq and numpy.mod(training_progress.uidx, validFreq) == 0:
use_noise.set_value(0.)
valid_errs, alignment = pred_probs(f_log_probs, prepare_data,
model_options, valid)
valid_err = valid_errs.mean()
training_progress.history_errs.append(float(valid_err))
if training_progress.uidx == 0 or valid_err <= numpy.array(training_progress.history_errs).min():
best_p = unzip_from_theano(tparams, excluding_prefix='prior_')
best_opt_p = unzip_from_theano(optimizer_tparams, excluding_prefix='prior_')
training_progress.bad_counter = 0
if valid_err >= numpy.array(training_progress.history_errs).min():
training_progress.bad_counter += 1
if training_progress.bad_counter > patience:
# change mix of in-domain and out-of-domain data
if use_domain_interpolation and (training_progress.domain_interpolation_cur < domain_interpolation_max):
training_progress.domain_interpolation_cur = min(training_progress.domain_interpolation_cur + domain_interpolation_inc, domain_interpolation_max)
logging.info('No progress on the validation set, increasing domain interpolation rate to %s and resuming from best params' % training_progress.domain_interpolation_cur)
train.adjust_domain_interpolation_rate(training_progress.domain_interpolation_cur)
if best_p is not None:
zip_to_theano(best_p, tparams)
zip_to_theano(best_opt_p, optimizer_tparams)
training_progress.bad_counter = 0
# anneal learning rate and reset optimizer parameters
elif training_progress.anneal_restarts_done < anneal_restarts:
logging.info('No progress on the validation set, annealing learning rate and resuming from best params.')
lrate *= anneal_decay
training_progress.anneal_restarts_done += 1
training_progress.bad_counter = 0
# reload best parameters
if best_p is not None:
zip_to_theano(best_p, tparams)
# reset optimizer parameters
for item in optimizer_tparams.values():
item.set_value(numpy.array(item.get_value()) * 0.)
# stop
else:
logging.info('Valid {}'.format(valid_err))
logging.info('Early Stop!')
training_progress.estop = True
break
logging.info('Valid {}'.format(valid_err))
if bleu:
translations = get_translation(f_init, f_next, model_options, valid_datasets, valid.source_dicts, trng)
translations = [seqs2words(t, worddicts_r[-1]) for t in translations]
output_file = saveto + '.trans'
valid_output = open(output_file, 'w')
if postprocess == 'bpe':
for i, t in enumerate(translations):
t = t.replace('@@ ', '')
print >> valid_output, t
else:
for i, t in enumerate(translations):
print >> valid_output, t
valid_output.close()
valid_refs = util.get_ref_files(valid_ref)
                    # keep the score in its own variable so the 'bleu' flag is not overwritten
                    bleu_score = 100 * util.bleu_file(output_file, valid_refs)
                    logging.info('Valid bleu {}\n'.format(bleu_score))
if external_validation_script:
logging.info("Calling external validation script")
if p_validation is not None and p_validation.poll() is None:
logging.info("Waiting for previous validation run to finish")
logging.info("If this takes too long, consider increasing validation interval, reducing validation set size, or speeding up validation by using multiple processes")
valid_wait_start = time.time()
p_validation.wait()
logging.info("Waited for {0:.1f} seconds".format(time.time()-valid_wait_start))
logging.info('Saving model...')
params = unzip_from_theano(tparams, excluding_prefix='prior_')
optimizer_params = unzip_from_theano(optimizer_tparams, excluding_prefix='prior_')
save(params, optimizer_params, training_progress, saveto+'.dev')
json.dump(model_options, open('%s.dev.npz.json' % saveto, 'wb'), indent=2)
logging.info('Done')
p_validation = Popen([external_validation_script])
# finish after this many updates
if training_progress.uidx >= finish_after:
logging.info('Finishing after %d iterations!' % training_progress.uidx)
training_progress.estop = True
break
logging.info('Seen %d samples' % n_samples)
if training_progress.estop:
break
if best_p is not None:
zip_to_theano(best_p, tparams)
zip_to_theano(best_opt_p, optimizer_tparams)
if valid is not None:
use_noise.set_value(0.)
valid_errs, alignment = pred_probs(f_log_probs, prepare_data,
model_options, valid)
valid_err = valid_errs.mean()
logging.info('Valid {}'.format(valid_err))
if best_p is not None:
params = copy.copy(best_p)
optimizer_params = copy.copy(best_opt_p)
else:
params = unzip_from_theano(tparams, excluding_prefix='prior_')
optimizer_params = unzip_from_theano(optimizer_tparams, excluding_prefix='prior_')
save(params, optimizer_params, training_progress, saveto)
return valid_err
if __name__ == '__main__':
parser = argparse.ArgumentParser()
data = parser.add_argument_group('data sets; model loading and saving')
data.add_argument('--datasets', type=str, required=True, metavar='PATH', nargs=2,
help="parallel training corpus (source and target)")
data.add_argument('--dictionaries', type=str, required=True, metavar='PATH', nargs="+",
help="network vocabularies (one per source factor, plus target vocabulary)")
data.add_argument('--model', type=str, default='model.npz', metavar='PATH', dest='saveto',
help="model file name (default: %(default)s)")
data.add_argument('--deep_fusion_lm', type=str, default=None, metavar='PATH', dest='deep_fusion_lm',
help="deep fusion language model file name")
data.add_argument('--saveFreq', type=int, default=30000, metavar='INT',
help="save frequency (default: %(default)s)")
data.add_argument('--reload', action='store_true', dest='reload_',
help="load existing model (if '--model' points to existing model)")
data.add_argument('--no_reload_training_progress', action='store_false', dest='reload_training_progress',
help="don't reload training progress (only used if --reload is enabled)")
data.add_argument('--overwrite', action='store_true',
help="write all models to same file")
network = parser.add_argument_group('network parameters')
network.add_argument('--dim_word', type=int, default=512, metavar='INT',
help="embedding layer size (default: %(default)s)")
network.add_argument('--dim', type=int, default=1000, metavar='INT',
help="hidden layer size (default: %(default)s)")
network.add_argument('--n_words_src', type=int, default=None, metavar='INT',
help="source vocabulary size (default: %(default)s)")
network.add_argument('--n_words', type=int, default=None, metavar='INT',
help="target vocabulary size (default: %(default)s)")
network.add_argument('--enc_depth', type=int, default=1, metavar='INT',
help="number of encoder layers (default: %(default)s)")
network.add_argument('--dec_depth', type=int, default=1, metavar='INT',
help="number of decoder layers (default: %(default)s)")
network.add_argument('--enc_recurrence_transition_depth', type=int, default=1, metavar='INT',
help="number of GRU transition operations applied in the encoder. Minimum is 1. (Only applies to gru). (default: %(default)s)")
network.add_argument('--dec_base_recurrence_transition_depth', type=int, default=2, metavar='INT',
help="number of GRU transition operations applied in the first layer of the decoder. Minimum is 2. (Only applies to gru_cond). (default: %(default)s)")
network.add_argument('--dec_high_recurrence_transition_depth', type=int, default=1, metavar='INT',
help="number of GRU transition operations applied in the higher layers of the decoder. Minimum is 1. (Only applies to gru). (default: %(default)s)")
network.add_argument('--dec_deep_context', action='store_true',
help="pass context vector (from first layer) to deep decoder layers")
network.add_argument('--enc_depth_bidirectional', type=int, default=None, metavar='INT',
help="number of bidirectional encoder layer; if enc_depth is greater, remaining layers are unidirectional; by default, all layers are bidirectional.")
network.add_argument('--factors', type=int, default=1, metavar='INT',
help="number of input factors (default: %(default)s)")
network.add_argument('--dim_per_factor', type=int, default=None, nargs='+', metavar='INT',
help="list of word vector dimensionalities (one per factor): '--dim_per_factor 250 200 50' for total dimensionality of 500 (default: %(default)s)")
network.add_argument('--use_dropout', action="store_true",
help="use dropout layer (default: %(default)s)")
network.add_argument('--dropout_embedding', type=float, default=0.2, metavar="FLOAT",
help="dropout for input embeddings (0: no dropout) (default: %(default)s)")
network.add_argument('--dropout_hidden', type=float, default=0.2, metavar="FLOAT",
help="dropout for hidden layer (0: no dropout) (default: %(default)s)")
network.add_argument('--dropout_source', type=float, default=0, metavar="FLOAT",
help="dropout source words (0: no dropout) (default: %(default)s)")
network.add_argument('--dropout_target', type=float, default=0, metavar="FLOAT",
help="dropout target words (0: no dropout) (default: %(default)s)")
network.add_argument('--layer_normalisation', action="store_true",
help="use layer normalisation (default: %(default)s)")
network.add_argument('--weight_normalisation', action="store_true",
help=" normalize weights (default: %(default)s)")
network.add_argument('--tie_encoder_decoder_embeddings', action="store_true", dest="tie_encoder_decoder_embeddings",
help="tie the input embeddings of the encoder and the decoder (first factor only). Source and target vocabulary size must the same")
network.add_argument('--tie_decoder_embeddings', action="store_true", dest="tie_decoder_embeddings",
help="tie the input embeddings of the decoder with the softmax output embeddings")
network.add_argument('--encoder', type=str, default='gru',
choices=['gru', 'lstm'],
help='encoder recurrent layer (default: %(default)s)')
network.add_argument('--decoder', type=str, default='gru_cond',
choices=['gru_cond', 'lstm_cond'],
help='first decoder recurrent layer (default: %(default)s)')
network.add_argument('--decoder_deep', type=str, default='gru',
choices=['gru', 'gru_cond', 'lstm'],
help='decoder recurrent layer after first one (default: %(default)s)')
network.add_argument('--concatenate_lm_decoder', action="store_true", dest="concatenate_lm_decoder",
help="concatenate LM state and decoder state (deep fusion)")
training = parser.add_argument_group('training parameters')
training.add_argument('--multi_src', action="store_true")
training.add_argument('--maxlen', type=int, default=100, metavar='INT',
help="maximum sequence length (default: %(default)s)")
training.add_argument('--optimizer', type=str, default="adam",
choices=['adam', 'adadelta', 'rmsprop', 'sgd', 'sgdmomentum'],
help="optimizer (default: %(default)s)")
training.add_argument('--batch_size', type=int, default=80, metavar='INT',
help="minibatch size (default: %(default)s)")
training.add_argument('--max_epochs', type=int, default=5000, metavar='INT',
help="maximum number of epochs (default: %(default)s)")
training.add_argument('--finish_after', type=int, default=10000000, metavar='INT',
help="maximum number of updates (minibatches) (default: %(default)s)")
training.add_argument('--decay_c', type=float, default=0, metavar='FLOAT',
help="L2 regularization penalty (default: %(default)s)")
training.add_argument('--map_decay_c', type=float, default=0, metavar='FLOAT',
help="MAP-L2 regularization penalty towards original weights (default: %(default)s)")
training.add_argument('--prior_model', type=str, metavar='PATH',
help="Prior model for MAP-L2 regularization. Unless using \"--reload\", this will also be used for initialization.")
training.add_argument('--clip_c', type=float, default=1, metavar='FLOAT',
help="gradient clipping threshold (default: %(default)s)")
training.add_argument('--lrate', type=float, default=0.0001, metavar='FLOAT',
help="learning rate (default: %(default)s)")
training.add_argument('--no_shuffle', action="store_false", dest="shuffle_each_epoch",
help="disable shuffling of training data (for each epoch)")
training.add_argument('--no_sort_by_length', action="store_false", dest="sort_by_length",
help='do not sort sentences in maxibatch by length')
training.add_argument('--maxibatch_size', type=int, default=20, metavar='INT',
help='size of maxibatch (number of minibatches that are sorted by length) (default: %(default)s)')
training.add_argument('--objective', choices=['CE', 'MRT', 'RAML'], default='CE',
help='training objective. CE: cross-entropy minimization (default); MRT: Minimum Risk Training (https://www.aclweb.org/anthology/P/P16/P16-1159.pdf) \
RAML: Reward Augmented Maximum Likelihood (https://papers.nips.cc/paper/6547-reward-augmented-maximum-likelihood-for-neural-structured-prediction.pdf)')
training.add_argument('--encoder_truncate_gradient', type=int, default=-1, metavar='INT',
help="truncate BPTT gradients in the encoder to this value. Use -1 for no truncation (default: %(default)s)")
training.add_argument('--decoder_truncate_gradient', type=int, default=-1, metavar='INT',
help="truncate BPTT gradients in the encoder to this value. Use -1 for no truncation (default: %(default)s)")
validation = parser.add_argument_group('validation parameters')
validation.add_argument('--valid_datasets', type=str, default=None, metavar='PATH', nargs=2,
help="parallel validation corpus (source and target) (default: %(default)s)")
validation.add_argument('--valid_batch_size', type=int, default=80, metavar='INT',
help="validation minibatch size (default: %(default)s)")
validation.add_argument('--validFreq', type=int, default=10000, metavar='INT',
help="validation frequency (default: %(default)s)")
validation.add_argument('--patience', type=int, default=10, metavar='INT',
help="early stopping patience (default: %(default)s)")
validation.add_argument('--anneal_restarts', type=int, default=0, metavar='INT',
help="when patience runs out, restart training INT times with annealed learning rate (default: %(default)s)")
validation.add_argument('--anneal_decay', type=float, default=0.5, metavar='FLOAT',
help="learning rate decay on each restart (default: %(default)s)")
validation.add_argument('--external_validation_script', type=str, default=None, metavar='PATH',
help="location of validation script (to run your favorite metric for validation) (default: %(default)s)")
display = parser.add_argument_group('display parameters')
display.add_argument('--dispFreq', type=int, default=1000, metavar='INT',
help="display loss after INT updates (default: %(default)s)")
display.add_argument('--sampleFreq', type=int, default=10000, metavar='INT',
help="display some samples after INT updates (default: %(default)s)")
mrt = parser.add_argument_group('minimum risk training parameters')
mrt.add_argument('--mrt_alpha', type=float, default=0.005, metavar='FLOAT',
help="MRT alpha (default: %(default)s)")
mrt.add_argument('--mrt_samples', type=int, default=100, metavar='INT',
help="samples per source sentence (default: %(default)s)")
mrt.add_argument('--mrt_samples_meanloss', type=int, default=10, metavar='INT',
help="draw n independent samples to calculate mean loss (which is subtracted from loss) (default: %(default)s)")
mrt.add_argument('--mrt_loss', type=str, default='SENTENCEBLEU n=4', metavar='STR',
help='loss used in MRT (default: %(default)s)')
mrt.add_argument('--mrt_reference', action="store_true",
help='add reference to MRT samples.')
mrt.add_argument('--mrt_ml_mix', type=float, default=0, metavar='FLOAT',
help="mix in ML objective in MRT training with this scaling factor (default: %(default)s)")
raml = parser.add_argument_group('reward augmented maximum likelihood parameters')
raml.add_argument('--raml_tau', type=float, default=0.85, metavar='FLOAT',
help="temperature for sharpness of exponentiated payoff distribution (default: %(default)s)")
raml.add_argument('--raml_samples', type=int, default=1, metavar='INT',
help="augment outputs with n samples (default: %(default)s)")
raml.add_argument('--raml_reward', type=str, default='hamming_distance', metavar='STR',
help="reward for sampling from exponentiated payoff distribution (default: %(default)s)")
domain_interpolation = parser.add_argument_group('domain interpolation parameters')
domain_interpolation.add_argument('--use_domain_interpolation', action='store_true', dest='use_domain_interpolation',
help="interpolate between an out-domain training corpus and an in-domain training corpus")
domain_interpolation.add_argument('--domain_interpolation_min', type=float, default=0.1, metavar='FLOAT',
help="minimum (initial) fraction of in-domain training data (default: %(default)s)")
domain_interpolation.add_argument('--domain_interpolation_max', type=float, default=1.0, metavar='FLOAT',
help="maximum fraction of in-domain training data (default: %(default)s)")
domain_interpolation.add_argument('--domain_interpolation_inc', type=float, default=0.1, metavar='FLOAT',
help="interpolation increment to be applied each time patience runs out, until maximum amount of interpolation is reached (default: %(default)s)")
domain_interpolation.add_argument('--domain_interpolation_indomain_datasets', type=str, metavar='PATH', nargs=2,
help="indomain parallel training corpus (source and target)")
decode = parser.add_argument_group('decoding')
decode.add_argument('--bleu', action="store_true")
decode.add_argument('--valid_ref', type=str, help="reference for bleu")
decode.add_argument('--postprocess', type=str, help="post process: (bpe)")
args = parser.parse_args()
# set up logging
level = logging.INFO
logging.basicConfig(level=level, format='%(levelname)s: %(message)s')
#print vars(args)
train(**vars(args))
# Profile peak GPU memory usage by uncommenting next line and enabling theano CUDA memory profiling (http://deeplearning.net/software/theano/tutorial/profiling.html)
# print theano.sandbox.cuda.theano_allocated()
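#
# Illustrative invocation of this trainer (the script name "nmt.py" and the
# validation file names are placeholders, not taken from this file):
#
#   python nmt.py --objective MRT --mrt_samples 100 --mrt_alpha 0.005 \
#       --lrate 0.0001 --batch_size 80 --maxibatch_size 20 \
#       --valid_datasets dev.bpe.src dev.bpe.trg --patience 10 \
#       --anneal_restarts 2 --anneal_decay 0.5
#
# With --objective MRT the per-sentence loss is computed over --mrt_samples
# sampled translations, with the sharpness of the sample distribution
# controlled by --mrt_alpha; when --patience runs out on the validation set,
# training restarts up to --anneal_restarts times with the learning rate
# multiplied by --anneal_decay.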
| 49.437983 | 212 | 0.572652 |
794592f45cf556cbf0e6cdd470ab31428777ddb4 | 672 | py | Python | manage.py | dibinvm1/CraigslistClone | bad79c21ea225afbc1aaed744dc5e4c877158f02 | [
"MIT"
] | null | null | null | manage.py | dibinvm1/CraigslistClone | bad79c21ea225afbc1aaed744dc5e4c877158f02 | [
"MIT"
] | null | null | null | manage.py | dibinvm1/CraigslistClone | bad79c21ea225afbc1aaed744dc5e4c877158f02 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Craigslist_Clone.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.217391 | 80 | 0.683036 |
794593e320cda55d8ec0d2831a27560356509e21 | 8,144 | py | Python | ucscentralsdk/mometa/fabric/FabricVsan.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | ucscentralsdk/mometa/fabric/FabricVsan.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | ucscentralsdk/mometa/fabric/FabricVsan.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for FabricVsan ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class FabricVsanConsts():
DEFAULT_ZONING_DISABLED = "disabled"
DEFAULT_ZONING_ENABLED = "enabled"
FC_ZONE_SHARING_MODE_CLEAR_UNMANAGED_ZONE_ALL = "clear-unmanaged-zone-all"
FC_ZONE_SHARING_MODE_COALESCE = "coalesce"
IF_ROLE_DIAG = "diag"
IF_ROLE_FCOE_NAS_STORAGE = "fcoe-nas-storage"
IF_ROLE_FCOE_STORAGE = "fcoe-storage"
IF_ROLE_FCOE_UPLINK = "fcoe-uplink"
IF_ROLE_MGMT = "mgmt"
IF_ROLE_MONITOR = "monitor"
IF_ROLE_NAS_STORAGE = "nas-storage"
IF_ROLE_NETWORK = "network"
IF_ROLE_NETWORK_FCOE_UPLINK = "network-fcoe-uplink"
IF_ROLE_SERVER = "server"
IF_ROLE_SERVICE = "service"
IF_ROLE_STORAGE = "storage"
IF_ROLE_UNKNOWN = "unknown"
IF_TYPE_AGGREGATION = "aggregation"
IF_TYPE_PHYSICAL = "physical"
IF_TYPE_UNKNOWN = "unknown"
IF_TYPE_VIRTUAL = "virtual"
OPER_STATE_ERROR_MISCONFIGURED = "error-misconfigured"
OPER_STATE_ERROR_RESERVED = "error-reserved"
OPER_STATE_OK = "ok"
POLICY_OWNER_LOCAL = "local"
POLICY_OWNER_PENDING_POLICY = "pending-policy"
POLICY_OWNER_POLICY = "policy"
POLICY_OWNER_UNSPECIFIED = "unspecified"
SWITCH_ID_A = "A"
SWITCH_ID_B = "B"
SWITCH_ID_NONE = "NONE"
SWITCH_ID_DUAL = "dual"
SWITCH_ID_MGMT = "mgmt"
ZONING_STATE_DISABLED = "disabled"
ZONING_STATE_ENABLED = "enabled"
class FabricVsan(ManagedObject):
"""This is FabricVsan class."""
consts = FabricVsanConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("FabricVsan", "fabricVsan", "net-[name]", VersionMeta.Version111a, "InputOutput", 0x3ff, [], ["admin", "ext-san-config", "ext-san-policy"], [u'fabricFcEstc', u'fabricFcEstcCloud', u'fabricFcSan'], [u'fabricConsumer', u'fabricEtherRef', u'fabricExtension', u'fabricFcVsanPc', u'fabricFcVsanPortEp', u'fabricFcoeVsanPc', u'fabricFcoeVsanPortEp', u'fabricSwSubGroup'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"default_zoning": MoPropertyMeta("default_zoning", "defaultZoning", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["disabled", "enabled"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"ep_dn": MoPropertyMeta("ep_dn", "epDn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"fc_zone_sharing_mode": MoPropertyMeta("fc_zone_sharing_mode", "fcZoneSharingMode", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["clear-unmanaged-zone-all", "coalesce"], []),
"fcoe_vlan": MoPropertyMeta("fcoe_vlan", "fcoeVlan", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["1-4029", "4048-4091"]),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"r_global": MoPropertyMeta("r_global", "global", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [], ["1-4093"]),
"if_role": MoPropertyMeta("if_role", "ifRole", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["diag", "fcoe-nas-storage", "fcoe-storage", "fcoe-uplink", "mgmt", "monitor", "nas-storage", "network", "network-fcoe-uplink", "server", "service", "storage", "unknown"], []),
"if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["aggregation", "physical", "unknown", "virtual"], []),
"local": MoPropertyMeta("local", "local", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x20, None, None, r"""[\-\.:_a-zA-Z0-9]{1,32}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["error-misconfigured", "error-reserved", "ok"], []),
"peer_dn": MoPropertyMeta("peer_dn", "peerDn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["local", "pending-policy", "policy", "unspecified"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE", "dual", "mgmt"], []),
"transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
"zoning_state": MoPropertyMeta("zoning_state", "zoningState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["disabled", "enabled"], []),
}
prop_map = {
"childAction": "child_action",
"defaultZoning": "default_zoning",
"dn": "dn",
"epDn": "ep_dn",
"fcZoneSharingMode": "fc_zone_sharing_mode",
"fcoeVlan": "fcoe_vlan",
"fltAggr": "flt_aggr",
"global": "r_global",
"id": "id",
"ifRole": "if_role",
"ifType": "if_type",
"local": "local",
"locale": "locale",
"name": "name",
"operState": "oper_state",
"peerDn": "peer_dn",
"policyOwner": "policy_owner",
"rn": "rn",
"status": "status",
"switchId": "switch_id",
"transport": "transport",
"type": "type",
"zoningState": "zoning_state",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.child_action = None
self.default_zoning = None
self.ep_dn = None
self.fc_zone_sharing_mode = None
self.fcoe_vlan = None
self.flt_aggr = None
self.r_global = None
self.id = None
self.if_role = None
self.if_type = None
self.local = None
self.locale = None
self.oper_state = None
self.peer_dn = None
self.policy_owner = None
self.status = None
self.switch_id = None
self.transport = None
self.type = None
self.zoning_state = None
ManagedObject.__init__(self, "FabricVsan", parent_mo_or_dn, **kwargs)
| 61.69697 | 419 | 0.661591 |
79459413f3f343538da94968dfc7c217e2a9d107 | 1,336 | py | Python | LSTM/reader_test.py | MasazI/DeepLearning_TensorFlow | 6a0865850b32eb4af52bc41984e0cbaa2a19c48a | [
"MIT"
] | 17 | 2015-12-20T14:10:35.000Z | 2022-02-28T13:06:33.000Z | LSTM/reader_test.py | MasazI/DeepLearning_TensorFlow | 6a0865850b32eb4af52bc41984e0cbaa2a19c48a | [
"MIT"
] | 1 | 2019-02-20T12:37:56.000Z | 2019-02-20T12:37:56.000Z | LSTM/reader_test.py | MasazI/DeepLearning_TensorFlow | 6a0865850b32eb4af52bc41984e0cbaa2a19c48a | [
"MIT"
] | 8 | 2015-11-14T04:32:10.000Z | 2020-12-26T01:12:18.000Z | # encoding:utf-8
import os.path
# pylint: disable=g-bad-import-order,unused-import
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
import reader
from tensorflow.python.platform import gfile
class PtbReaderTest(tf.test.TestCase):
def setUp(self):
self._string_data = "\n".join(
[" hello there i am",
" rain as day",
" want some cheesy puffs ?"])
def testPtbRawData(self):
tmpdir = 'texts/simple-examples/data'
for suffix in "train", "valid", "test":
filename = os.path.join(tmpdir, "ptb.%s.txt" % suffix)
with gfile.GFile(filename, "w") as fh:
fh.write(self._string_data)
# Smoke test
output = reader.ptb_raw_data(tmpdir)
print(output)
self.assertEqual(len(output), 4)
def testPtbIterator(self):
raw_data = [4, 3, 2, 1, 0, 5, 6, 1, 1, 1, 1, 0, 3, 4, 1]
batch_size = 3
num_steps = 2
output = list(reader.ptb_iterator(raw_data, batch_size, num_steps))
self.assertEqual(len(output), 2)
o1, o2 = (output[0], output[1])
self.assertEqual(o1[0].shape, (batch_size, num_steps))
self.assertEqual(o1[1].shape, (batch_size, num_steps))
self.assertEqual(o2[0].shape, (batch_size, num_steps))
self.assertEqual(o2[1].shape, (batch_size, num_steps))
if __name__ == "__main__":
tf.test.main()
| 27.833333 | 71 | 0.660928 |
794594c4a9642a7bad6f3c8d318f63993f48dace | 1,977 | py | Python | Tests/scripts/circleci_spell_checker.py | vibhuabharadwaj/content | 30d639dbea0015536a3040ec18f93e50322bded0 | [
"MIT"
] | 7 | 2020-09-24T22:38:01.000Z | 2021-07-14T15:58:35.000Z | Tests/scripts/circleci_spell_checker.py | vibhuabharadwaj/content | 30d639dbea0015536a3040ec18f93e50322bded0 | [
"MIT"
] | 9 | 2021-02-08T20:51:18.000Z | 2021-09-23T23:27:38.000Z | Tests/scripts/circleci_spell_checker.py | vibhuabharadwaj/content | 30d639dbea0015536a3040ec18f93e50322bded0 | [
"MIT"
] | 2 | 2020-12-08T17:03:33.000Z | 2021-07-13T18:32:06.000Z | import re
import sys
from Tests.scripts.spell_checker import spell_checker
from Tests.test_utils import run_command, checked_type
from Tests.scripts.constants import SPELLCHECK_FILE_TYPES, DESCRIPTION_REGEX
def get_modified_files(files_string):
"""Get lists of the modified files in your branch according to the files string.
Args:
files_string (string): String that was calculated by git using `git diff` command.
Returns:
(yml_files, md_files). Tuple of sets.
"""
all_files = files_string.split('\n')
yml_files = set([])
md_files = set([])
for f in all_files:
file_data = f.split()
if not file_data:
continue
file_status = file_data[0]
file_path = file_data[1]
if file_path.endswith('.js') or file_path.endswith('.py'):
continue
if file_status.lower().startswith('r'):
file_path = file_data[2]
if file_status.lower() == 'm' or file_status.lower() == 'a' or file_status.lower().startswith('r'):
if checked_type(file_path, SPELLCHECK_FILE_TYPES):
yml_files.add(file_path)
elif re.match(DESCRIPTION_REGEX, file_path, re.IGNORECASE):
md_files.add(file_path)
return yml_files, md_files
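# Example of the `git diff --name-status` text this function expects
# (the paths are illustrative):
#
#   M       Integrations/MyIntegration/MyIntegration.yml
#   A       Scripts/MyScript/MyScript.yml
#   R100    MyIntegration_description.md    MyIntegration_description_new.md
#
# Modified/added entries carry a single path; renamed entries ("R...") carry
# the old and new paths, and the code above keeps the new one (file_data[2]).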
def check_changed_files():
branch_name = sys.argv[1]
if branch_name != "master":
all_changed_files_string = run_command("git diff --name-status origin/master...{}".format(branch_name))
yml_files, md_files = get_modified_files(all_changed_files_string)
for yml_file in yml_files:
print("Checking the file - {}".format(yml_file))
spell_checker(yml_file)
for md_file in md_files:
print("Checking the file - {}".format(md_file))
spell_checker(md_file, is_md=True)
else:
print("Not checking for spelling errors in master branch")
if __name__ == "__main__":
check_changed_files()
| 31.380952 | 111 | 0.654021 |
794594de1c794d94d360a00f0a51e76a36641d91 | 14,792 | py | Python | bindings/python/cntk/learners/tests/learner_test.py | rickyHong/MS-CNTK | 2bcdc9dff6dc6393813f6043d80e167fb31aed72 | [
"RSA-MD"
] | null | null | null | bindings/python/cntk/learners/tests/learner_test.py | rickyHong/MS-CNTK | 2bcdc9dff6dc6393813f6043d80e167fb31aed72 | [
"RSA-MD"
] | null | null | null | bindings/python/cntk/learners/tests/learner_test.py | rickyHong/MS-CNTK | 2bcdc9dff6dc6393813f6043d80e167fb31aed72 | [
"RSA-MD"
] | null | null | null | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import division, print_function
import numpy as np
import cntk as C
from cntk import parameter
import pytest
import sys
from cntk.logging import ProgressPrinter
from cntk.learners import sgd, learning_rate_schedule, UnitType, universal
from cntk.layers import Dense, Sequential
LR_SCHEDULE_PARAMS = [
((0.2, UnitType.sample), [0.2]),
((0.2, UnitType.sample), [0.2, 0.2, 0.2, 0.2]),
(([0.2,0.4], UnitType.sample, 5), [0.2]*5+[0.4]*20),
(([(3,0.2),(2,0.4),(1,0.8)], UnitType.sample, 5), [0.2]*15+[0.4]*10+[0.8]*20),
]
MOMENTUM_SCHEDULE_PARAMS = [
((0.2,), [0.2]),
((0.2,), [0.2, 0.2, 0.2, 0.2]),
(([0.2,0.4], 5), [0.2]*5+[0.4]*20),
(([(3,0.2),(2,0.4),(1,0.8)], 5), [0.2]*15+[0.4]*10+[0.8]*20),
]
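# A small, self-contained example in the same spirit as the parametrized
# expectations above (values chosen to match them):
def test_schedule_indexing_example():
    s = C.momentum_schedule([0.2, 0.4], 5)
    # each list entry is held for `epoch_size` samples before the next applies
    assert [s[i] for i in (0, 4, 5, 24)] == [0.2, 0.2, 0.4, 0.4]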
@pytest.mark.parametrize("params, expectation", LR_SCHEDULE_PARAMS)
def test_learning_rate_schedule(params, expectation):
l = learning_rate_schedule(*params)
assert [l[i] for i in range(len(expectation))] == expectation
def sweep_based_schedule_fails():
with pytest.raises(Exception):
learning_rate_schedule([1], unit=UnitType.sample, epoch_size=0)
def test_momentum_schedule():
m = 2500
ms = C.momentum_as_time_constant_schedule([m])
assert ms[0] == np.exp(-1.0 / np.asarray(m))
ms = C.momentum_as_time_constant_schedule(m)
assert ms[0] == np.exp(-1.0 / np.asarray(m))
mlist = [980, 520]
msl = C.momentum_as_time_constant_schedule(mlist)
expected = np.exp(-1.0 / np.asarray(mlist))
assert all(mi == ei for mi,ei in zip(msl,expected))
@pytest.mark.parametrize("params, expectation", MOMENTUM_SCHEDULE_PARAMS)
def test_momentum_schedule_per_sample(params, expectation):
l = C.momentum_schedule(*params)
assert [l[i] for i in range(len(expectation))] == expectation
def test_learner_init():
i = C.input_variable(shape=(1,), needs_gradient=True, name='a')
w = parameter(shape=(1,))
res = i * w
learner = sgd(res.parameters, lr=learning_rate_schedule(0.1, UnitType.sample))
assert learner.learning_rate() == 0.1
learner.reset_learning_rate(learning_rate_schedule([1,2,3], UnitType.minibatch));
assert learner.learning_rate() == 1.0
learner_parameter = learner.parameters
from cntk.variables import Parameter
param = learner_parameter[0]
assert isinstance(param, Parameter)
unit_gain_value = C.default_unit_gain_value()
assert unit_gain_value
momentum_time_constant = C.momentum_as_time_constant_schedule(1100)
lr_per_sample = learning_rate_schedule(0.1, UnitType.sample)
C.momentum_sgd(res.parameters, lr_per_sample, momentum_time_constant)
C.momentum_sgd(res.parameters, lr_per_sample, momentum_time_constant, unit_gain_value)
C.momentum_sgd(res.parameters, lr_per_sample, momentum_time_constant, unit_gain=unit_gain_value)
C.set_default_unit_gain_value(False)
unit_gain_value = C.default_unit_gain_value()
assert not unit_gain_value
lr_per_sample = learning_rate_schedule([0.1, 0.2], UnitType.sample)
C.nesterov(res.parameters, lr=lr_per_sample, momentum=momentum_time_constant)
C.nesterov(res.parameters, lr_per_sample, momentum_time_constant, unit_gain_value)
C.nesterov(res.parameters, lr=lr_per_sample, momentum=momentum_time_constant, unit_gain=unit_gain_value)
lr_per_sample = learning_rate_schedule([0.1]*3 +[0.2]*2 +[0.3], UnitType.sample)
C.adagrad(res.parameters, lr=lr_per_sample, need_ave_multiplier=True)
C.set_default_unit_gain_value(True)
unit_gain_value = C.default_unit_gain_value()
assert unit_gain_value
lr_per_sample = learning_rate_schedule([(3,0.1), (2, 0.2), (1, 0.3)], UnitType.sample)
C.fsadagrad(res.parameters, lr=lr_per_sample, momentum=momentum_time_constant)
C.fsadagrad(res.parameters, lr_per_sample, momentum_time_constant, unit_gain_value)
C.fsadagrad(res.parameters, lr=lr_per_sample, momentum=momentum_time_constant, unit_gain=unit_gain_value)
gamma, inc, dec, max, min = [0.1]*5
lr_per_sample = learning_rate_schedule([0.1, 0.2], UnitType.sample, 100)
C.rmsprop(res.parameters, lr_per_sample, gamma, inc, dec, max, min, True)
C.set_default_use_mean_gradient_value(False)
use_mean_gradient_value = C.default_use_mean_gradient_value()
assert not use_mean_gradient_value
C.adadelta(res.parameters, lr_per_sample)
C.set_default_use_mean_gradient_value(True)
use_mean_gradient_value = C.default_use_mean_gradient_value()
assert use_mean_gradient_value
C.adadelta(res.parameters, lr_per_sample)
def test_learner_update():
i = C.input_variable(shape=(1,), needs_gradient=True, name='a')
w_init = 1
w = parameter(shape=(1,), init=w_init)
res = i * w
learner = sgd(res.parameters, lr=learning_rate_schedule([0.1]*50 + [0.2]*50, UnitType.sample, 1))
assert learner.learning_rate() == 0.1
x = learner.update({w: np.asarray([[2.]], dtype=np.float32)}, 100)
assert learner.learning_rate() == 0.2
assert w.value < w_init
learner.reset_learning_rate(learning_rate_schedule([0.3]*50 + [0.4]*50, UnitType.sample, 1));
assert learner.learning_rate() == 0.3
x = learner.update({w: np.asarray([[2.]], dtype=np.float32)}, 100)
assert learner.learning_rate() == 0.4
def test_noise_injection_with_checkpointing():
from cntk import initializer
shape = (100,100)
w1 = parameter(shape=shape, init=initializer.glorot_uniform(seed=123))
w2 = parameter(shape=shape, init=initializer.glorot_uniform(seed=123))
w3 = parameter(shape=shape, init=initializer.glorot_uniform(seed=123))
lr=learning_rate_schedule(0.5, UnitType.sample)
m=C.momentum_schedule(0.99)
learner1 = C.momentum_sgd([w1], lr, m, gaussian_noise_injection_std_dev=0.5)
learner2 = C.momentum_sgd([w2], lr, m, gaussian_noise_injection_std_dev=0.5)
learner3 = C.momentum_sgd([w3], lr, m, gaussian_noise_injection_std_dev=0.5)
assert np.allclose(w1.value, w2.value) and np.allclose(w1.value, w3.value)
for i in range(10):
checkpoint = learner1.create_checkpoint()
v = np.float32(np.random.rand(100,100))
learner1.update({w1: v}, 1)
learner2.update({w2: v}, 1)
assert not np.allclose(w1.value, w2.value)
learner3.restore_from_checkpoint(checkpoint)
learner3.update({w3: v}, 1)
assert np.allclose(w1.value, w3.value)
class TestProgressWriter(C.cntk_py.ProgressWriter):
def __init__(self):
super(TestProgressWriter, self).__init__(1, 0, 1, 0, sys.maxsize, 0)
self.log_output = []
self.__disown__()
def write(self, key, value):
self.log_output.append(float(value))
def test_learner_logging():
from cntk import Trainer
from cntk.logging import ProgressPrinter
from cntk import cross_entropy_with_softmax, classification_error
features = C.input_variable(shape=(1,), needs_gradient=True, name='a')
w_init = 1
w = parameter(shape=(1,), init=w_init)
z = features * w
labels = C.input_variable(shape=(1,), name='b')
ce = cross_entropy_with_softmax(z, labels)
errs = classification_error(z, labels)
writer = TestProgressWriter();
lr_values = [0.3, 0.2, 0.1, 0]
m_values = [0.6, 0.7, 0.8]
learner = C.momentum_sgd(z.parameters,
learning_rate_schedule(lr_values, UnitType.sample, 1),
C.momentum_schedule(m_values, 1))
trainer = Trainer(z, (ce, errs), [learner], writer)
for i in range(10):
trainer.train_minibatch({features: [[2.]], labels: [[1.]]})
assert len(writer.log_output) == len(lr_values + m_values)
values = [j for i in zip(lr_values,m_values) for j in i] + [0]
for i in range(len(values)):
assert (values[i] == writer.log_output[i])
def test_training_parameter_schedule():
C.training_parameter_schedule(0.01, unit='minibatch')
C.training_parameter_schedule(0.01, unit='sample')
with pytest.raises(ValueError):
C.training_parameter_schedule(0.01, unit='not_supported')
with pytest.raises(ValueError):
C.training_parameter_schedule(0.01, unit=5)
def test_sweep_based_schedule(tmpdir, device_id):
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs
from cntk import cross_entropy_with_softmax, classification_error, plus, reduce_sum, sequence
from cntk import Trainer
input_dim = 69
ctf_data = '''\
0 |S0 3:1 |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
2 |S0 60:1 |# <s> |S1 3:1 |# <s>
2 |S0 61:1 |# A |S1 32:1 |# ~AH
'''
ctf_file = str(tmpdir/'2seqtest.txt')
with open(ctf_file, 'w') as f:
f.write(ctf_data)
mbs = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
features = StreamDef(field='S0', shape=input_dim, is_sparse=True),
labels = StreamDef(field='S1', shape=input_dim, is_sparse=True)
)), randomize=False)
in1 = sequence.input_variable(shape=(input_dim,))
labels = sequence.input_variable(shape=(input_dim,))
p = parameter(shape=(input_dim,), init=10)
z = plus(in1, reduce_sum(p), name='z')
ce = cross_entropy_with_softmax(z, labels)
errs = classification_error(z, labels)
lr_per_sample = learning_rate_schedule([0.3, 0.2, 0.1, 0.0], UnitType.sample)
learner = sgd(z.parameters, lr_per_sample)
trainer = Trainer(z, (ce, errs), [learner])
input_map = {
in1 : mbs.streams.features,
labels : mbs.streams.labels
}
# fetch minibatch (first sequence)
data = mbs.next_minibatch(1, input_map=input_map)
trainer.train_minibatch(data)
assert learner.learning_rate() == 0.3
# fetch minibatch (second sequence, sweep ends at this point)
data = mbs.next_minibatch(1, input_map=input_map)
trainer.train_minibatch(data)
assert learner.learning_rate() == 0.2
# fetch minibatch (both sequences -- entire sweep in one go)
data = mbs.next_minibatch(9, input_map=input_map)
trainer.train_minibatch(data)
assert learner.learning_rate() == 0.1
# fetch minibatch (multiple sweeps)
data = mbs.next_minibatch(30, input_map=input_map)
trainer.train_minibatch(data, outputs=[z.output])
assert learner.learning_rate() == 0.0
def generate_random_data(sample_size, feature_dim, num_classes):
# Create synthetic data using NumPy.
Y = np.random.randint(size=(sample_size, 1), low=0, high=num_classes)
# Make sure that the data is separable
X = (np.random.randn(sample_size, feature_dim) + 3) * (Y + 1)
X = X.astype(np.float32)
# converting class 0 into the vector "1 0 0",
# class 1 into vector "0 1 0", ...
class_ind = [Y == class_number for class_number in range(num_classes)]
Y = np.asarray(np.hstack(class_ind), dtype=np.float32)
return X, Y
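# A small illustrative check of the helper above (shapes only; the random
# values themselves are not asserted):
def test_generate_random_data_shapes_example():
    X, Y = generate_random_data(4, 2, 3)
    assert X.shape == (4, 2)
    assert Y.shape == (4, 3)
    # one-hot labels: exactly one active class per row
    assert np.allclose(Y.sum(axis=1), 1.0)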
def test_learner_empy_parameters_list():
lr_per_sample = learning_rate_schedule(0.1, UnitType.sample)
with pytest.raises(ValueError):
learner = C.sgd([], lr_per_sample)
def ffnet(learner):
inputs = 3
outputs = 3
layers = 2
hidden_dimension = 3
# input variables denoting the features and label data
features = C.input_variable((inputs), np.float32)
label = C.input_variable((outputs), np.float32)
# Instantiate the feedforward classification model
my_model = Sequential ([
Dense(hidden_dimension, activation=C.sigmoid, init=C.glorot_uniform(seed=98052)),
Dense(outputs, init=C.glorot_uniform(seed=98052))])
z = my_model(features)
ce = C.cross_entropy_with_softmax(z, label)
pe = C.classification_error(z, label)
# Instantiate the trainer object to drive the model training
progress_printer = ProgressPrinter(0)
trainer = C.Trainer(z, (ce, pe), [learner(z.parameters)], [progress_printer])
# Get minibatches of training data and perform model training
minibatch_size = 25
num_minibatches_to_train = 100
aggregate_loss = 0.0
for i in range(num_minibatches_to_train):
train_features, labels = generate_random_data(minibatch_size, inputs, outputs)
# Specify the mapping of input variables in the model to actual minibatch data to be trained with
trainer.train_minibatch({features : train_features, label : labels})
sample_count = trainer.previous_minibatch_sample_count
aggregate_loss += trainer.previous_minibatch_loss_average * sample_count
last_avg_error = aggregate_loss / trainer.total_number_of_samples_seen
test_features, test_labels = generate_random_data(minibatch_size, inputs, outputs)
avg_error = trainer.test_minibatch({features : test_features, label : test_labels})
print(' error rate on an unseen minibatch: {}'.format(avg_error))
return last_avg_error, avg_error
def test_sgd_with_noise():
# Runs a network where the number of parameters is odd
# in some layers. This tests that the cuRand library will not
# complain about generating an odd number of random values
np.random.seed(98052)
learner = lambda params: sgd(params, lr=learning_rate_schedule(0.125, UnitType.minibatch), gaussian_noise_injection_std_dev=0.01)
ffnet(learner)
# We just verify that we did not crash
assert(True)
def test_universal():
np.random.seed(98052)
builtin_sgd = lambda params: sgd(params, lr=learning_rate_schedule(0.125, UnitType.minibatch))
builtin_last_avg_error, builtin_avg_error = ffnet(builtin_sgd)
np.random.seed(98052)
my_sgd = lambda ps, gs: C.combine([C.assign(p, p - 0.125/25 * g) for p, g in zip(ps, gs)])
universal_sgd = lambda params: universal(my_sgd, params)
my_last_avg_error, my_avg_error = ffnet(universal_sgd)
assert np.allclose(my_last_avg_error, builtin_last_avg_error)
assert np.allclose(my_avg_error, builtin_avg_error)
def test_0d_1d_parameter_set_value():
x = C.input_variable(2)
w_0d = C.parameter(())
op = x + w_0d
w_0d_grad = op.grad({x : np.asarray([1, 2], dtype=np.float32)}, wrt=[w_0d], as_numpy=False)
w_0d.value = w_0d_grad.data
assert w_0d.value == 2.
w_1d = C.parameter(shape=2)
op = x + w_1d
w_1d_grad = op.grad({x : np.asarray([1, 2], dtype=np.float32)}, wrt=[w_1d], as_numpy=False)
w_1d.value = w_1d_grad.data
assert np.array_equal(w_1d.value, [1., 1.])
| 38.824147 | 133 | 0.68848 |
7945963033ff69f472a5d418dc1b2814e3543fcf | 1,766 | py | Python | analyses/evolution_stats.py | marisgg/evolutionary-potts | 7216bbac497697eba22cb4e877d70a73e22d8ed1 | [
"MIT"
] | null | null | null | analyses/evolution_stats.py | marisgg/evolutionary-potts | 7216bbac497697eba22cb4e877d70a73e22d8ed1 | [
"MIT"
] | null | null | null | analyses/evolution_stats.py | marisgg/evolutionary-potts | 7216bbac497697eba22cb4e877d70a73e22d8ed1 | [
"MIT"
] | null | null | null | import numpy as np
import os
from matplotlib import pyplot as plt
result_dirs = {"none":[], "medium":[], "high":[]}
for filename in os.listdir("results/"):
filename = "results/"+filename
if os.path.isdir(filename):
print(filename)
if filename.startswith("results"):
runtype = filename.split("-")[1]
result_dirs[runtype].append(filename)
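# The loop below expects generation summaries in the run logs that look like
# the line sketched here (the field names are an assumption; only the
# "Genfitness" prefix and the "key: value" layout matter to the parser):
#
#   Genfitness  mean: 103.5  std dev: 12.1  max: 250.0
#
# which is parsed into {'mean': 103.5, 'std': 12.1, 'max': 250.0}; the lone
# token "dev" is dropped so "std dev" collapses to the key "std".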
for runtype in result_dirs.keys():
if(len(result_dirs[runtype]) == 0):
continue
fig, axs = plt.subplots(1, len(result_dirs[runtype]), sharey="all", squeeze=False)  # squeeze=False keeps axs 2-D even for a single run
labels = []
for i, dirname in enumerate(result_dirs[runtype]):
with open(f"{dirname}/evolutionary-potts/out.txt", "r") as f:
desired_lines = []
fitnessdata = []
for line in f:
if line.startswith("Genfitness"):
words = [w for w in line.rstrip().split(" ") if w != '']
if words[0] == 'Genfitness':
words = words[1:]
words = [word.rstrip(':') for word in words]
words = [word for word in words if word != 'dev']
keys = words[::2]
values = list(map(float, words[1::2]))
linedict = dict(zip(keys, values))
fitnessdata.append(linedict)
lcl_labels = []
for key in fitnessdata[0].keys():
lcl_labels.append(key)
axs[0, i].plot(range(len(fitnessdata)), [gen[key] for gen in fitnessdata], label=key)
labels = lcl_labels
plt.gca().set_ylim([-500,9000])
fig.legend(labels)
fig.suptitle(runtype)
plt.tight_layout()
plt.savefig(f"results/{runtype}_evohistory.png")
plt.show()
| 37.574468 | 98 | 0.537939 |
79459685f2e1c1c366e2ffc08bbb17ad62a66de8 | 1,879 | py | Python | server.py | tooshar/flask_image_upload | 18ebe6973e2cda8bb63504fd470cf38e303e3692 | [
"MIT"
] | 1 | 2022-01-23T13:30:33.000Z | 2022-01-23T13:30:33.000Z | server.py | tooshar/flask_image_upload | 18ebe6973e2cda8bb63504fd470cf38e303e3692 | [
"MIT"
] | null | null | null | server.py | tooshar/flask_image_upload | 18ebe6973e2cda8bb63504fd470cf38e303e3692 | [
"MIT"
] | 1 | 2019-04-23T21:41:15.000Z | 2019-04-23T21:41:15.000Z | import os
from flask import Flask, flash, request, redirect, url_for,make_response, render_template, send_from_directory, send_file
from werkzeug.utils import secure_filename
from tempfile import NamedTemporaryFile
from shutil import copyfileobj
from os import remove
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
app.secret_key = 'some_secret'
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
return 'No file part'
# return redirect(request.url)
file = request.files['file']
# if the user does not select a file, the browser may also
# submit an empty part without a filename
if file.filename == '':
return 'No file selected'
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(filename)
return redirect(url_for('uploaded_image',
filename=filename))
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload New Image</h1>
<form method=post enctype=multipart/form-data>
<input type=file name=file accept=".jpeg,.jpg,.png">
<input type=submit value=Submit>
</form>
'''
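# Illustrative client call for the form above (host/port are the Flask
# defaults, assuming the app is started with `flask run` or an
# `app.run(debug=True)` guard, which this module does not define itself):
#
#   curl -L -F "file=@photo.jpg" http://127.0.0.1:5000/
#
# -L follows the redirect to /img/<filename>, which streams the image back
# once and then removes the saved file from disk.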
@app.route('/img/<filename>')
def uploaded_image(filename):
tempFileObj = NamedTemporaryFile(mode='w+b',suffix='jpg')
pilImage = open(filename,'rb')
copyfileobj(pilImage,tempFileObj)
pilImage.close()
remove(filename)
tempFileObj.seek(0,0)
response = send_file(tempFileObj, mimetype='image/jpeg')
return response | 34.163636 | 121 | 0.63917 |
79459721584f56f182da2abf6f64a638f6cc786f | 1,082 | py | Python | tests/test_parser/test_method_block.py | vbondarevsky/ones_analyzer | ab8bff875192db238ed17c20d61c9fa5b55c3fa8 | [
"MIT"
] | 12 | 2017-11-23T07:04:13.000Z | 2022-03-01T21:06:56.000Z | tests/test_parser/test_method_block.py | vbondarevsky/analyzer_test | ab8bff875192db238ed17c20d61c9fa5b55c3fa8 | [
"MIT"
] | 2 | 2017-06-25T21:32:32.000Z | 2017-11-19T19:05:40.000Z | tests/test_parser/test_method_block.py | vbondarevsky/analyzer_test | ab8bff875192db238ed17c20d61c9fa5b55c3fa8 | [
"MIT"
] | 5 | 2017-11-21T08:24:56.000Z | 2021-08-17T23:21:18.000Z | from analyzer.syntax_kind import SyntaxKind
from tests.utils import TestCaseParser
class TestParserMethodBlock(TestCaseParser):
def test_one_procedure(self):
code = \
"""Процедура МояПроцедура()
КонецПроцедуры"""
self.parse_source(code)
self.assertNode(self.syntax_tree.methods, [SyntaxKind.ProcedureBlock])
self.assertNode(self.syntax_tree.methods[0].end, SyntaxKind.EndProcedureKeyword)
def test_one_function(self):
code = \
"""Функция МояПроцедура()
КонецФункции"""
self.parse_source(code)
self.assertNode(self.syntax_tree.methods, [SyntaxKind.FunctionBlock])
self.assertNode(self.syntax_tree.methods[0].end, SyntaxKind.EndFunctionKeyword)
def test_two_methods(self):
code = \
"""Процедура МояПроцедура()
КонецПроцедуры
Функция МояФункция()
КонецФункции"""
self.parse_source(code)
self.assertNode(self.syntax_tree.methods, [SyntaxKind.ProcedureBlock, SyntaxKind.FunctionBlock])
| 34.903226 | 104 | 0.67098 |
794597b234f0920530bd02e95c19845f2a346bfe | 665 | py | Python | adv/fjorm.py | 6tennis/dl | 69eb7e71da9fabe9e7ec40c461b525b4f967f345 | [
"Apache-2.0"
] | null | null | null | adv/fjorm.py | 6tennis/dl | 69eb7e71da9fabe9e7ec40c461b525b4f967f345 | [
"Apache-2.0"
] | null | null | null | adv/fjorm.py | 6tennis/dl | 69eb7e71da9fabe9e7ec40c461b525b4f967f345 | [
"Apache-2.0"
] | null | null | null | from core.advbase import *
from slot.a import *
def module():
return Fjorm
class Fjorm(Adv):
comment = 'last bravery once at start'
a3 = [('prep',1.00), ('scharge_all', 0.05)]
conf = {}
conf['slots.a'] = Resounding_Rendition()+His_Clever_Brother()
conf['acl'] = """
`s1
`s3
`s2, fsc or s=3
`fs, x=5
"""
coab = ['Blade', 'Summer_Estelle', 'Xander']
def prerun(self):
Teambuff('last_bravery',0.3,15).on()
def s1_proc(self, e):
self.afflics.frostbite('s1',120,0.41)
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv) | 22.166667 | 65 | 0.581955 |
794597df104e6aa737b0d0e853dede850dc603ca | 641 | py | Python | scripts/systems/YearnSystem.py | mandalorian-101/badger-system | 2b0ee9bd77a2cc6f875b9b984ae4dfe713bbc55c | [
"MIT"
] | null | null | null | scripts/systems/YearnSystem.py | mandalorian-101/badger-system | 2b0ee9bd77a2cc6f875b9b984ae4dfe713bbc55c | [
"MIT"
] | null | null | null | scripts/systems/YearnSystem.py | mandalorian-101/badger-system | 2b0ee9bd77a2cc6f875b9b984ae4dfe713bbc55c | [
"MIT"
] | null | null | null | from brownie import interface
class YearnSystem:
def __init__(self, chain_registry):
self.chain_registry = chain_registry
def registry(self):
return interface.RegistryAPI(self.chain_registry.yearn.registry)
def experimental_vault_by_key(self, key):
if not key in self.chain_registry.yearn.experimental_vaults:
raise Exception("Token with key {} not found in registry".format(key))
address = self.chain_registry.yearn.experimental_vaults[key]
return self.vault_by_address(address)
def vault_by_address(self, address):
return interface.VaultAPI(address) | 33.736842 | 82 | 0.717629 |
794597ff17d416bb232d1a378c064d0ae9860836 | 71,139 | py | Python | alphafold/model/modules.py | thenotcompany/alphafold | 1d43aaff941c84dc56311076b58795797e49107b | [
"Apache-2.0"
] | 45 | 2022-01-12T04:39:36.000Z | 2022-03-25T12:33:36.000Z | alphafold/model/modules.py | thenotcompany/alphafold | 1d43aaff941c84dc56311076b58795797e49107b | [
"Apache-2.0"
] | 6 | 2022-01-15T16:48:39.000Z | 2022-03-15T16:20:34.000Z | alphafold/model/modules.py | thenotcompany/alphafold | 1d43aaff941c84dc56311076b58795797e49107b | [
"Apache-2.0"
] | 10 | 2022-01-12T11:28:03.000Z | 2022-03-30T11:36:41.000Z | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules and code used in the core part of AlphaFold.
The structure generation code is in 'folding.py'.
"""
import functools
from alphafold.common import residue_constants
from alphafold.model import all_atom
from alphafold.model import common_modules
from alphafold.model import folding
from alphafold.model import layer_stack
from alphafold.model import lddt
from alphafold.model import mapping
from alphafold.model import prng
from alphafold.model import quat_affine
from alphafold.model import utils
import haiku as hk
import jax
import jax.numpy as jnp
def softmax_cross_entropy(logits, labels):
"""Computes softmax cross entropy given logits and one-hot class labels."""
loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
return jnp.asarray(loss)
def sigmoid_cross_entropy(logits, labels):
"""Computes sigmoid cross entropy given logits and multiple class labels."""
log_p = jax.nn.log_sigmoid(logits)
# log(1 - sigmoid(x)) = log_sigmoid(-x), the latter is more numerically stable
log_not_p = jax.nn.log_sigmoid(-logits)
loss = -labels * log_p - (1. - labels) * log_not_p
return jnp.asarray(loss)
def apply_dropout(*, tensor, safe_key, rate, is_training, broadcast_dim=None):
"""Applies dropout to a tensor."""
if is_training and rate != 0.0:
shape = list(tensor.shape)
if broadcast_dim is not None:
shape[broadcast_dim] = 1
keep_rate = 1.0 - rate
keep = jax.random.bernoulli(safe_key.get(), keep_rate, shape=shape)
return keep * tensor / keep_rate
else:
return tensor
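# Shape note on broadcast_dim in the code above: when it is set, the mask is
# sampled with that axis collapsed to 1 and then broadcast, e.g. for an
# [N_seq, N_res, c_m] MSA activation with broadcast_dim=0 every row shares the
# same [1, N_res, c_m] dropout pattern; dropout_wrapper below uses this to
# implement shared dropout for 'per_row' modules.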
def dropout_wrapper(module,
input_act,
mask,
safe_key,
global_config,
output_act=None,
is_training=True,
**kwargs):
"""Applies module + dropout + residual update."""
if output_act is None:
output_act = input_act
gc = global_config
residual = module(input_act, mask, is_training=is_training, **kwargs)
dropout_rate = 0.0 if gc.deterministic else module.config.dropout_rate
if module.config.shared_dropout:
if module.config.orientation == 'per_row':
broadcast_dim = 0
else:
broadcast_dim = 1
else:
broadcast_dim = None
residual = apply_dropout(tensor=residual,
safe_key=safe_key,
rate=dropout_rate,
is_training=is_training,
broadcast_dim=broadcast_dim)
new_act = output_act + residual
return new_act
def create_extra_msa_feature(batch):
"""Expand extra_msa into 1hot and concat with other extra msa features.
We do this as late as possible as the one_hot extra msa can be very large.
Arguments:
batch: a dictionary with the following keys:
* 'extra_msa': [N_extra_seq, N_res] MSA that wasn't selected as a cluster
centre. Note, that this is not one-hot encoded.
* 'extra_has_deletion': [N_extra_seq, N_res] Whether there is a deletion to
the left of each position in the extra MSA.
* 'extra_deletion_value': [N_extra_seq, N_res] The number of deletions to
the left of each position in the extra MSA.
Returns:
Concatenated tensor of extra MSA features.
"""
# 23 = 20 amino acids + 'X' for unknown + gap + bert mask
msa_1hot = jax.nn.one_hot(batch['extra_msa'], 23)
msa_feat = [msa_1hot,
jnp.expand_dims(batch['extra_has_deletion'], axis=-1),
jnp.expand_dims(batch['extra_deletion_value'], axis=-1)]
return jnp.concatenate(msa_feat, axis=-1)
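# Shape sketch for the return value above: for an [N_extra_seq, N_res] extra
# MSA the concatenated feature is [N_extra_seq, N_res, 25], i.e. 23 one-hot
# channels + 1 has_deletion channel + 1 deletion_value channel.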
class AlphaFoldIteration(hk.Module):
"""A single recycling iteration of AlphaFold architecture.
Computes ensembled (averaged) representations from the provided features.
These representations are then passed to the various heads
that have been requested by the configuration file. Each head also returns a
loss which is combined as a weighted sum to produce the total loss.
Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 3-22
"""
def __init__(self, config, global_config, name='alphafold_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
ensembled_batch,
non_ensembled_batch,
is_training,
compute_loss=False,
ensemble_representations=False,
return_representations=False):
num_ensemble = jnp.asarray(ensembled_batch['seq_length'].shape[0])
if not ensemble_representations:
assert ensembled_batch['seq_length'].shape[0] == 1
def slice_batch(i):
b = {k: v[i] for k, v in ensembled_batch.items()}
b.update(non_ensembled_batch)
return b
# Compute representations for each batch element and average.
evoformer_module = EmbeddingsAndEvoformer(
self.config.embeddings_and_evoformer, self.global_config)
batch0 = slice_batch(0)
representations = evoformer_module(batch0, is_training)
# MSA representations are not ensembled so
# we don't pass tensor into the loop.
msa_representation = representations['msa']
del representations['msa']
# Average the representations (except MSA) over the batch dimension.
if ensemble_representations:
def body(x):
"""Add one element to the representations ensemble."""
i, current_representations = x
feats = slice_batch(i)
representations_update = evoformer_module(
feats, is_training)
new_representations = {}
for k in current_representations:
new_representations[k] = (
current_representations[k] + representations_update[k])
return i+1, new_representations
if hk.running_init():
# When initializing the Haiku module, run one iteration of the
# while_loop to initialize the Haiku modules used in `body`.
_, representations = body((1, representations))
else:
_, representations = hk.while_loop(
lambda x: x[0] < num_ensemble,
body,
(1, representations))
for k in representations:
if k != 'msa':
representations[k] /= num_ensemble.astype(representations[k].dtype)
representations['msa'] = msa_representation
batch = batch0 # We are not ensembled from here on.
heads = {}
for head_name, head_config in sorted(self.config.heads.items()):
if not head_config.weight:
continue # Do not instantiate zero-weight heads.
head_factory = {
'masked_msa': MaskedMsaHead,
'distogram': DistogramHead,
'structure_module': functools.partial(
folding.StructureModule, compute_loss=compute_loss),
'predicted_lddt': PredictedLDDTHead,
'predicted_aligned_error': PredictedAlignedErrorHead,
'experimentally_resolved': ExperimentallyResolvedHead,
}[head_name]
heads[head_name] = (head_config,
head_factory(head_config, self.global_config))
total_loss = 0.
ret = {}
ret['representations'] = representations
def loss(module, head_config, ret, name, filter_ret=True):
if filter_ret:
value = ret[name]
else:
value = ret
loss_output = module.loss(value, batch)
ret[name].update(loss_output)
loss = head_config.weight * ret[name]['loss']
return loss
for name, (head_config, module) in heads.items():
# Skip PredictedLDDTHead and PredictedAlignedErrorHead until
# StructureModule is executed.
if name in ('predicted_lddt', 'predicted_aligned_error'):
continue
else:
ret[name] = module(representations, batch, is_training)
if 'representations' in ret[name]:
# Extra representations from the head. Used by the structure module
# to provide activations for the PredictedLDDTHead.
representations.update(ret[name].pop('representations'))
if compute_loss:
total_loss += loss(module, head_config, ret, name)
if self.config.heads.get('predicted_lddt.weight', 0.0):
# Add PredictedLDDTHead after StructureModule executes.
name = 'predicted_lddt'
# Feed all previous results to give access to structure_module result.
head_config, module = heads[name]
ret[name] = module(representations, batch, is_training)
if compute_loss:
total_loss += loss(module, head_config, ret, name, filter_ret=False)
if ('predicted_aligned_error' in self.config.heads
and self.config.heads.get('predicted_aligned_error.weight', 0.0)):
# Add PredictedAlignedErrorHead after StructureModule executes.
name = 'predicted_aligned_error'
# Feed all previous results to give access to structure_module result.
head_config, module = heads[name]
ret[name] = module(representations, batch, is_training)
if compute_loss:
total_loss += loss(module, head_config, ret, name, filter_ret=False)
if compute_loss:
return ret, total_loss
else:
return ret
class AlphaFold(hk.Module):
"""AlphaFold model with recycling.
Jumper et al. (2021) Suppl. Alg. 2 "Inference"
"""
def __init__(self, config, name='alphafold'):
super().__init__(name=name)
self.config = config
self.global_config = config.global_config
def __call__(
self,
batch,
is_training,
compute_loss=False,
ensemble_representations=False,
return_representations=False):
"""Run the AlphaFold model.
Arguments:
batch: Dictionary with inputs to the AlphaFold model.
is_training: Whether the system is in training or inference mode.
compute_loss: Whether to compute losses (requires extra features
to be present in the batch and knowing the true structure).
ensemble_representations: Whether to use ensembling of representations.
return_representations: Whether to also return the intermediate
representations.
Returns:
When compute_loss is True:
a tuple of loss and output of AlphaFoldIteration.
When compute_loss is False:
just output of AlphaFoldIteration.
The output of AlphaFoldIteration is a nested dictionary containing
predictions from the various heads.
"""
impl = AlphaFoldIteration(self.config, self.global_config)
batch_size, num_residues = batch['aatype'].shape
def get_prev(ret):
new_prev = {
'prev_pos':
ret['structure_module']['final_atom_positions'],
'prev_msa_first_row': ret['representations']['msa_first_row'],
'prev_pair': ret['representations']['pair'],
}
return jax.tree_map(jax.lax.stop_gradient, new_prev)
def do_call(prev,
recycle_idx,
compute_loss=compute_loss):
if self.config.resample_msa_in_recycling:
num_ensemble = batch_size // (self.config.num_recycle + 1)
def slice_recycle_idx(x):
start = recycle_idx * num_ensemble
size = num_ensemble
return jax.lax.dynamic_slice_in_dim(x, start, size, axis=0)
ensembled_batch = jax.tree_map(slice_recycle_idx, batch)
else:
num_ensemble = batch_size
ensembled_batch = batch
non_ensembled_batch = jax.tree_map(lambda x: x, prev)
return impl(
ensembled_batch=ensembled_batch,
non_ensembled_batch=non_ensembled_batch,
is_training=is_training,
compute_loss=compute_loss,
ensemble_representations=ensemble_representations)
if self.config.num_recycle:
emb_config = self.config.embeddings_and_evoformer
prev = {
'prev_pos': jnp.zeros(
[num_residues, residue_constants.atom_type_num, 3]),
'prev_msa_first_row': jnp.zeros(
[num_residues, emb_config.msa_channel]),
'prev_pair': jnp.zeros(
[num_residues, num_residues, emb_config.pair_channel]),
}
if 'num_iter_recycling' in batch:
# Training time: num_iter_recycling is in batch.
# The value for each ensemble batch is the same, so arbitrarily taking
# 0-th.
num_iter = batch['num_iter_recycling'][0]
# Add insurance that we will not run more
# recyclings than the model is configured to run.
num_iter = jnp.minimum(num_iter, self.config.num_recycle)
else:
# Eval mode or tests: use the maximum number of iterations.
num_iter = self.config.num_recycle
body = lambda x: (x[0] + 1, # pylint: disable=g-long-lambda
get_prev(do_call(x[1], recycle_idx=x[0],
compute_loss=False)))
if hk.running_init():
# When initializing the Haiku module, run one iteration of the
# while_loop to initialize the Haiku modules used in `body`.
_, prev = body((0, prev))
else:
_, prev = hk.while_loop(
lambda x: x[0] < num_iter,
body,
(0, prev))
else:
prev = {}
num_iter = 0
ret = do_call(prev=prev, recycle_idx=num_iter)
if compute_loss:
ret = ret[0], [ret[1]]
if not return_representations:
del (ret[0] if compute_loss else ret)['representations'] # pytype: disable=unsupported-operands
return ret
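# Illustrative sketch of how this module is typically wired up with Haiku
# (the config and batch objects are assumed to come from the surrounding
# package; the packaged RunModel in model.py does essentially this):
#
#   def _forward_fn(batch):
#     model = AlphaFold(config.model)
#     return model(batch, is_training=False, compute_loss=False,
#                  ensemble_representations=True)
#
#   forward = hk.transform(_forward_fn)
#   params = forward.init(rng, batch)          # or load pretrained params
#   ret = forward.apply(params, rng, batch)    # rng feeds hk.next_rng_key()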
class TemplatePairStack(hk.Module):
"""Pair stack for the templates.
Jumper et al. (2021) Suppl. Alg. 16 "TemplatePairStack"
"""
def __init__(self, config, global_config, name='template_pair_stack'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, pair_act, pair_mask, is_training, safe_key=None):
"""Builds TemplatePairStack module.
Arguments:
pair_act: Pair activations for single template, shape [N_res, N_res, c_t].
pair_mask: Pair mask, shape [N_res, N_res].
is_training: Whether the module is in training mode.
safe_key: Safe key object encapsulating the random number generation key.
Returns:
Updated pair_act, shape [N_res, N_res, c_t].
"""
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
gc = self.global_config
c = self.config
if not c.num_block:
return pair_act
def block(x):
"""One block of the template pair stack."""
pair_act, safe_key = x
dropout_wrapper_fn = functools.partial(
dropout_wrapper, is_training=is_training, global_config=gc)
safe_key, *sub_keys = safe_key.split(6)
sub_keys = iter(sub_keys)
pair_act = dropout_wrapper_fn(
TriangleAttention(c.triangle_attention_starting_node, gc,
name='triangle_attention_starting_node'),
pair_act,
pair_mask,
next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleAttention(c.triangle_attention_ending_node, gc,
name='triangle_attention_ending_node'),
pair_act,
pair_mask,
next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleMultiplication(c.triangle_multiplication_outgoing, gc,
name='triangle_multiplication_outgoing'),
pair_act,
pair_mask,
next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleMultiplication(c.triangle_multiplication_incoming, gc,
name='triangle_multiplication_incoming'),
pair_act,
pair_mask,
next(sub_keys))
pair_act = dropout_wrapper_fn(
Transition(c.pair_transition, gc, name='pair_transition'),
pair_act,
pair_mask,
next(sub_keys))
return pair_act, safe_key
if gc.use_remat:
block = hk.remat(block)
res_stack = layer_stack.layer_stack(c.num_block)(block)
pair_act, safe_key = res_stack((pair_act, safe_key))
return pair_act
class Transition(hk.Module):
"""Transition layer.
Jumper et al. (2021) Suppl. Alg. 9 "MSATransition"
Jumper et al. (2021) Suppl. Alg. 15 "PairTransition"
"""
def __init__(self, config, global_config, name='transition_block'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, act, mask, is_training=True):
"""Builds Transition module.
Arguments:
act: A tensor of queries of size [batch_size, N_res, N_channel].
mask: A tensor denoting the mask of size [batch_size, N_res].
is_training: Whether the module is in training mode.
Returns:
A float32 tensor of size [batch_size, N_res, N_channel].
"""
_, _, nc = act.shape
num_intermediate = int(nc * self.config.num_intermediate_factor)
mask = jnp.expand_dims(mask, axis=-1)
act = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='input_layer_norm')(
act)
transition_module = hk.Sequential([
common_modules.Linear(
num_intermediate,
initializer='relu',
name='transition1'), jax.nn.relu,
common_modules.Linear(
nc,
initializer=utils.final_init(self.global_config),
name='transition2')
])
act = mapping.inference_subbatch(
transition_module,
self.global_config.subbatch_size,
batched_args=[act],
nonbatched_args=[],
low_memory=not is_training)
return act
def glorot_uniform():
return hk.initializers.VarianceScaling(scale=1.0,
mode='fan_avg',
distribution='uniform')
class Attention(hk.Module):
"""Multihead attention."""
def __init__(self, config, global_config, output_dim, name='attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
self.output_dim = output_dim
def __call__(self, q_data, m_data, bias, nonbatched_bias=None):
"""Builds Attention module.
Arguments:
q_data: A tensor of queries, shape [batch_size, N_queries, q_channels].
m_data: A tensor of memories from which the keys and values are
projected, shape [batch_size, N_keys, m_channels].
bias: A bias for the attention, shape [batch_size, N_queries, N_keys].
nonbatched_bias: Shared bias, shape [N_queries, N_keys].
Returns:
A float32 tensor of shape [batch_size, N_queries, output_dim].
"""
# Sensible default for when the config keys are missing
key_dim = self.config.get('key_dim', int(q_data.shape[-1]))
value_dim = self.config.get('value_dim', int(m_data.shape[-1]))
num_head = self.config.num_head
assert key_dim % num_head == 0
assert value_dim % num_head == 0
key_dim = key_dim // num_head
value_dim = value_dim // num_head
q_weights = hk.get_parameter(
'query_w', shape=(q_data.shape[-1], num_head, key_dim),
init=glorot_uniform())
k_weights = hk.get_parameter(
'key_w', shape=(m_data.shape[-1], num_head, key_dim),
init=glorot_uniform())
v_weights = hk.get_parameter(
'value_w', shape=(m_data.shape[-1], num_head, value_dim),
init=glorot_uniform())
q = jnp.einsum('bqa,ahc->bqhc', q_data, q_weights) * key_dim**(-0.5)
k = jnp.einsum('bka,ahc->bkhc', m_data, k_weights)
v = jnp.einsum('bka,ahc->bkhc', m_data, v_weights)
logits = jnp.einsum('bqhc,bkhc->bhqk', q, k) + bias
if nonbatched_bias is not None:
logits += jnp.expand_dims(nonbatched_bias, axis=0)
weights = jax.nn.softmax(logits)
weighted_avg = jnp.einsum('bhqk,bkhc->bqhc', weights, v)
if self.global_config.zero_init:
init = hk.initializers.Constant(0.0)
else:
init = glorot_uniform()
if self.config.gating:
gating_weights = hk.get_parameter(
'gating_w',
shape=(q_data.shape[-1], num_head, value_dim),
init=hk.initializers.Constant(0.0))
gating_bias = hk.get_parameter(
'gating_b',
shape=(num_head, value_dim),
init=hk.initializers.Constant(1.0))
gate_values = jnp.einsum('bqc, chv->bqhv', q_data,
gating_weights) + gating_bias
gate_values = jax.nn.sigmoid(gate_values)
weighted_avg *= gate_values
o_weights = hk.get_parameter(
'output_w', shape=(num_head, value_dim, self.output_dim),
init=init)
o_bias = hk.get_parameter('output_b', shape=(self.output_dim,),
init=hk.initializers.Constant(0.0))
output = jnp.einsum('bqhc,hco->bqo', weighted_avg, o_weights) + o_bias
return output
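# Shape sketch for the attention above: with q_data [B, Q, c_q], m_data
# [B, K, c_m] and num_head h, the q/k/v projections are [B, Q|K, h, c//h],
# the (biased) logits are [B, h, Q, K], and the gated weighted average is
# projected back to [B, Q, output_dim].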
class GlobalAttention(hk.Module):
"""Global attention.
Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention" lines 2-7
"""
def __init__(self, config, global_config, output_dim, name='attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
self.output_dim = output_dim
def __call__(self, q_data, m_data, q_mask, bias):
"""Builds GlobalAttention module.
Arguments:
q_data: A tensor of queries with size [batch_size, N_queries,
q_channels]
      m_data: A tensor of memories from which the keys and values are
        projected. Size [batch_size, N_keys, m_channels].
q_mask: A binary mask for q_data with zeros in the padded sequence
elements and ones otherwise. Size [batch_size, N_queries, q_channels]
(or broadcastable to this shape).
bias: A bias for the attention.
Returns:
A float32 tensor of size [batch_size, N_queries, output_dim].
"""
# Sensible default for when the config keys are missing
key_dim = self.config.get('key_dim', int(q_data.shape[-1]))
value_dim = self.config.get('value_dim', int(m_data.shape[-1]))
num_head = self.config.num_head
assert key_dim % num_head == 0
assert value_dim % num_head == 0
key_dim = key_dim // num_head
value_dim = value_dim // num_head
q_weights = hk.get_parameter(
'query_w', shape=(q_data.shape[-1], num_head, key_dim),
init=glorot_uniform())
k_weights = hk.get_parameter(
'key_w', shape=(m_data.shape[-1], key_dim),
init=glorot_uniform())
v_weights = hk.get_parameter(
'value_w', shape=(m_data.shape[-1], value_dim),
init=glorot_uniform())
v = jnp.einsum('bka,ac->bkc', m_data, v_weights)
q_avg = utils.mask_mean(q_mask, q_data, axis=1)
q = jnp.einsum('ba,ahc->bhc', q_avg, q_weights) * key_dim**(-0.5)
k = jnp.einsum('bka,ac->bkc', m_data, k_weights)
bias = (1e9 * (q_mask[:, None, :, 0] - 1.))
logits = jnp.einsum('bhc,bkc->bhk', q, k) + bias
weights = jax.nn.softmax(logits)
weighted_avg = jnp.einsum('bhk,bkc->bhc', weights, v)
if self.global_config.zero_init:
init = hk.initializers.Constant(0.0)
else:
init = glorot_uniform()
o_weights = hk.get_parameter(
'output_w', shape=(num_head, value_dim, self.output_dim),
init=init)
o_bias = hk.get_parameter('output_b', shape=(self.output_dim,),
init=hk.initializers.Constant(0.0))
if self.config.gating:
gating_weights = hk.get_parameter(
'gating_w',
shape=(q_data.shape[-1], num_head, value_dim),
init=hk.initializers.Constant(0.0))
gating_bias = hk.get_parameter(
'gating_b',
shape=(num_head, value_dim),
init=hk.initializers.Constant(1.0))
gate_values = jnp.einsum('bqc, chv->bqhv', q_data, gating_weights)
gate_values = jax.nn.sigmoid(gate_values + gating_bias)
weighted_avg = weighted_avg[:, None] * gate_values
output = jnp.einsum('bqhc,hco->bqo', weighted_avg, o_weights) + o_bias
else:
output = jnp.einsum('bhc,hco->bo', weighted_avg, o_weights) + o_bias
output = output[:, None]
return output
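# Note on GlobalAttention vs. Attention above: the queries are mask-averaged
# over the sequence dimension (utils.mask_mean over axis 1), so each head
# computes a single attention distribution that is shared by every query
# position. Also note that the `bias` argument is recomputed internally from
# `q_mask`, so the value passed in by the caller is effectively ignored.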
class MSARowAttentionWithPairBias(hk.Module):
"""MSA per-row attention biased by the pair representation.
Jumper et al. (2021) Suppl. Alg. 7 "MSARowAttentionWithPairBias"
"""
def __init__(self, config, global_config,
name='msa_row_attention_with_pair_bias'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
msa_act,
msa_mask,
pair_act,
is_training=False):
"""Builds MSARowAttentionWithPairBias module.
Arguments:
msa_act: [N_seq, N_res, c_m] MSA representation.
msa_mask: [N_seq, N_res] mask of non-padded regions.
pair_act: [N_res, N_res, c_z] pair representation.
is_training: Whether the module is in training mode.
Returns:
Update to msa_act, shape [N_seq, N_res, c_m].
"""
c = self.config
assert len(msa_act.shape) == 3
assert len(msa_mask.shape) == 2
assert c.orientation == 'per_row'
bias = (1e9 * (msa_mask - 1.))[:, None, None, :]
assert len(bias.shape) == 4
msa_act = hk.LayerNorm(
axis=[-1], create_scale=True, create_offset=True, name='query_norm')(
msa_act)
pair_act = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='feat_2d_norm')(
pair_act)
init_factor = 1. / jnp.sqrt(int(pair_act.shape[-1]))
weights = hk.get_parameter(
'feat_2d_weights',
shape=(pair_act.shape[-1], c.num_head),
init=hk.initializers.RandomNormal(stddev=init_factor))
nonbatched_bias = jnp.einsum('qkc,ch->hqk', pair_act, weights)
attn_mod = Attention(
c, self.global_config, msa_act.shape[-1])
msa_act = mapping.inference_subbatch(
attn_mod,
self.global_config.subbatch_size,
batched_args=[msa_act, msa_act, bias],
nonbatched_args=[nonbatched_bias],
low_memory=not is_training)
return msa_act
class MSAColumnAttention(hk.Module):
"""MSA per-column attention.
Jumper et al. (2021) Suppl. Alg. 8 "MSAColumnAttention"
"""
def __init__(self, config, global_config, name='msa_column_attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
msa_act,
msa_mask,
is_training=False):
"""Builds MSAColumnAttention module.
Arguments:
msa_act: [N_seq, N_res, c_m] MSA representation.
msa_mask: [N_seq, N_res] mask of non-padded regions.
is_training: Whether the module is in training mode.
Returns:
      Update to msa_act, shape [N_seq, N_res, c_m].
"""
c = self.config
assert len(msa_act.shape) == 3
assert len(msa_mask.shape) == 2
assert c.orientation == 'per_column'
msa_act = jnp.swapaxes(msa_act, -2, -3)
msa_mask = jnp.swapaxes(msa_mask, -1, -2)
bias = (1e9 * (msa_mask - 1.))[:, None, None, :]
assert len(bias.shape) == 4
msa_act = hk.LayerNorm(
axis=[-1], create_scale=True, create_offset=True, name='query_norm')(
msa_act)
attn_mod = Attention(
c, self.global_config, msa_act.shape[-1])
msa_act = mapping.inference_subbatch(
attn_mod,
self.global_config.subbatch_size,
batched_args=[msa_act, msa_act, bias],
nonbatched_args=[],
low_memory=not is_training)
msa_act = jnp.swapaxes(msa_act, -2, -3)
return msa_act
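# Column-wise MSA attention is implemented by transposing the sequence and
# residue axes, reusing the row-wise Attention module, and transposing back.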
class MSAColumnGlobalAttention(hk.Module):
"""MSA per-column global attention.
Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention"
"""
def __init__(self, config, global_config, name='msa_column_global_attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
msa_act,
msa_mask,
is_training=False):
"""Builds MSAColumnGlobalAttention module.
Arguments:
msa_act: [N_seq, N_res, c_m] MSA representation.
msa_mask: [N_seq, N_res] mask of non-padded regions.
is_training: Whether the module is in training mode.
Returns:
Update to msa_act, shape [N_seq, N_res, c_m].
"""
c = self.config
assert len(msa_act.shape) == 3
assert len(msa_mask.shape) == 2
assert c.orientation == 'per_column'
msa_act = jnp.swapaxes(msa_act, -2, -3)
msa_mask = jnp.swapaxes(msa_mask, -1, -2)
bias = (1e9 * (msa_mask - 1.))[:, None, None, :]
assert len(bias.shape) == 4
msa_act = hk.LayerNorm(
axis=[-1], create_scale=True, create_offset=True, name='query_norm')(
msa_act)
attn_mod = GlobalAttention(
c, self.global_config, msa_act.shape[-1],
name='attention')
# [N_seq, N_res, 1]
msa_mask = jnp.expand_dims(msa_mask, axis=-1)
msa_act = mapping.inference_subbatch(
attn_mod,
self.global_config.subbatch_size,
batched_args=[msa_act, msa_act, msa_mask, bias],
nonbatched_args=[],
low_memory=not is_training)
msa_act = jnp.swapaxes(msa_act, -2, -3)
return msa_act
class TriangleAttention(hk.Module):
"""Triangle Attention.
Jumper et al. (2021) Suppl. Alg. 13 "TriangleAttentionStartingNode"
Jumper et al. (2021) Suppl. Alg. 14 "TriangleAttentionEndingNode"
"""
def __init__(self, config, global_config, name='triangle_attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, pair_act, pair_mask, is_training=False):
"""Builds TriangleAttention module.
Arguments:
pair_act: [N_res, N_res, c_z] pair activations tensor
pair_mask: [N_res, N_res] mask of non-padded regions in the tensor.
is_training: Whether the module is in training mode.
Returns:
Update to pair_act, shape [N_res, N_res, c_z].
"""
c = self.config
assert len(pair_act.shape) == 3
assert len(pair_mask.shape) == 2
assert c.orientation in ['per_row', 'per_column']
if c.orientation == 'per_column':
pair_act = jnp.swapaxes(pair_act, -2, -3)
pair_mask = jnp.swapaxes(pair_mask, -1, -2)
bias = (1e9 * (pair_mask - 1.))[:, None, None, :]
assert len(bias.shape) == 4
pair_act = hk.LayerNorm(
axis=[-1], create_scale=True, create_offset=True, name='query_norm')(
pair_act)
init_factor = 1. / jnp.sqrt(int(pair_act.shape[-1]))
weights = hk.get_parameter(
'feat_2d_weights',
shape=(pair_act.shape[-1], c.num_head),
init=hk.initializers.RandomNormal(stddev=init_factor))
nonbatched_bias = jnp.einsum('qkc,ch->hqk', pair_act, weights)
attn_mod = Attention(
c, self.global_config, pair_act.shape[-1])
pair_act = mapping.inference_subbatch(
attn_mod,
self.global_config.subbatch_size,
batched_args=[pair_act, pair_act, bias],
nonbatched_args=[nonbatched_bias],
low_memory=not is_training)
if c.orientation == 'per_column':
pair_act = jnp.swapaxes(pair_act, -2, -3)
return pair_act
class MaskedMsaHead(hk.Module):
"""Head to predict MSA at the masked locations.
The MaskedMsaHead employs a BERT-style objective to reconstruct a masked
version of the full MSA, based on a linear projection of
the MSA representation.
Jumper et al. (2021) Suppl. Sec. 1.9.9 "Masked MSA prediction"
"""
def __init__(self, config, global_config, name='masked_msa_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, representations, batch, is_training):
"""Builds MaskedMsaHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'msa': MSA representation, shape [N_seq, N_res, c_m].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
Dictionary containing:
* 'logits': logits of shape [N_seq, N_res, N_aatype] with
          (unnormalized) log probabilities of the predicted aatype at each position.
"""
del batch
logits = common_modules.Linear(
self.config.num_output,
initializer=utils.final_init(self.global_config),
name='logits')(
representations['msa'])
return dict(logits=logits)
def loss(self, value, batch):
errors = softmax_cross_entropy(
labels=jax.nn.one_hot(batch['true_msa'], num_classes=23),
logits=value['logits'])
loss = (jnp.sum(errors * batch['bert_mask'], axis=(-2, -1)) /
(1e-8 + jnp.sum(batch['bert_mask'], axis=(-2, -1))))
return {'loss': loss}
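# The loss above is a BERT-style masked-token cross-entropy averaged over the
# positions selected by `bert_mask`; the 1e-8 term only guards against an
# all-zero mask. The 23 classes are assumed (not stated in this file) to cover
# the 20 standard amino acids plus unknown (X), gap and the mask token.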
class PredictedLDDTHead(hk.Module):
"""Head to predict the per-residue LDDT to be used as a confidence measure.
Jumper et al. (2021) Suppl. Sec. 1.9.6 "Model confidence prediction (pLDDT)"
Jumper et al. (2021) Suppl. Alg. 29 "predictPerResidueLDDT_Ca"
"""
def __init__(self, config, global_config, name='predicted_lddt_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, representations, batch, is_training):
"""Builds ExperimentallyResolvedHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'structure_module': Single representation from the structure module,
shape [N_res, c_s].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
Dictionary containing :
* 'logits': logits of shape [N_res, N_bins] with
          (unnormalized) log probabilities of the binned predicted lDDT.
"""
act = representations['structure_module']
act = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='input_layer_norm')(
act)
act = common_modules.Linear(
self.config.num_channels,
initializer='relu',
name='act_0')(
act)
act = jax.nn.relu(act)
act = common_modules.Linear(
self.config.num_channels,
initializer='relu',
name='act_1')(
act)
act = jax.nn.relu(act)
logits = common_modules.Linear(
self.config.num_bins,
initializer=utils.final_init(self.global_config),
name='logits')(
act)
# Shape (batch_size, num_res, num_bins)
return dict(logits=logits)
def loss(self, value, batch):
# Shape (num_res, 37, 3)
pred_all_atom_pos = value['structure_module']['final_atom_positions']
# Shape (num_res, 37, 3)
true_all_atom_pos = batch['all_atom_positions']
# Shape (num_res, 37)
all_atom_mask = batch['all_atom_mask']
# Shape (num_res,)
lddt_ca = lddt.lddt(
# Shape (batch_size, num_res, 3)
predicted_points=pred_all_atom_pos[None, :, 1, :],
# Shape (batch_size, num_res, 3)
true_points=true_all_atom_pos[None, :, 1, :],
# Shape (batch_size, num_res, 1)
true_points_mask=all_atom_mask[None, :, 1:2].astype(jnp.float32),
cutoff=15.,
per_residue=True)[0]
lddt_ca = jax.lax.stop_gradient(lddt_ca)
num_bins = self.config.num_bins
bin_index = jnp.floor(lddt_ca * num_bins).astype(jnp.int32)
# protect against out of range for lddt_ca == 1
bin_index = jnp.minimum(bin_index, num_bins - 1)
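    # Worked example: with num_bins = 50, lddt_ca = 0.83 lands in bin
    # floor(0.83 * 50) = 41; a perfect lddt_ca = 1.0 would give bin 50 and is
    # clamped to the last valid bin, 49.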
lddt_ca_one_hot = jax.nn.one_hot(bin_index, num_classes=num_bins)
# Shape (num_res, num_channel)
logits = value['predicted_lddt']['logits']
errors = softmax_cross_entropy(labels=lddt_ca_one_hot, logits=logits)
# Shape (num_res,)
mask_ca = all_atom_mask[:, residue_constants.atom_order['CA']]
mask_ca = mask_ca.astype(jnp.float32)
loss = jnp.sum(errors * mask_ca) / (jnp.sum(mask_ca) + 1e-8)
if self.config.filter_by_resolution:
# NMR & distillation have resolution = 0
loss *= ((batch['resolution'] >= self.config.min_resolution)
& (batch['resolution'] <= self.config.max_resolution)).astype(
jnp.float32)
output = {'loss': loss}
return output
class PredictedAlignedErrorHead(hk.Module):
"""Head to predict the distance errors in the backbone alignment frames.
Can be used to compute predicted TM-Score.
Jumper et al. (2021) Suppl. Sec. 1.9.7 "TM-score prediction"
"""
def __init__(self, config, global_config,
name='predicted_aligned_error_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, representations, batch, is_training):
"""Builds PredictedAlignedErrorHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'pair': pair representation, shape [N_res, N_res, c_z].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
Dictionary containing:
* logits: logits for aligned error, shape [N_res, N_res, N_bins].
* bin_breaks: array containing bin breaks, shape [N_bins - 1].
"""
act = representations['pair']
# Shape (num_res, num_res, num_bins)
logits = common_modules.Linear(
self.config.num_bins,
initializer=utils.final_init(self.global_config),
name='logits')(act)
# Shape (num_bins,)
breaks = jnp.linspace(
0., self.config.max_error_bin, self.config.num_bins - 1)
return dict(logits=logits, breaks=breaks)
def loss(self, value, batch):
# Shape (num_res, 7)
predicted_affine = quat_affine.QuatAffine.from_tensor(
value['structure_module']['final_affines'])
# Shape (num_res, 7)
true_affine = quat_affine.QuatAffine.from_tensor(
batch['backbone_affine_tensor'])
# Shape (num_res)
mask = batch['backbone_affine_mask']
# Shape (num_res, num_res)
square_mask = mask[:, None] * mask[None, :]
num_bins = self.config.num_bins
    # Shape (num_bins - 1,)
breaks = value['predicted_aligned_error']['breaks']
    # Shape (num_res, num_res, num_bins)
logits = value['predicted_aligned_error']['logits']
# Compute the squared error for each alignment.
def _local_frame_points(affine):
points = [jnp.expand_dims(x, axis=-2) for x in affine.translation]
return affine.invert_point(points, extra_dims=1)
error_dist2_xyz = [
jnp.square(a - b)
for a, b in zip(_local_frame_points(predicted_affine),
_local_frame_points(true_affine))]
error_dist2 = sum(error_dist2_xyz)
# Shape (num_res, num_res)
# First num_res are alignment frames, second num_res are the residues.
error_dist2 = jax.lax.stop_gradient(error_dist2)
sq_breaks = jnp.square(breaks)
true_bins = jnp.sum((
error_dist2[..., None] > sq_breaks).astype(jnp.int32), axis=-1)
errors = softmax_cross_entropy(
labels=jax.nn.one_hot(true_bins, num_bins, axis=-1), logits=logits)
loss = (jnp.sum(errors * square_mask, axis=(-2, -1)) /
(1e-8 + jnp.sum(square_mask, axis=(-2, -1))))
if self.config.filter_by_resolution:
# NMR & distillation have resolution = 0
loss *= ((batch['resolution'] >= self.config.min_resolution)
& (batch['resolution'] <= self.config.max_resolution)).astype(
jnp.float32)
output = {'loss': loss}
return output
class ExperimentallyResolvedHead(hk.Module):
"""Predicts if an atom is experimentally resolved in a high-res structure.
Only trained on high-resolution X-ray crystals & cryo-EM.
Jumper et al. (2021) Suppl. Sec. 1.9.10 '"Experimentally resolved" prediction'
"""
def __init__(self, config, global_config,
name='experimentally_resolved_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, representations, batch, is_training):
"""Builds ExperimentallyResolvedHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'single': Single representation, shape [N_res, c_s].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
Dictionary containing:
        * 'logits': logits of shape [N_res, 37] in atom37 representation;
          applying a sigmoid converts each logit to the probability that the
          corresponding atom was experimentally resolved.
"""
logits = common_modules.Linear(
37, # atom_exists.shape[-1]
initializer=utils.final_init(self.global_config),
name='logits')(representations['single'])
return dict(logits=logits)
def loss(self, value, batch):
logits = value['logits']
assert len(logits.shape) == 2
# Does the atom appear in the amino acid?
atom_exists = batch['atom37_atom_exists']
# Is the atom resolved in the experiment? Subset of atom_exists,
# *except for OXT*
all_atom_mask = batch['all_atom_mask'].astype(jnp.float32)
xent = sigmoid_cross_entropy(labels=all_atom_mask, logits=logits)
loss = jnp.sum(xent * atom_exists) / (1e-8 + jnp.sum(atom_exists))
if self.config.filter_by_resolution:
# NMR & distillation examples have resolution = 0.
loss *= ((batch['resolution'] >= self.config.min_resolution)
& (batch['resolution'] <= self.config.max_resolution)).astype(
jnp.float32)
output = {'loss': loss}
return output
class TriangleMultiplication(hk.Module):
"""Triangle multiplication layer ("outgoing" or "incoming").
Jumper et al. (2021) Suppl. Alg. 11 "TriangleMultiplicationOutgoing"
Jumper et al. (2021) Suppl. Alg. 12 "TriangleMultiplicationIncoming"
"""
def __init__(self, config, global_config, name='triangle_multiplication'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, act, mask, is_training=True):
"""Builds TriangleMultiplication module.
Arguments:
act: Pair activations, shape [N_res, N_res, c_z]
mask: Pair mask, shape [N_res, N_res].
is_training: Whether the module is in training mode.
Returns:
Outputs, same shape/type as act.
"""
del is_training
c = self.config
gc = self.global_config
mask = mask[..., None]
act = hk.LayerNorm(axis=[-1], create_scale=True, create_offset=True,
name='layer_norm_input')(act)
input_act = act
left_projection = common_modules.Linear(
c.num_intermediate_channel,
name='left_projection')
left_proj_act = mask * left_projection(act)
right_projection = common_modules.Linear(
c.num_intermediate_channel,
name='right_projection')
right_proj_act = mask * right_projection(act)
left_gate_values = jax.nn.sigmoid(common_modules.Linear(
c.num_intermediate_channel,
bias_init=1.,
initializer=utils.final_init(gc),
name='left_gate')(act))
right_gate_values = jax.nn.sigmoid(common_modules.Linear(
c.num_intermediate_channel,
bias_init=1.,
initializer=utils.final_init(gc),
name='right_gate')(act))
left_proj_act *= left_gate_values
right_proj_act *= right_gate_values
# "Outgoing" edges equation: 'ikc,jkc->ijc'
# "Incoming" edges equation: 'kjc,kic->ijc'
# Note on the Suppl. Alg. 11 & 12 notation:
# For the "outgoing" edges, a = left_proj_act and b = right_proj_act
# For the "incoming" edges, it's swapped:
# b = left_proj_act and a = right_proj_act
act = jnp.einsum(c.equation, left_proj_act, right_proj_act)
act = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='center_layer_norm')(
act)
output_channel = int(input_act.shape[-1])
act = common_modules.Linear(
output_channel,
initializer=utils.final_init(gc),
name='output_projection')(act)
gate_values = jax.nn.sigmoid(common_modules.Linear(
output_channel,
bias_init=1.,
initializer=utils.final_init(gc),
name='gating_linear')(input_act))
act *= gate_values
return act
class DistogramHead(hk.Module):
"""Head to predict a distogram.
Jumper et al. (2021) Suppl. Sec. 1.9.8 "Distogram prediction"
"""
def __init__(self, config, global_config, name='distogram_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, representations, batch, is_training):
"""Builds DistogramHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'pair': pair representation, shape [N_res, N_res, c_z].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
Dictionary containing:
* logits: logits for distogram, shape [N_res, N_res, N_bins].
* bin_breaks: array containing bin breaks, shape [N_bins - 1,].
"""
half_logits = common_modules.Linear(
self.config.num_bins,
initializer=utils.final_init(self.global_config),
name='half_logits')(
representations['pair'])
logits = half_logits + jnp.swapaxes(half_logits, -2, -3)
breaks = jnp.linspace(self.config.first_break, self.config.last_break,
self.config.num_bins - 1)
return dict(logits=logits, bin_edges=breaks)
def loss(self, value, batch):
return _distogram_log_loss(value['logits'], value['bin_edges'],
batch, self.config.num_bins)
def _distogram_log_loss(logits, bin_edges, batch, num_bins):
"""Log loss of a distogram."""
assert len(logits.shape) == 3
positions = batch['pseudo_beta']
mask = batch['pseudo_beta_mask']
assert positions.shape[-1] == 3
sq_breaks = jnp.square(bin_edges)
dist2 = jnp.sum(
jnp.square(
jnp.expand_dims(positions, axis=-2) -
jnp.expand_dims(positions, axis=-3)),
axis=-1,
keepdims=True)
true_bins = jnp.sum(dist2 > sq_breaks, axis=-1)
errors = softmax_cross_entropy(
labels=jax.nn.one_hot(true_bins, num_bins), logits=logits)
square_mask = jnp.expand_dims(mask, axis=-2) * jnp.expand_dims(mask, axis=-1)
avg_error = (
jnp.sum(errors * square_mask, axis=(-2, -1)) /
(1e-6 + jnp.sum(square_mask, axis=(-2, -1))))
dist2 = dist2[..., 0]
return dict(loss=avg_error, true_dist=jnp.sqrt(1e-6 + dist2))
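# Worked example of the binning above (illustrative edges): with
# bin_edges = [4.0, 8.0], a residue pair at 6 A has dist2 = 36, which exceeds
# 4^2 = 16 but not 8^2 = 64, so true_bins = 1 (the middle of three bins).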
class OuterProductMean(hk.Module):
"""Computes mean outer product.
Jumper et al. (2021) Suppl. Alg. 10 "OuterProductMean"
"""
def __init__(self,
config,
global_config,
num_output_channel,
name='outer_product_mean'):
super().__init__(name=name)
self.global_config = global_config
self.config = config
self.num_output_channel = num_output_channel
def __call__(self, act, mask, is_training=True):
"""Builds OuterProductMean module.
Arguments:
act: MSA representation, shape [N_seq, N_res, c_m].
mask: MSA mask, shape [N_seq, N_res].
is_training: Whether the module is in training mode.
Returns:
Update to pair representation, shape [N_res, N_res, c_z].
"""
gc = self.global_config
c = self.config
mask = mask[..., None]
act = hk.LayerNorm([-1], True, True, name='layer_norm_input')(act)
left_act = mask * common_modules.Linear(
c.num_outer_channel,
initializer='linear',
name='left_projection')(
act)
right_act = mask * common_modules.Linear(
c.num_outer_channel,
initializer='linear',
name='right_projection')(
act)
if gc.zero_init:
init_w = hk.initializers.Constant(0.0)
else:
init_w = hk.initializers.VarianceScaling(scale=2., mode='fan_in')
output_w = hk.get_parameter(
'output_w',
shape=(c.num_outer_channel, c.num_outer_channel,
self.num_output_channel),
init=init_w)
output_b = hk.get_parameter(
'output_b', shape=(self.num_output_channel,),
init=hk.initializers.Constant(0.0))
def compute_chunk(left_act):
# This is equivalent to
#
# act = jnp.einsum('abc,ade->dceb', left_act, right_act)
# act = jnp.einsum('dceb,cef->bdf', act, output_w) + output_b
#
# but faster.
left_act = jnp.transpose(left_act, [0, 2, 1])
act = jnp.einsum('acb,ade->dceb', left_act, right_act)
act = jnp.einsum('dceb,cef->dbf', act, output_w) + output_b
return jnp.transpose(act, [1, 0, 2])
act = mapping.inference_subbatch(
compute_chunk,
c.chunk_size,
batched_args=[left_act],
nonbatched_args=[],
low_memory=True,
input_subbatch_dim=1,
output_subbatch_dim=0)
epsilon = 1e-3
norm = jnp.einsum('abc,adc->bdc', mask, mask)
act /= epsilon + norm
return act
def dgram_from_positions(positions, num_bins, min_bin, max_bin):
"""Compute distogram from amino acid positions.
Arguments:
positions: [N_res, 3] Position coordinates.
num_bins: The number of bins in the distogram.
min_bin: The left edge of the first bin.
max_bin: The left edge of the final bin. The final bin catches
everything larger than `max_bin`.
Returns:
Distogram with the specified number of bins.
"""
def squared_difference(x, y):
return jnp.square(x - y)
lower_breaks = jnp.linspace(min_bin, max_bin, num_bins)
lower_breaks = jnp.square(lower_breaks)
upper_breaks = jnp.concatenate([lower_breaks[1:],
jnp.array([1e8], dtype=jnp.float32)], axis=-1)
dist2 = jnp.sum(
squared_difference(
jnp.expand_dims(positions, axis=-2),
jnp.expand_dims(positions, axis=-3)),
axis=-1, keepdims=True)
dgram = ((dist2 > lower_breaks).astype(jnp.float32) *
(dist2 < upper_breaks).astype(jnp.float32))
return dgram
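# Illustrative usage (bin edges are example values, not taken from the config):
#   positions = jnp.array([[0., 0., 0.], [0., 0., 3.8], [0., 0., 7.6]])
#   dgram = dgram_from_positions(positions, num_bins=15, min_bin=3.25,
#                                max_bin=20.75)
#   # dgram.shape == (3, 3, 15); each [i, j] slice one-hot encodes the distance
#   # bin, and is all-zero where the distance falls below min_bin (e.g. i == j).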
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
"""Create pseudo beta features."""
is_gly = jnp.equal(aatype, residue_constants.restype_order['G'])
ca_idx = residue_constants.atom_order['CA']
cb_idx = residue_constants.atom_order['CB']
pseudo_beta = jnp.where(
jnp.tile(is_gly[..., None], [1] * len(is_gly.shape) + [3]),
all_atom_positions[..., ca_idx, :],
all_atom_positions[..., cb_idx, :])
if all_atom_masks is not None:
pseudo_beta_mask = jnp.where(
is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx])
pseudo_beta_mask = pseudo_beta_mask.astype(jnp.float32)
return pseudo_beta, pseudo_beta_mask
else:
return pseudo_beta
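# Glycine has no beta carbon, so its alpha carbon (CA) is used as the
# pseudo-beta position; all other residue types use CB.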
class EvoformerIteration(hk.Module):
"""Single iteration (block) of Evoformer stack.
Jumper et al. (2021) Suppl. Alg. 6 "EvoformerStack" lines 2-10
"""
def __init__(self, config, global_config, is_extra_msa,
name='evoformer_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
self.is_extra_msa = is_extra_msa
def __call__(self, activations, masks, is_training=True, safe_key=None):
"""Builds EvoformerIteration module.
Arguments:
activations: Dictionary containing activations:
* 'msa': MSA activations, shape [N_seq, N_res, c_m].
* 'pair': pair activations, shape [N_res, N_res, c_z].
masks: Dictionary of masks:
* 'msa': MSA mask, shape [N_seq, N_res].
* 'pair': pair mask, shape [N_res, N_res].
is_training: Whether the module is in training mode.
safe_key: prng.SafeKey encapsulating rng key.
Returns:
Outputs, same shape/type as act.
"""
c = self.config
gc = self.global_config
msa_act, pair_act = activations['msa'], activations['pair']
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
msa_mask, pair_mask = masks['msa'], masks['pair']
dropout_wrapper_fn = functools.partial(
dropout_wrapper,
is_training=is_training,
global_config=gc)
safe_key, *sub_keys = safe_key.split(10)
sub_keys = iter(sub_keys)
msa_act = dropout_wrapper_fn(
MSARowAttentionWithPairBias(
c.msa_row_attention_with_pair_bias, gc,
name='msa_row_attention_with_pair_bias'),
msa_act,
msa_mask,
safe_key=next(sub_keys),
pair_act=pair_act)
if not self.is_extra_msa:
attn_mod = MSAColumnAttention(
c.msa_column_attention, gc, name='msa_column_attention')
else:
attn_mod = MSAColumnGlobalAttention(
c.msa_column_attention, gc, name='msa_column_global_attention')
msa_act = dropout_wrapper_fn(
attn_mod,
msa_act,
msa_mask,
safe_key=next(sub_keys))
msa_act = dropout_wrapper_fn(
Transition(c.msa_transition, gc, name='msa_transition'),
msa_act,
msa_mask,
safe_key=next(sub_keys))
pair_act = dropout_wrapper_fn(
OuterProductMean(
config=c.outer_product_mean,
global_config=self.global_config,
num_output_channel=int(pair_act.shape[-1]),
name='outer_product_mean'),
msa_act,
msa_mask,
safe_key=next(sub_keys),
output_act=pair_act)
pair_act = dropout_wrapper_fn(
TriangleMultiplication(c.triangle_multiplication_outgoing, gc,
name='triangle_multiplication_outgoing'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleMultiplication(c.triangle_multiplication_incoming, gc,
name='triangle_multiplication_incoming'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleAttention(c.triangle_attention_starting_node, gc,
name='triangle_attention_starting_node'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleAttention(c.triangle_attention_ending_node, gc,
name='triangle_attention_ending_node'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
pair_act = dropout_wrapper_fn(
Transition(c.pair_transition, gc, name='pair_transition'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
return {'msa': msa_act, 'pair': pair_act}
class EmbeddingsAndEvoformer(hk.Module):
"""Embeds the input data and runs Evoformer.
Produces the MSA, single and pair representations.
Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5-18
"""
def __init__(self, config, global_config, name='evoformer'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, batch, is_training, safe_key=None):
c = self.config
gc = self.global_config
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
# Embed clustered MSA.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5
# Jumper et al. (2021) Suppl. Alg. 3 "InputEmbedder"
preprocess_1d = common_modules.Linear(
c.msa_channel, name='preprocess_1d')(
batch['target_feat'])
preprocess_msa = common_modules.Linear(
c.msa_channel, name='preprocess_msa')(
batch['msa_feat'])
msa_activations = jnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa
left_single = common_modules.Linear(
c.pair_channel, name='left_single')(
batch['target_feat'])
right_single = common_modules.Linear(
c.pair_channel, name='right_single')(
batch['target_feat'])
pair_activations = left_single[:, None] + right_single[None]
mask_2d = batch['seq_mask'][:, None] * batch['seq_mask'][None, :]
# Inject previous outputs for recycling.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 6
# Jumper et al. (2021) Suppl. Alg. 32 "RecyclingEmbedder"
if c.recycle_pos and 'prev_pos' in batch:
prev_pseudo_beta = pseudo_beta_fn(
batch['aatype'], batch['prev_pos'], None)
dgram = dgram_from_positions(prev_pseudo_beta, **self.config.prev_pos)
pair_activations += common_modules.Linear(
c.pair_channel, name='prev_pos_linear')(
dgram)
if c.recycle_features:
if 'prev_msa_first_row' in batch:
prev_msa_first_row = hk.LayerNorm([-1],
True,
True,
name='prev_msa_first_row_norm')(
batch['prev_msa_first_row'])
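        # jax.ops.index_add was removed in newer JAX releases; the equivalent
        # there would be msa_activations.at[0].add(prev_msa_first_row).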
msa_activations = jax.ops.index_add(msa_activations, 0,
prev_msa_first_row)
if 'prev_pair' in batch:
pair_activations += hk.LayerNorm([-1],
True,
True,
name='prev_pair_norm')(
batch['prev_pair'])
# Relative position encoding.
# Jumper et al. (2021) Suppl. Alg. 4 "relpos"
# Jumper et al. (2021) Suppl. Alg. 5 "one_hot"
if c.max_relative_feature:
# Add one-hot-encoded clipped residue distances to the pair activations.
pos = batch['residue_index']
offset = pos[:, None] - pos[None, :]
rel_pos = jax.nn.one_hot(
jnp.clip(
offset + c.max_relative_feature,
a_min=0,
a_max=2 * c.max_relative_feature),
2 * c.max_relative_feature + 1)
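      # 'pair_activiations' (sic) below is left misspelled: the string appears
      # to match the parameter naming used by the released model checkpoints,
      # so renaming it would likely break weight loading.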
pair_activations += common_modules.Linear(
c.pair_channel, name='pair_activiations')(
rel_pos)
# Embed templates into the pair activations.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-13
if c.template.enabled:
template_batch = {k: batch[k] for k in batch if k.startswith('template_')}
template_pair_representation = TemplateEmbedding(c.template, gc)(
pair_activations,
template_batch,
mask_2d,
is_training=is_training)
pair_activations += template_pair_representation
# Embed extra MSA features.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 14-16
extra_msa_feat = create_extra_msa_feature(batch)
extra_msa_activations = common_modules.Linear(
c.extra_msa_channel,
name='extra_msa_activations')(
extra_msa_feat)
# Extra MSA Stack.
# Jumper et al. (2021) Suppl. Alg. 18 "ExtraMsaStack"
extra_msa_stack_input = {
'msa': extra_msa_activations,
'pair': pair_activations,
}
extra_msa_stack_iteration = EvoformerIteration(
c.evoformer, gc, is_extra_msa=True, name='extra_msa_stack')
def extra_msa_stack_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
extra_evoformer_output = extra_msa_stack_iteration(
activations=act,
masks={
'msa': batch['extra_msa_mask'],
'pair': mask_2d
},
is_training=is_training,
safe_key=safe_subkey)
return (extra_evoformer_output, safe_key)
if gc.use_remat:
extra_msa_stack_fn = hk.remat(extra_msa_stack_fn)
extra_msa_stack = layer_stack.layer_stack(
c.extra_msa_stack_num_block)(
extra_msa_stack_fn)
extra_msa_output, safe_key = extra_msa_stack(
(extra_msa_stack_input, safe_key))
pair_activations = extra_msa_output['pair']
evoformer_input = {
'msa': msa_activations,
'pair': pair_activations,
}
evoformer_masks = {'msa': batch['msa_mask'], 'pair': mask_2d}
# Append num_templ rows to msa_activations with template embeddings.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 7-8
if c.template.enabled and c.template.embed_torsion_angles:
num_templ, num_res = batch['template_aatype'].shape
# Embed the templates aatypes.
aatype_one_hot = jax.nn.one_hot(batch['template_aatype'], 22, axis=-1)
# Embed the templates aatype, torsion angles and masks.
# Shape (templates, residues, msa_channels)
ret = all_atom.atom37_to_torsion_angles(
aatype=batch['template_aatype'],
all_atom_pos=batch['template_all_atom_positions'],
all_atom_mask=batch['template_all_atom_masks'],
# Ensure consistent behaviour during testing:
placeholder_for_undefined=not gc.zero_init)
template_features = jnp.concatenate([
aatype_one_hot,
jnp.reshape(
ret['torsion_angles_sin_cos'], [num_templ, num_res, 14]),
jnp.reshape(
ret['alt_torsion_angles_sin_cos'], [num_templ, num_res, 14]),
ret['torsion_angles_mask']], axis=-1)
template_activations = common_modules.Linear(
c.msa_channel,
initializer='relu',
name='template_single_embedding')(
template_features)
template_activations = jax.nn.relu(template_activations)
template_activations = common_modules.Linear(
c.msa_channel,
initializer='relu',
name='template_projection')(
template_activations)
# Concatenate the templates to the msa.
evoformer_input['msa'] = jnp.concatenate(
[evoformer_input['msa'], template_activations], axis=0)
# Concatenate templates masks to the msa masks.
# Use mask from the psi angle, as it only depends on the backbone atoms
# from a single residue.
torsion_angle_mask = ret['torsion_angles_mask'][:, :, 2]
torsion_angle_mask = torsion_angle_mask.astype(
evoformer_masks['msa'].dtype)
evoformer_masks['msa'] = jnp.concatenate(
[evoformer_masks['msa'], torsion_angle_mask], axis=0)
# Main trunk of the network
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 17-18
evoformer_iteration = EvoformerIteration(
c.evoformer, gc, is_extra_msa=False, name='evoformer_iteration')
def evoformer_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
evoformer_output = evoformer_iteration(
activations=act,
masks=evoformer_masks,
is_training=is_training,
safe_key=safe_subkey)
return (evoformer_output, safe_key)
if gc.use_remat:
evoformer_fn = hk.remat(evoformer_fn)
evoformer_stack = layer_stack.layer_stack(c.evoformer_num_block)(
evoformer_fn)
evoformer_output, safe_key = evoformer_stack(
(evoformer_input, safe_key))
msa_activations = evoformer_output['msa']
pair_activations = evoformer_output['pair']
single_activations = common_modules.Linear(
c.seq_channel, name='single_activations')(
msa_activations[0])
num_sequences = batch['msa_feat'].shape[0]
output = {
'single': single_activations,
'pair': pair_activations,
# Crop away template rows such that they are not used in MaskedMsaHead.
'msa': msa_activations[:num_sequences, :, :],
'msa_first_row': msa_activations[0],
}
return output
class SingleTemplateEmbedding(hk.Module):
"""Embeds a single template.
Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9+11
"""
def __init__(self, config, global_config, name='single_template_embedding'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, query_embedding, batch, mask_2d, is_training):
"""Build the single template embedding.
Arguments:
query_embedding: Query pair representation, shape [N_res, N_res, c_z].
batch: A batch of template features (note the template dimension has been
stripped out as this module only runs over a single template).
mask_2d: Padding mask (Note: this doesn't care if a template exists,
unlike the template_pseudo_beta_mask).
is_training: Whether the module is in training mode.
Returns:
A template embedding [N_res, N_res, c_z].
"""
assert mask_2d.dtype == query_embedding.dtype
dtype = query_embedding.dtype
num_res = batch['template_aatype'].shape[0]
num_channels = (self.config.template_pair_stack
.triangle_attention_ending_node.value_dim)
template_mask = batch['template_pseudo_beta_mask']
template_mask_2d = template_mask[:, None] * template_mask[None, :]
template_mask_2d = template_mask_2d.astype(dtype)
template_dgram = dgram_from_positions(batch['template_pseudo_beta'],
**self.config.dgram_features)
template_dgram = template_dgram.astype(dtype)
to_concat = [template_dgram, template_mask_2d[:, :, None]]
aatype = jax.nn.one_hot(batch['template_aatype'], 22, axis=-1, dtype=dtype)
to_concat.append(jnp.tile(aatype[None, :, :], [num_res, 1, 1]))
to_concat.append(jnp.tile(aatype[:, None, :], [1, num_res, 1]))
n, ca, c = [residue_constants.atom_order[a] for a in ('N', 'CA', 'C')]
rot, trans = quat_affine.make_transform_from_reference(
n_xyz=batch['template_all_atom_positions'][:, n],
ca_xyz=batch['template_all_atom_positions'][:, ca],
c_xyz=batch['template_all_atom_positions'][:, c])
affines = quat_affine.QuatAffine(
quaternion=quat_affine.rot_to_quat(rot, unstack_inputs=True),
translation=trans,
rotation=rot,
unstack_inputs=True)
points = [jnp.expand_dims(x, axis=-2) for x in affines.translation]
affine_vec = affines.invert_point(points, extra_dims=1)
inv_distance_scalar = jax.lax.rsqrt(
1e-6 + sum([jnp.square(x) for x in affine_vec]))
# Backbone affine mask: whether the residue has C, CA, N
# (the template mask defined above only considers pseudo CB).
template_mask = (
batch['template_all_atom_masks'][..., n] *
batch['template_all_atom_masks'][..., ca] *
batch['template_all_atom_masks'][..., c])
template_mask_2d = template_mask[:, None] * template_mask[None, :]
inv_distance_scalar *= template_mask_2d.astype(inv_distance_scalar.dtype)
unit_vector = [(x * inv_distance_scalar)[..., None] for x in affine_vec]
unit_vector = [x.astype(dtype) for x in unit_vector]
template_mask_2d = template_mask_2d.astype(dtype)
if not self.config.use_template_unit_vector:
unit_vector = [jnp.zeros_like(x) for x in unit_vector]
to_concat.extend(unit_vector)
to_concat.append(template_mask_2d[..., None])
act = jnp.concatenate(to_concat, axis=-1)
# Mask out non-template regions so we don't get arbitrary values in the
# distogram for these regions.
act *= template_mask_2d[..., None]
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 9
act = common_modules.Linear(
num_channels,
initializer='relu',
name='embedding2d')(
act)
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 11
act = TemplatePairStack(
self.config.template_pair_stack, self.global_config)(
act, mask_2d, is_training)
act = hk.LayerNorm([-1], True, True, name='output_layer_norm')(act)
return act
class TemplateEmbedding(hk.Module):
"""Embeds a set of templates.
Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-12
Jumper et al. (2021) Suppl. Alg. 17 "TemplatePointwiseAttention"
"""
def __init__(self, config, global_config, name='template_embedding'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, query_embedding, template_batch, mask_2d, is_training):
"""Build TemplateEmbedding module.
Arguments:
query_embedding: Query pair representation, shape [N_res, N_res, c_z].
template_batch: A batch of template features.
mask_2d: Padding mask (Note: this doesn't care if a template exists,
unlike the template_pseudo_beta_mask).
is_training: Whether the module is in training mode.
Returns:
A template embedding [N_res, N_res, c_z].
"""
num_templates = template_batch['template_mask'].shape[0]
num_channels = (self.config.template_pair_stack
.triangle_attention_ending_node.value_dim)
num_res = query_embedding.shape[0]
dtype = query_embedding.dtype
template_mask = template_batch['template_mask']
template_mask = template_mask.astype(dtype)
query_num_channels = query_embedding.shape[-1]
# Make sure the weights are shared across templates by constructing the
# embedder here.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-12
template_embedder = SingleTemplateEmbedding(self.config, self.global_config)
def map_fn(batch):
return template_embedder(query_embedding, batch, mask_2d, is_training)
template_pair_representation = mapping.sharded_map(map_fn, in_axes=0)(
template_batch)
# Cross attend from the query to the templates along the residue
# dimension by flattening everything else into the batch dimension.
# Jumper et al. (2021) Suppl. Alg. 17 "TemplatePointwiseAttention"
flat_query = jnp.reshape(query_embedding,
[num_res * num_res, 1, query_num_channels])
flat_templates = jnp.reshape(
jnp.transpose(template_pair_representation, [1, 2, 0, 3]),
[num_res * num_res, num_templates, num_channels])
bias = (1e9 * (template_mask[None, None, None, :] - 1.))
template_pointwise_attention_module = Attention(
self.config.attention, self.global_config, query_num_channels)
nonbatched_args = [bias]
batched_args = [flat_query, flat_templates]
embedding = mapping.inference_subbatch(
template_pointwise_attention_module,
self.config.subbatch_size,
batched_args=batched_args,
nonbatched_args=nonbatched_args,
low_memory=not is_training)
embedding = jnp.reshape(embedding,
[num_res, num_res, query_num_channels])
# No gradients if no templates.
embedding *= (jnp.sum(template_mask) > 0.).astype(embedding.dtype)
return embedding
| 34.005258 | 102 | 0.653256 |
794598142fc20ffe6c7bb7d4cc6a8618a490ddb0 | 7,977 | py | Python | psdaq/psdaq/configdb/tt_config.py | ZhenghengLi/lcls2 | 94e75c6536954a58c8937595dcac295163aa1cdf | [
"BSD-3-Clause-LBNL"
] | 16 | 2017-11-09T17:10:56.000Z | 2022-03-09T23:03:10.000Z | psdaq/psdaq/configdb/tt_config.py | ZhenghengLi/lcls2 | 94e75c6536954a58c8937595dcac295163aa1cdf | [
"BSD-3-Clause-LBNL"
] | 6 | 2017-12-12T19:30:05.000Z | 2020-07-09T00:28:33.000Z | psdaq/psdaq/configdb/tt_config.py | ZhenghengLi/lcls2 | 94e75c6536954a58c8937595dcac295163aa1cdf | [
"BSD-3-Clause-LBNL"
] | 25 | 2017-09-18T20:02:43.000Z | 2022-03-27T22:27:42.000Z | from psdaq.configdb.get_config import get_config
from psdaq.configdb.scan_utils import *
from psdaq.cas.xpm_utils import timTxId
from .xpmmini import *
import rogue
import lcls2_timetool
import json
import IPython
import logging
from collections import deque
pv = None
cl = None
# FEB parameters
lane = 0
chan = 0
group = None
ocfg = None
def cl_poll(uart):
while True:
result = uart._rx._last
if result is not None:
uart._rx._last = None
break
time.sleep(0.01)
def tt_init(arg,dev='/dev/datadev_0',lanemask=1,xpmpv=None,timebase="186M",verbosity=0):
global cl
logging.getLogger().setLevel(40-10*verbosity)
myargs = { 'dev' : dev,
'pgp3' : False,
'pollEn' : False,
'initRead' : False,
'dataCapture' : False,
'dataDebug' : False,}
cl = lcls2_timetool.TimeToolKcu1500Root(**myargs)
cl.__enter__()
# Open a new thread here
if xpmpv is not None:
cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.ConfigureXpmMini()
pv = PVCtrls(xpmpv,cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.XpmMiniWrapper)
pv.start()
else:
print('XpmMini')
cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.ConfigureXpmMini()
cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.ConfigLclsTimingV2()
time.sleep(0.1)
return cl
def tt_init_feb(slane=None,schan=None):
global lane
global chan
if slane is not None:
lane = int(slane)
if schan is not None:
chan = int(schan)
def tt_connect(cl):
    if (getattr(cl.TimeToolKcu1500.Kcu1500Hsio, 'PgpMon[%d]' % lane).RxRemLinkReady.get() != 1):
        raise ValueError('PGP Link is down')
txId = timTxId('timetool')
rxId = cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.TriggerEventManager.XpmMessageAligner.RxId.get()
cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.TriggerEventManager.XpmMessageAligner.TxId.set(txId)
d = {}
d['paddr'] = rxId
return d
def user_to_expert(cl, cfg, full=False):
global group
global ocfg
d = {}
if full:
d['expert.TimeToolKcu1500.Kcu1500Hsio.TimingRx.TriggerEventManager.TriggerEventBuffer.Partition'] = group
try:
rawStart = cfg['user']['start_ns']
partitionDelay = getattr(cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.TriggerEventManager.XpmMessageAligner,'PartitionDelay[%d]'%group).get()
triggerDelay = int(rawStart*1300/7000 - partitionDelay*200)
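            # The factor 1300/7000 appears to convert nanoseconds into 186 MHz
            # timing-clock ticks (one tick ~ 7000/1300 ns ~ 5.4 ns), and the
            # partition delay is scaled by 200 ticks; treat these exact unit
            # conventions as assumptions rather than documented facts.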
print('partitionDelay {:} rawStart {:} triggerDelay {:}'.format(partitionDelay,rawStart,triggerDelay))
if triggerDelay < 0:
print('partitionDelay {:} rawStart {:} triggerDelay {:}'.format(partitionDelay,rawStart,triggerDelay))
raise ValueError('triggerDelay computes to < 0')
d['expert.TimeToolKcu1500.Kcu1500Hsio.TimingRx.TriggerEventManager.TriggerEventBuffer.TriggerDelay']=triggerDelay
except KeyError:
pass
try:
gate = cfg['user']['gate_ns']
d['expert.ClinkFeb.TrigCtrl.TrigPulseWidth'] = gate*0.001
except KeyError:
pass
update_config_entry(cfg,ocfg,d)
def config_expert(cl,cfg):
global lane
global chan
rogue_translate = {'ClinkFeb' :'ClinkFeb[%d]'%lane,
'ClinkCh' :'Ch[%d]'%chan,
'TriggerEventBuffer':'TriggerEventBuffer[%d]'%lane,
'TrigCtrl' :'TrigCtrl[%d]'%chan,
'PllConfig0' :'PllConfig[0]',
'PllConfig1' :'PllConfig[1]',
'PllConfig2' :'PllConfig[2]'}
uart = getattr(getattr(cl,'ClinkFeb[%d]'%lane).ClinkTop,'Ch[%d]'%chan).UartPiranha4
depth = 0
path = 'cl'
    my_queue = deque([[path, depth, cl, cfg['expert']]])  # contains path, dfs depth, rogue hierarchy, and daq configdb dict tree node
while(my_queue):
path,depth,rogue_node, configdb_node = my_queue.pop()
# Replace configdb lane and febch for the physical values
        if isinstance(configdb_node, dict):
for i in configdb_node:
# Substitute proper pgp lane or feb channel
if i in rogue_translate:
my_queue.appendleft([path+"."+i,depth+1,rogue_node.nodes[rogue_translate[i]],configdb_node[i]])
else:
try:
my_queue.appendleft([path+"."+i,depth+1,rogue_node.nodes[i],configdb_node[i]])
except KeyError:
print('Lookup failed for node [{:}] in path [{:}]'.format(i,path))
        if ('get' in dir(rogue_node) and 'set' in dir(rogue_node) and path != 'cl'):
# All FIR parameters are stored in configdb as hex strings (I don't know why)
if ".FIR." in path:
print(path+", rogue value = "+str(hex(rogue_node.get()))+", daq config database = " +str(configdb_node))
rogue_node.set(int(str(configdb_node),16))
else:
rogue_node.set(configdb_node)
if 'Uart' in path:
if 'ROI[0]' in path or 'SAD[0]' in path or 'SAD[1]' in path:
# These don't cause the send of a serial command
pass
else:
print('sleeping for {:}'.format(path))
cl_poll(uart)
def tt_config(cl,connect_str,cfgtype,detname,detsegm,grp):
global group
global ocfg
group = grp
cfg = get_config(connect_str,cfgtype,detname,detsegm)
ocfg = cfg
user_to_expert(cl,cfg,full=True)
# set bool parameters
cfg['expert']['ClinkFeb']['TrigCtrl']['EnableTrig'] = True
cfg['expert']['ClinkFeb']['TrigCtrl']['InvCC'] = False
cfg['expert']['ClinkFeb']['ClinkTop']['ClinkCh']['DataEn'] = True
cfg['expert']['ClinkFeb']['ClinkTop']['ClinkCh']['Blowoff'] = False
uart = getattr(getattr(cl,'ClinkFeb[%d]'%lane).ClinkTop,'Ch[%d]'%chan).UartPiranha4
getattr(getattr(cl,'ClinkFeb[%d]'%lane).ClinkTop,'Ch[%d]'%chan).UartPiranha4.SendEscape()
config_expert(cl,cfg)
#commands can be sent manually using cl.ClinkFeb0.ClinkTop.Ch0.UartPiranha4._tx.sendString('GCP')
# GCP returns configuration summary
uart._tx.sendString('gcp')
cl_poll(uart)
#cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.XpmMiniWrapper.XpmMini.HwEnable.set(True)
getattr(cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.TriggerEventManager,'TriggerEventBuffer[%d]'%lane).MasterEnable.set(True)
# Capture the firmware version to persist in the xtc
cfg['firmwareVersion'] = cl.TimeToolKcu1500.AxiPcieCore.AxiVersion.FpgaVersion.get()
cfg['firmwareBuild' ] = cl.TimeToolKcu1500.AxiPcieCore.AxiVersion.BuildStamp.get()
return json.dumps(cfg)
def tt_scan_keys(update):
global cl
global ocfg
# extract updates
cfg = {}
copy_reconfig_keys(cfg,ocfg, json.loads(update))
# Apply group
user_to_expert(cl,cfg,full=False)
# Retain mandatory fields for XTC translation
for key in ('detType:RO','detName:RO','detId:RO','doc:RO','alg:RO'):
copy_config_entry(cfg,ocfg,key)
copy_config_entry(cfg[':types:'],ocfg[':types:'],key)
return json.dumps(cfg)
def tt_update(update):
global cl
global ocfg
# extract updates
cfg = {}
update_config_entry(cfg,ocfg, json.loads(update))
# Apply group
user_to_expert(cl,cfg,full=False)
# Apply config
config_expert(cl, cfg)
# Retain mandatory fields for XTC translation
for key in ('detType:RO','detName:RO','detId:RO','doc:RO','alg:RO'):
copy_config_entry(cfg,ocfg,key)
copy_config_entry(cfg[':types:'],ocfg[':types:'],key)
return json.dumps(cfg)
def tt_unconfig(cl):
getattr(cl.TimeToolKcu1500.Kcu1500Hsio.TimingRx.TriggerEventManager,'TriggerEventBuffer[%d]'%lane).MasterEnable.set(False)
| 35.29646 | 144 | 0.627178 |
79459953f50a9460aec36dee1ae80772571b1a20 | 1,387 | py | Python | flask_app/analyser/serve.py | 4rr0w/twitterSIte | 672f00030165eadd5eeebec0ba4f8a81a662eba2 | [
"MIT"
] | null | null | null | flask_app/analyser/serve.py | 4rr0w/twitterSIte | 672f00030165eadd5eeebec0ba4f8a81a662eba2 | [
"MIT"
] | null | null | null | flask_app/analyser/serve.py | 4rr0w/twitterSIte | 672f00030165eadd5eeebec0ba4f8a81a662eba2 | [
"MIT"
] | null | null | null | import pandas as pd
class Serve:
def __init__(self, username, since, end) -> None:
since_day = pd.to_datetime(since)
end_day = pd.to_datetime(end)
        self.df = pd.read_excel("./Sentimental/%s-tweets-analysed.xlsx" % username, engine='openpyxl')
self.df_filtered = self.df[(self.df['created_at'] >= since_day) & (self.df['created_at'] <= end_day)]
self.username = username
    def get_sentiments(self) -> dict:
        # Avoid shadowing the built-in `dict`; bucket tweet counts by polarity.
        sentiments = {}
        sentiments["neutral"] = self.df_filtered[self.df_filtered["polarity"] == 0].shape[0]
        sentiments["weak_positive"] = self.df_filtered[(self.df_filtered["polarity"] > 0) & (self.df_filtered["polarity"] <= 0.3)].shape[0]
        sentiments["positive"] = self.df_filtered[(self.df_filtered["polarity"] > 0.3) & (self.df_filtered["polarity"] <= 0.6)].shape[0]
        sentiments["strong_positive"] = self.df_filtered[(self.df_filtered["polarity"] > 0.6) & (self.df_filtered["polarity"] <= 1)].shape[0]
        sentiments["weak_negative"] = self.df_filtered[(self.df_filtered["polarity"] < 0) & (self.df_filtered["polarity"] >= -0.3)].shape[0]
        sentiments["negative"] = self.df_filtered[(self.df_filtered["polarity"] < -0.3) & (self.df_filtered["polarity"] >= -0.6)].shape[0]
        sentiments["strong_negative"] = self.df_filtered[(self.df_filtered["polarity"] < -0.6) & (self.df_filtered["polarity"] >= -1)].shape[0]
        return sentiments
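# Illustrative usage (hypothetical username and dates; assumes the analysed
# spreadsheet already exists under ./Sentimental/):
#   s = Serve("some_user", "2021-01-01", "2021-01-31")
#   counts = s.get_sentiments()  # e.g. {"neutral": 12, "weak_positive": 5, ...}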
| 60.304348 | 137 | 0.636626 |
7945999670188e22beb06ebc1d7a570e5ff9d297 | 2,040 | py | Python | iam/roles/list_roles.py | flexera-public/py-flexeraone-tools | cf146a1fa65acfdf660475c50477f549e477506f | [
"Apache-2.0"
] | null | null | null | iam/roles/list_roles.py | flexera-public/py-flexeraone-tools | cf146a1fa65acfdf660475c50477f549e477506f | [
"Apache-2.0"
] | null | null | null | iam/roles/list_roles.py | flexera-public/py-flexeraone-tools | cf146a1fa65acfdf660475c50477f549e477506f | [
"Apache-2.0"
] | null | null | null | import logging
import requests
import sys
import click
import pprint
@click.command(no_args_is_help=True)
@click.option('--refresh-token', '-r', prompt="Refresh Token", help='Refresh Token from FlexeraOne', required=True)
@click.option('--host', '-h', prompt="IAM API Endpoint", default="api.flexeratest.com", show_default=True)
@click.option('--org-id', '-i', prompt="Organization ID", help="Organization ID", required=True)
def list_iam_roles(refresh_token, host, org_id):
"""
Organization Add Tool for MSP's
"""
# Tweak the destination (e.g. sys.stdout instead) and level (e.g. logging.DEBUG instead) to taste!
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s', stream=sys.stderr, level=logging.INFO)
access_token = generate_access_token(refresh_token, host)
list_roles(host, access_token, org_id)
def generate_access_token(refresh_token, host):
domain = '.'.join(host.split('.')[-2:])
token_url = "https://login.{}/oidc/token".format(domain)
logging.info("OAuth2: Getting Access Token via Refresh Token for {} ...".format(token_url))
token_post_request = requests.post(token_url, data={"grant_type": "refresh_token", "refresh_token": refresh_token})
token_post_request.raise_for_status()
access_token = token_post_request.json()["access_token"]
return access_token
def list_roles(host, access_token, org_id):
"""
create_org(host, access_token, msp_org_id, org_data)
Creates the org and logs the response.
"""
headers = {"Authorization": "Bearer " + access_token, "Content-Type": "application/json"}
kwargs = {"headers": headers, "allow_redirects": False}
roles_url = "https://{}/iam/v1/orgs/{}/roles".format(host, org_id)
list_roles_request = requests.get(roles_url, **kwargs, stream=True)
list_roles_request.raise_for_status()
pprint.pprint(list_roles_request.json())
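# Example invocation (hypothetical values):
#   python list_roles.py -r <refresh_token> -h api.flexeratest.com -i <org_id>
# Options may also be supplied via FLEXERA_* environment variables thanks to
# click's auto_envvar_prefix setting below.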
if __name__ == '__main__':
# click passes no args
# pylint: disable=no-value-for-parameter
list_iam_roles(auto_envvar_prefix='FLEXERA')
| 43.404255 | 119 | 0.717647 |
79459a46571f7734af90e3c171a56aecfb04a9e9 | 8,093 | py | Python | src/python/dxpy/utils/printing.py | claymcleod/dx-toolkit | 3b51c06b39d4c3925439fff0089d7d28c3bd7f1c | [
"Apache-2.0"
] | null | null | null | src/python/dxpy/utils/printing.py | claymcleod/dx-toolkit | 3b51c06b39d4c3925439fff0089d7d28c3bd7f1c | [
"Apache-2.0"
] | null | null | null | src/python/dxpy/utils/printing.py | claymcleod/dx-toolkit | 3b51c06b39d4c3925439fff0089d7d28c3bd7f1c | [
"Apache-2.0"
] | 1 | 2019-02-02T01:44:30.000Z | 2019-02-02T01:44:30.000Z | # Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This submodule gives basic utilities for printing to the terminal.
'''
import textwrap, subprocess, os, sys
import json
import platform
from ..compat import USING_PYTHON2, sys_encoding
from ..exceptions import DXCLIError
import contextlib
import io
if sys.stdout.isatty():
try:
tty_rows, tty_cols = map(int, subprocess.check_output(['stty', 'size'], stderr=open(os.devnull, 'w')).split())
std_width = min(tty_cols - 2, 100)
except:
tty_rows, tty_cols = 24, 80
std_width = 78
color_state = True
else:
tty_rows, tty_cols = 24, 80
std_width = 78
color_state = False
delimiter = None
# Utility functions to silence output for a function call
# https://stackoverflow.com/questions/2828953/silence-the-stdout-of-a-function-in-python-without-trashing-sys-stdout-and-resto
class DummyFile(object):
def write(self, x): pass
@contextlib.contextmanager
def nostderr():
save_stderr = sys.stderr
sys.stderr = DummyFile()
yield
sys.stderr = save_stderr
def CYAN(message=None):
if message is None:
return '\033[36m' if color_state else ''
else:
return CYAN() + message + ENDC()
def LIGHTBLUE(message=None):
if message is None:
return '\033[1;34m' if color_state else ''
else:
return LIGHTBLUE() + message + ENDC()
def BLUE(message=None):
if message is None:
return '\033[34m' if color_state else ''
else:
return BLUE() + message + ENDC()
def YELLOW(message=None):
if message is None:
return '\033[33m' if color_state else ''
else:
return YELLOW() + message + ENDC()
def GREEN(message=None):
if message is None:
return '\033[32m' if color_state else ''
else:
return GREEN() + message + ENDC()
def RED(message=None):
if message is None:
return '\033[31m' if color_state else ''
else:
return RED() + message + ENDC()
def WHITE(message=None):
if message is None:
return '\033[37m' if color_state else ''
else:
return WHITE() + message + ENDC()
def UNDERLINE(message=None):
if message is None:
return '\033[4m' if color_state else ''
else:
return UNDERLINE() + message + ENDC()
def BOLD(message=None):
if message is None:
return '\033[1m' if color_state else ''
else:
return BOLD() + message + ENDC()
def ENDC():
return '\033[0m' if color_state else ''
def DNANEXUS_LOGO():
return BOLD() + WHITE() + 'DNAne' + CYAN() + 'x' + WHITE() + 'us' + ENDC()
def DNANEXUS_X():
return BOLD() + CYAN() + 'x' + WHITE() + ENDC()
def set_colors(state=True):
global color_state
color_state = state
def set_delimiter(delim=None):
global delimiter
delimiter = delim
def get_delimiter(delim=None):
return delimiter
def DELIMITER(alt_delim):
return alt_delim if delimiter is None else delimiter
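# Usage sketch (added for illustration, not part of the original module):
# DELIMITER() falls back to its argument until set_delimiter() installs a
# global override, which the _format_find_*_results helpers below rely on
# for machine-parseable output.
#
#     set_delimiter(",")
#     DELIMITER(" : ")    # -> ","
#     set_delimiter(None)
#     DELIMITER(" : ")    # -> " : "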
def fill(string, width_adjustment=0, **kwargs):
if "width" not in kwargs:
kwargs['width'] = max(std_width + width_adjustment, 20)
if "break_on_hyphens" not in kwargs:
kwargs["break_on_hyphens"] = False
return textwrap.fill(string, **kwargs)
def pager(content, pager=None, file=None):
if file is None:
file = sys.stdout
pager_process = None
try:
if file != sys.stdout or not file.isatty():
raise DXCLIError() # Just print the content, don't use a pager
content_lines = content.splitlines()
content_rows = len(content_lines)
content_cols = max(len(i) for i in content_lines)
if tty_rows > content_rows and tty_cols > content_cols:
raise DXCLIError() # Just print the content, don't use a pager
if pager is None:
pager = os.environ.get('PAGER', 'less -RS')
if platform.system() == 'Windows':
# Verify if the pager is available on Windows
try:
subprocess.call(pager)
except:
raise DXCLIError() # Just print the content, don't use a pager
pager_process = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE, stdout=file)
pager_process.stdin.write(content.encode(sys_encoding))
pager_process.stdin.close()
pager_process.wait()
        if pager_process.returncode != 0:  # os.EX_OK is POSIX-only; 0 is portable
raise DXCLIError() # Pager had a problem, print the content without it
except:
file.write(content.encode(sys_encoding) if USING_PYTHON2 else content)
finally:
try:
pager_process.terminate()
except:
pass
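# Usage sketch (added for illustration, not part of the original module;
# `long_help` is a placeholder string): the text is piped through $PAGER
# (default 'less -RS') and printed directly when stdout is not a TTY, the
# pager fails, or the content already fits on the screen.
#
#     long_help = "\n".join(200 * ["..."])
#     pager(long_help)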
def refill_paragraphs(string, ignored_prefix=' '):
"""Refills the given text, where the text is composed of paragraphs
separated by blank lines (i.e. '\n\n'). Lines that begin with
ignored_prefix are not touched; this can be used to keep indented
code snippets from being incorrectly reformatted.
"""
paragraphs = string.split('\n\n')
refilled_paragraphs = [fill(paragraph) if not paragraph.startswith(ignored_prefix) else paragraph for paragraph in paragraphs]
return '\n\n'.join(refilled_paragraphs).strip('\n')
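# Usage sketch (added for illustration, not part of the original module):
#
#     text = ("A paragraph that gets re-wrapped to the terminal width.\n\n"
#             "  indented snippet, left untouched because of ignored_prefix")
#     print(refill_paragraphs(text))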
def _format_find_projects_results(results):
for result in results:
print(result["id"] + DELIMITER(" : ") + result['describe']['name'] +
DELIMITER(' (') + result["level"] + DELIMITER(')'))
def _format_find_apps_results(results, verbose=False):
def maybe_x(result):
return DNANEXUS_X() if result['describe']['billTo'] in ['org-dnanexus', 'org-dnanexus_apps'] else ' '
if not verbose:
for result in results:
print(maybe_x(result) + DELIMITER(" ") + result['describe'].get('title', result['describe']['name']) + DELIMITER(' (') + result["describe"]["name"] + DELIMITER("), v") + result["describe"]["version"])
else:
for result in results:
print(maybe_x(result) + DELIMITER(" ") + result["id"] + DELIMITER(" ") + result['describe'].get('title', result['describe']['name']) + DELIMITER(' (') + result["describe"]["name"] + DELIMITER('), v') + result['describe']['version'] + DELIMITER(" (") + ("published" if result["describe"].get("published", 0) > 0 else "unpublished") + DELIMITER(")"))
def _format_find_org_members_results(results):
for result in results:
print(result["id"] + DELIMITER(" : ") + result['describe']['first'] + DELIMITER(' ') +
result['describe']['last'] + DELIMITER(' ') + DELIMITER(' (') + result["level"] +
DELIMITER(')'))
def format_find_results(args, results):
"""
Formats the output of ``dx find ...`` commands for `--json` and `--brief` arguments; also formats if no formatting
arguments are given.
Currently used for ``dx find projects``, ``dx find org_projects``, ``dx find org_apps``,
and ``dx find org_members``
"""
if args.json:
print(json.dumps(list(results), indent=4))
elif args.brief:
for result in results:
print(result['id'])
else:
if args.func.__name__ in ("find_projects", "org_find_projects"):
_format_find_projects_results(results)
elif args.func.__name__ in ("org_find_members"):
_format_find_org_members_results(results)
elif args.func.__name__ in ("org_find_apps"): # should have "find_apps" here one day
_format_find_apps_results(results, verbose=args.verbose)
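# Example of a `dx find projects` line produced by _format_find_projects_results
# (added for illustration; the project id, name and access level are placeholders):
#
#     project-xxxx : My project (ADMINISTER)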
| 34.58547 | 360 | 0.645496 |
79459a6384d420b6eb8d2194787096bbcdb6f993 | 2,157 | py | Python | barbicanclient/barbican_cli/cas.py | mail2nsrajesh/python-barbicanclient | 439ee25dc6c998e5571022ce0094a10c2611d717 | [
"Apache-2.0"
] | null | null | null | barbicanclient/barbican_cli/cas.py | mail2nsrajesh/python-barbicanclient | 439ee25dc6c998e5571022ce0094a10c2611d717 | [
"Apache-2.0"
] | null | null | null | barbicanclient/barbican_cli/cas.py | mail2nsrajesh/python-barbicanclient | 439ee25dc6c998e5571022ce0094a10c2611d717 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line interface sub-commands related to cas.
"""
from cliff import lister
from cliff import show
from barbicanclient import cas
class GetCA(show.ShowOne):
"""Retrieve a CA by providing its URI."""
def get_parser(self, prog_name):
parser = super(GetCA, self).get_parser(prog_name)
parser.add_argument('URI', help='The URI reference for the CA.')
return parser
def take_action(self, args):
entity = self.app.client_manager.key_manager.cas.get(ca_ref=args.URI)
return entity._get_formatted_entity()
class ListCA(lister.Lister):
"""List CAs."""
def get_parser(self, prog_name):
parser = super(ListCA, self).get_parser(prog_name)
parser.add_argument('--limit', '-l', default=10,
help='specify the limit to the number of items '
'to list per page (default: %(default)s; '
'maximum: 100)',
type=int)
parser.add_argument('--offset', '-o', default=0,
help='specify the page offset '
'(default: %(default)s)',
type=int)
parser.add_argument('--name', '-n', default=None,
help='specify the ca name '
'(default: %(default)s)')
return parser
def take_action(self, args):
obj_list = self.app.client_manager.key_manager.cas.list(
args.limit, args.offset, args.name)
return cas.CA._list_objects(obj_list)
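# Usage sketch (added for illustration, not part of the original module).
# Assuming these classes are wired up as the `ca get` / `ca list` subcommands
# (the entry-point registration is not shown in this file):
#
#     barbican ca list --limit 5 --offset 5
#     barbican ca get <CA URI>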
| 37.189655 | 77 | 0.607789 |
79459a89b30156c0bc31e8272b443c74247a27b6 | 5,096 | py | Python | spacy/training/loggers.py | g4brielvs/spaCy | cca8651fc8133172ebaa9d9fc438ed1fbf34fb33 | [
"BSD-3-Clause",
"MIT"
] | 2 | 2017-06-23T20:54:31.000Z | 2022-01-06T08:11:49.000Z | spacy/training/loggers.py | g4brielvs/spaCy | cca8651fc8133172ebaa9d9fc438ed1fbf34fb33 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | spacy/training/loggers.py | g4brielvs/spaCy | cca8651fc8133172ebaa9d9fc438ed1fbf34fb33 | [
"BSD-3-Clause",
"MIT"
] | 1 | 2021-06-21T07:17:48.000Z | 2021-06-21T07:17:48.000Z | from typing import TYPE_CHECKING, Dict, Any, Tuple, Callable, List, Optional, IO
from wasabi import Printer
import tqdm
import sys
from ..util import registry
from .. import util
from ..errors import Errors
if TYPE_CHECKING:
from ..language import Language # noqa: F401
def setup_table(
*, cols: List[str], widths: List[int], max_width: int = 13
) -> Tuple[List[str], List[int], List[str]]:
final_cols = []
final_widths = []
for col, width in zip(cols, widths):
if len(col) > max_width:
col = col[: max_width - 3] + "..." # shorten column if too long
final_cols.append(col.upper())
final_widths.append(max(len(col), width))
return final_cols, final_widths, ["r" for _ in final_widths]
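# Example of the column set-up (added for illustration, not part of the original
# module): headers are upper-cased, names longer than max_width are shortened,
# and every column is right-aligned.
#
#     setup_table(cols=["E", "#", "Loss transformer", "Score"], widths=[3, 6, 8, 6])
#     # -> (['E', '#', 'LOSS TRANS...', 'SCORE'], [3, 6, 13, 6], ['r', 'r', 'r', 'r'])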
@registry.loggers("spacy.ConsoleLogger.v1")
def console_logger(progress_bar: bool = False):
def setup_printer(
nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr
) -> Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]]:
write = lambda text: stdout.write(f"{text}\n")
msg = Printer(no_print=True)
# ensure that only trainable components are logged
logged_pipes = [
name
for name, proc in nlp.pipeline
if hasattr(proc, "is_trainable") and proc.is_trainable
]
eval_frequency = nlp.config["training"]["eval_frequency"]
score_weights = nlp.config["training"]["score_weights"]
score_cols = [col for col, value in score_weights.items() if value is not None]
loss_cols = [f"Loss {pipe}" for pipe in logged_pipes]
spacing = 2
table_header, table_widths, table_aligns = setup_table(
cols=["E", "#"] + loss_cols + score_cols + ["Score"],
widths=[3, 6] + [8 for _ in loss_cols] + [6 for _ in score_cols] + [6],
)
write(msg.row(table_header, widths=table_widths, spacing=spacing))
write(msg.row(["-" * width for width in table_widths], spacing=spacing))
progress = None
def log_step(info: Optional[Dict[str, Any]]) -> None:
nonlocal progress
if info is None:
# If we don't have a new checkpoint, just return.
if progress is not None:
progress.update(1)
return
losses = [
"{0:.2f}".format(float(info["losses"][pipe_name]))
for pipe_name in logged_pipes
]
scores = []
for col in score_cols:
score = info["other_scores"].get(col, 0.0)
try:
score = float(score)
except TypeError:
err = Errors.E916.format(name=col, score_type=type(score))
raise ValueError(err) from None
if col != "speed":
score *= 100
scores.append("{0:.2f}".format(score))
data = (
[info["epoch"], info["step"]]
+ losses
+ scores
+ ["{0:.2f}".format(float(info["score"]))]
)
if progress is not None:
progress.close()
write(
msg.row(data, widths=table_widths, aligns=table_aligns, spacing=spacing)
)
if progress_bar:
# Set disable=None, so that it disables on non-TTY
progress = tqdm.tqdm(
total=eval_frequency, disable=None, leave=False, file=stderr
)
progress.set_description(f"Epoch {info['epoch']+1}")
def finalize() -> None:
pass
return log_step, finalize
return setup_printer
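# Usage sketch (added for illustration, not part of the original module):
# the registered name is what a training config refers to, e.g.
#
#     [training.logger]
#     @loggers = "spacy.ConsoleLogger.v1"
#     progress_bar = false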
@registry.loggers("spacy.WandbLogger.v1")
def wandb_logger(project_name: str, remove_config_values: List[str] = []):
import wandb
console = console_logger(progress_bar=False)
def setup_logger(
nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr
) -> Tuple[Callable[[Dict[str, Any]], None], Callable[[], None]]:
config = nlp.config.interpolate()
config_dot = util.dict_to_dot(config)
for field in remove_config_values:
del config_dot[field]
config = util.dot_to_dict(config_dot)
wandb.init(project=project_name, config=config, reinit=True)
console_log_step, console_finalize = console(nlp, stdout, stderr)
def log_step(info: Optional[Dict[str, Any]]):
console_log_step(info)
if info is not None:
score = info["score"]
other_scores = info["other_scores"]
losses = info["losses"]
wandb.log({"score": score})
if losses:
wandb.log({f"loss_{k}": v for k, v in losses.items()})
if isinstance(other_scores, dict):
wandb.log(other_scores)
def finalize() -> None:
console_finalize()
wandb.join()
return log_step, finalize
return setup_logger
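# Usage sketch (added for illustration, not part of the original module;
# "my_project" and the removed config paths are placeholders):
#
#     [training.logger]
#     @loggers = "spacy.WandbLogger.v1"
#     project_name = "my_project"
#     remove_config_values = ["paths.train", "paths.dev"]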
| 36.4 | 88 | 0.560832 |
79459c4170969ea338451f502f8a8dc8c2fbfeb3 | 414 | py | Python | places/migrations/0005_image_priority.py | stasyao/dvmn | 655ea85c696f36abe8531fd671e6b1d1b07d7c52 | [
"MIT"
] | 1 | 2022-02-17T14:02:08.000Z | 2022-02-17T14:02:08.000Z | places/migrations/0005_image_priority.py | stasyao/dvmn | 655ea85c696f36abe8531fd671e6b1d1b07d7c52 | [
"MIT"
] | null | null | null | places/migrations/0005_image_priority.py | stasyao/dvmn | 655ea85c696f36abe8531fd671e6b1d1b07d7c52 | [
"MIT"
] | 1 | 2021-11-24T20:58:58.000Z | 2021-11-24T20:58:58.000Z | # Generated by Django 3.2.7 on 2021-10-05 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('places', '0004_image'),
]
operations = [
migrations.AddField(
model_name='image',
name='priority',
field=models.PositiveIntegerField(default=1),
preserve_default=False,
),
]
| 20.7 | 57 | 0.589372 |
79459c97b8173429985985ba82917bb77bd5465b | 56,218 | py | Python | sample_code/Python_NLP/nlp-6-1-document_classify.py | ice-melt/python-lib | 345e34fff7386d91acbb03a01fd4127c5dfed037 | [
"MIT"
] | null | null | null | sample_code/Python_NLP/nlp-6-1-document_classify.py | ice-melt/python-lib | 345e34fff7386d91acbb03a01fd4127c5dfed037 | [
"MIT"
] | null | null | null | sample_code/Python_NLP/nlp-6-1-document_classify.py | ice-melt/python-lib | 345e34fff7386d91acbb03a01fd4127c5dfed037 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@Author : [email protected]
@File : nlp-6-1-document_classify.py
@Time : 2019/4/23 18:09
@Version : 1.0
@Desc    : Document classification
"""
from nltk.corpus import movie_reviews
import random
import nltk
documents = [(list(movie_reviews.words(fileid)), category)
for category in movie_reviews.categories()
for fileid in movie_reviews.fileids(category)]
random.shuffle(documents)
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
# use the 2,000 most frequent words as the feature vocabulary
word_features = [word for (word, _) in all_words.most_common(2000)]
def document_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
print(document_features(movie_reviews.words('pos/cv957_8737.txt')))
# Train and test a classifier for document classification
featuresets = [(document_features(d), c) for (d, c) in documents]
train_set, test_set = featuresets[100:], featuresets[:100]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier, test_set))
classifier.show_most_informative_features()
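# Added usage sketch (not part of the original script): classify a single
# review with the trained model, reusing the file id from the print() above.
test_doc = list(movie_reviews.words('pos/cv957_8737.txt'))
print(classifier.classify(document_features(test_doc)))  # prints 'pos' or 'neg'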
# Example output of the print(document_features(...)) call above: a dict with one
# boolean entry per feature word, of the form
# {'contains(plot)': True, 'contains(:)': True, 'contains(two)': True, ...,
#  'contains(quest)': False, ...}
# (full pasted dump of ~2000 'contains(word)': True/False entries omitted)
False, 'contains(vows)': False, 'contains(taught)': False, 'contains(musketeer)': False, 'contains(dude)': False, 'contains(used)': True, 'contains(fourteen)': False, 'contains(arrgh)': False, 'contains(swish)': False, 'contains(zzzzzzz)': False, 'contains(original)': False, 'contains(lacks)': False, 'contains(energy)': False, 'contains(next)': False, 'contains(hmmmm)': False, 'contains(justin)': False, 'contains(chambers)': False, 'contains(basically)': False, 'contains(uncharismatic)': False, 'contains(version)': False, 'contains(chris)': False, 'contains(o)': False, 'contains(donnell)': False, 'contains(range)': False, 'contains(mena)': False, 'contains(suvari)': False, 'contains(thora)': False, 'contains(birch)': False, 'contains(dungeons)': False, 'contains(dragons)': False, 'contains(miscast)': False, 'contains(deliveries)': False, 'contains(piss)': False, 'contains(poor)': False, 'contains(ms)': False, 'contains(fault)': False, 'contains(definitely)': False, 'contains(higher)': False, 'contains(semi)': False, 'contains(saving)': False, 'contains(grace)': False, 'contains(wise)': False, 'contains(irrepressible)': False, 'contains(once)': True, 'contains(thousand)': False, 'contains(god)': False, 'contains(beg)': False, 'contains(agent)': False, 'contains(marketplace)': False, 'contains(modern)': False, 'contains(day)': True, 'contains(roles)': False, 'contains(romantic)': False, 'contains(gunk)': False, 'contains(alright)': False, 'contains(yeah)': False, 'contains(yikes)': False, 'contains(notches)': False, 'contains(fellas)': False, 'contains(blares)': False, 'contains(ear)': False, 'contains(accentuate)': False, 'contains(annoy)': False, 'contains(important)': False, 'contains(behind)': False, 'contains(recognize)': False, 'contains(epic)': False, 'contains(fluffy)': False, 'contains(rehashed)': False, 'contains(cake)': False, 'contains(created)': False, 'contains(shrewd)': False, 'contains(advantage)': False, 'contains(kung)': True, 'contains(fu)': True, 'contains(phenomenon)': False, 'contains(test)': False, 'contains(dudes)': False, 'contains(keep)': False, 'contains(reading)': False, 'contains(editing)': False, 'contains(shoddy)': False, 'contains(banal)': False, 'contains(stilted)': False, 'contains(plentiful)': False, 'contains(top)': True, 'contains(horse)': False, 'contains(carriage)': False, 'contains(stand)': False, 'contains(opponent)': False, 'contains(scampering)': False, 'contains(cut)': False, 'contains(mouseketeer)': False, 'contains(rope)': False, 'contains(tower)': False, 'contains(jumping)': False, 'contains(chords)': False, 'contains(hanging)': False, 'contains(says)': False, 'contains(14)': False, 'contains(shirt)': False, 'contains(strayed)': False, 'contains(championing)': False, 'contains(fun)': True, 'contains(stretches)': False, 'contains(atrocious)': False, 'contains(lake)': False, 'contains(reminded)': False, 'contains(school)': False, 'contains(cringe)': False, 'contains(musketeers)': False, 'contains(fat)': False, 'contains(raison)': False, 'contains(etre)': False, 'contains(numbers)': False, 'contains(hoping)': False, 'contains(packed)': False, 'contains(stuntwork)': False, 'contains(promoted)': False, 'contains(trailer)': False, 'contains(major)': False, 'contains(swashbuckling)': False, 'contains(beginning)': False, 'contains(finishes)': False, 'contains(juggling)': False, 'contains(ladders)': False, 'contains(ladder)': True, 'contains(definite)': False, 'contains(keeper)': False, 'contains(regurgitated)': False, 'contains(crap)': False, 
'contains(tell)': False, 'contains(deneuve)': False, 'contains(placed)': False, 'contains(hullo)': False, 'contains(barely)': False, 'contains(ugh)': False, 'contains(small)': False, 'contains(annoyed)': False, 'contains(trash)': False, 'contains(gang)': False, 'contains(vow)': False, 'contains(stay)': False, 'contains(thank)': False, 'contains(outlaws)': False, 'contains(5)': False, 'contains(crouching)': False, 'contains(tiger)': False, 'contains(hidden)': False, 'contains(matrix)': False, 'contains(replacement)': False, 'contains(killers)': False, 'contains(6)': False, 'contains(romeo)': False, 'contains(die)': False, 'contains(shanghai)': False, 'contains(noon)': False, 'contains(remembered)': False, 'contains(dr)': False, 'contains(hannibal)': False, 'contains(lecter)': False, 'contains(michael)': False, 'contains(mann)': False, 'contains(forensics)': False, 'contains(thriller)': False, 'contains(manhunter)': False, 'contains(scottish)': False, 'contains(brian)': False, 'contains(cox)': False}
# 0.75
# Most Informative Features
# contains(justin) = True neg : pos = 9.0 : 1.0
# contains(schumacher) = True neg : pos = 7.4 : 1.0
# contains(atrocious) = True neg : pos = 7.0 : 1.0
# contains(shoddy) = True neg : pos = 7.0 : 1.0
# contains(mena) = True neg : pos = 7.0 : 1.0
# contains(unimaginative) = True neg : pos = 7.0 : 1.0
# contains(suvari) = True neg : pos = 7.0 : 1.0
# contains(turkey) = True neg : pos = 6.5 : 1.0
# contains(neatly) = True pos : neg = 6.4 : 1.0
# contains(singers) = True pos : neg = 6.4 : 1.0
print("over!")
| 1,041.074074 | 54,243 | 0.700327 |
79459e2f8419dcd519ea82a9a4ca9bd6bcf9d818 | 1,142 | py | Python | Final/Final/71410.py | EllisBarnes00/COP-1000 | 8509e59e8a566c77295c714ddcb0f557c470358b | [
"Unlicense"
] | null | null | null | Final/Final/71410.py | EllisBarnes00/COP-1000 | 8509e59e8a566c77295c714ddcb0f557c470358b | [
"Unlicense"
] | null | null | null | Final/Final/71410.py | EllisBarnes00/COP-1000 | 8509e59e8a566c77295c714ddcb0f557c470358b | [
"Unlicense"
] | null | null | null | file = open("BoyNames.txt", "r")
data = file.read()
boys_names = data.split('\n')
file = open("GirlNames.txt", "r")
data = file.read()
girls_names = data.split('\n')
file.close()
choice = input("Enter 'boy', 'girl', or 'both':")
if choice == "boy":
name = input("Enter a boy's name:")
if name in boys_names:
print(f"{name} was a popular boy's name between 2000 and 2009.")
else:
print(f"{name} was not a popular boy's name between 2000 and 2009.")
elif choice == "girl":
name = input("Enter a girl's name:")
if name in girls_names:
print(f"{name} was a popular girl's name between 2000 and 2009.")
else:
print(f"{name} was not a popular girl's name between 2000 and 2009.")
elif choice == "both":
bname = input("Enter a boy's name:")
if bname in boys_names:
print(f"{bname} was a popular boy's name between 2000 and 2009.")
else:
print(f"{bname} was not a popular boy's name between 2000 and 2009.")
gname = input("Enter a girl's name:")
if gname in girls_names:
print(f"{gname} was a popular girl's name between 2000 and 2009.")
else:
print(f"{gname} was not a popular girl's name between 2000 and 2009.")
| 30.052632 | 72 | 0.671629 |
79459e627a15abb52722d17338e4b4ef3ea9b1de | 18,082 | py | Python | qa/setup_packages.py | majra20/DALI | 8b97490b3849f3d663a41c95366652a49328da07 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-05-31T14:00:58.000Z | 2019-05-31T14:00:58.000Z | qa/setup_packages.py | Pandinosaurus/DALI | 1031314b7857ec11d40e31496089579297a2e863 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | qa/setup_packages.py | Pandinosaurus/DALI | 1031314b7857ec11d40e31496089579297a2e863 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import sys
# use packaging from PIP as it is always present on system we are testing on
from pip._vendor.packaging.version import parse
import urllib.parse
try:
import pip._internal.utils.compatibility_tags as p
except ImportError:
try:
import pip._internal.pep425tags as p
except ImportError:
import pip.pep425tags as p
try:
# For Python 3.0 and later
from urllib.request import urlopen, HTTPError, Request
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen, HTTPError, Request
PYTHON_VERSION = ".".join([str(x) for x in sys.version_info[0:2]])
class PckgVer():
"""Class that holds a version string accompanied with maximum and minimum python version that
this version should support. If python falls beyond version bounds it evaluates to the empty string
Parameters
----------
`ver`: str
Version that is housed by object of this class
`python_max_ver` : str, optional, default = None
Maximum python version supported by this package. If empty there is no upper bound
`python_min_ver`: str, optional, default = None
Minimum python version supported by this package. If empty there is no lower bound
"""
def __init__(self, ver, python_min_ver=None, python_max_ver=None, alias=None):
self.ver = ver
self.python_min_ver = python_min_ver
self.python_max_ver = python_max_ver
self.name_alias = alias
def __bool__(self):
return (not self.python_min_ver or parse(PYTHON_VERSION) >= parse(self.python_min_ver)) and \
(not self.python_max_ver or parse(PYTHON_VERSION) <= parse(self.python_max_ver))
def __repr__(self):
if self:
return self.ver
else:
return ""
@property
def alias(self):
return self.name_alias
class BasePackage():
"""Class describing basic methods that package should provide
Parameters
----------
`key`: str
Name this package should be queried for
`versions`: str or PckgVer class object
List of versions this package is available for
`name`: str, optional, default = None
Name of this package used during installation. If empty it is the same as the key
"""
def __init__(self, key, versions, name=None):
self.key = key
if not name:
name = key
self.name = name
self.versions = versions
def clamp_index(self, idx, cuda_version=None):
"""Clamps index to range 0 - num_of_packages - 1
Parameters
----------
`key`: idx: int
Index to clamp
`cuda_version`: str, optional, default = None
Cuda version used for a given index
"""
if idx < 0 or idx >= self.get_num_of_version(cuda_version):
idx = 0
return idx
@staticmethod
def get_alias(version):
"""Obtains alias for given version if exists. Otherwise return None
Parameters
----------
`version`: str or PckgVer
Package version
"""
return getattr(version, "alias", None)
def get_name(self, cuda_version=None, idx=None):
"""Retrives package name.
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
`idx`: int
Index of the name to retrieve in case a specific version has a different alias
"""
name = BasePackage.get_alias(self.get_version(idx, cuda_version))
if name is None:
name = self.name
return name
def get_uninstall_names(self, cuda_version=None):
"""Retrives package name/s used to uninstall it.
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
version = self.get_all_versions(cuda_version)
uninstall_names = [self.get_name(cuda_version)]
for v in version:
name = BasePackage.get_alias(v)
if name is not None:
uninstall_names.append(name)
# merge into one string
return " ".join(uninstall_names)
def filter_versions(self, versions):
"""Retrieves only compatible versions of this package from provided `versions` list
Parameters
----------
`versions`: list
List of versions to be checked. All versions that evaluate to True are returned
"""
# no need to convert PckgVer to string, it is done by get_install_string when printed
return [v for v in versions if v]
def get_version(self, idx, cuda_version=None):
"""Get versions at a given index, compatible with provided cuda_version
Parameters
----------
`idx`: int
Index of the version to retrieve. If the index is beyond the 0 - num_of_versions-1 range
it is clamped to it
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
if idx is None:
idx = 0
idx = self.clamp_index(idx, cuda_version)
return self.get_all_versions(cuda_version)[idx]
def get_all_versions(self, cuda_version=None):
"""Get all versions compatible with provided cuda_version
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
raise NotImplementedError
def get_num_of_version(self, cuda_version=None):
"""Obtains the number of available versions for given cuda_version
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
return len(self.get_all_versions(cuda_version))
def get_install_string(self, idx, cuda_version=None):
"""Obtains installation string that pip should accept for version at
a given index with a given cuda_version
Parameters
----------
`idx`: int
Index of the version to retrieve. If the index is beyond the 0 - num_of_versions-1 range
it is clamped to it
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
return "{name}=={version}".format(name=self.get_name(cuda_version, idx), version=self.get_version(idx, cuda_version))
def get_all_install_strings(self, cuda_version=None):
"""Gets all installation string that pip should accept for a given
cuda version. Providing all of them to pip won't work, but each of
them should be a valid pip argument
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
ret = []
for i in range(self.get_num_of_version(cuda_version)):
ret.append(self.get_install_string(i, cuda_version))
return " ".join(ret)
class PlainPackage(BasePackage):
"""Class describing a simple package with a key/name and a list of versions.
Cuda version is irrelevant for this package
Parameters
----------
`key`: str
Name this package should be queried for
`versions`: str or PckgVer class object
List of versions this package is available for
`name`: str, , optional, default = None
Name of this package used during installation. If empty it is the same as the key
"""
def __init__(self, key, versions, name=None):
super(PlainPackage, self).__init__(key, versions, name)
def get_all_versions(self, cuda_version=None):
return self.filter_versions(self.versions)
class CudaPackage(BasePackage):
"""Class describing a cuda package with a key/name and a dictionary where the key
is a cuda version and value is the list of versions supported.
Parameters
----------
`key`: str
Name this package should be queried for
`versions`: dict or PckgVer class object
Dictionary, where the key is a cuda version and the value is the list of versions supported
`name`: str, , optional, default = None
Name of this package used during installation. If empty it is the same as the key.
If it includes `{cuda_v}` it is replaced by the cuda_version when queried
"""
def __init__(self, key, versions, name=None):
super(CudaPackage, self).__init__(key, versions, name)
if not isinstance(versions, dict):
raise TypeError("versions argument should by dict type [cuda_version : list_of_versions")
def get_name(self, cuda_version=None, idx=None):
cuda_version = self.max_cuda_version(cuda_version)
name = super().get_name(cuda_version, idx)
return name.format(cuda_v=cuda_version)
def get_all_versions(self, cuda_version):
cuda_version = self.max_cuda_version(cuda_version)
return self.filter_versions(self.versions[cuda_version])
def max_cuda_version(self, cuda_version):
"""Gets a compatible, available cuda version to one asked for.
If there is no cuda version in the version list that matches the one provided,
the cuda version that is not higher is used 10.2 -> 10, 9.2 -> 9
Parameters
----------
`cuda_version`: str
Cuda version used for this query
"""
max_cuda = None
for ver in sorted(self.versions.keys(), key=int):
if int(ver) <= int(cuda_version):
max_cuda = ver
return max_cuda
class CudaHttpPackage(CudaPackage):
"""Class describing a cuda package with a key/name and a dictionary where the key
is a cuda version and value is the list of directly accessible http links
When it asked for a package version it checks compatible platform tags and provides
a download link to a compatible package
Parameters
----------
`key`: str
Name this package should be queried for
`versions`: dict
Dictionary, where the key is a cuda version and the value is the list
of directly accessible http links. `{platform}` inside the link is replaced by the
compatible platform tag provided by pip
`name`: str, , optional, default = None
Name of this package used during installation. If empty it is the same as the key.
If it includes `{cuda_v}` it is replaced by the cuda_version when queried
"""
def __init__(self, key, versions, name=None):
super(CudaHttpPackage, self).__init__(key, versions, name)
def get_all_versions(self, cuda_version):
cuda_version = self.max_cuda_version(cuda_version)
ret = []
for v in self.versions[cuda_version]:
vers = self.get_pyvers_name(v, cuda_version)
if vers != "":
ret.append(vers)
return ret
def get_install_string(self, idx, cuda_version=None):
return "{version}".format(version=self.get_version(idx, cuda_version))
def test_request(self, url):
"""Checks if a provided url is available
Parameters
----------
`url`: str
Package url to be tested.
"""
url = url.split("://")
url[-1] = urllib.parse.quote(url[-1])
url = "://".join(url)
request = Request(url)
request.get_method = lambda : 'HEAD'
try:
_ = urlopen(request)
return url
except HTTPError:
return None
def get_pyvers_name(self, url, cuda_version):
"""Checks if a provided url is available for a given cuda version
It checks what package is available and is compatible with the available platforms
returned by the pip
Parameters
----------
`url`: str
Package url to be tested. `{cuda_v}` is replaced by cuda_version and `{platform}`
by the platform tag
`cuda_version`: str
Cuda version used for this query
"""
if isinstance(p.get_supported()[0], tuple):
# old PIP returns tuple
for py_ver in [(x, y, z) for (x, y, z) in p.get_supported() if y != 'none' and 'any' not in y]:
py_ver = "-".join(py_ver)
ret = self.test_request(url.format(platform=py_ver, cuda_v=cuda_version))
if ret:
return ret
else:
# new PIP returns object
for py_ver in [tag for tag in p.get_supported() if tag.abi != 'none' and tag.platform != 'any']:
py_ver = str(py_ver)
ret = self.test_request(url.format(platform=py_ver, cuda_v=cuda_version))
if ret:
return ret
return ""
all_packages = [PlainPackage("opencv-python", ["4.5.1.48"]),
CudaPackage("cupy",
{ "100" : ["8.6.0"],
"110" : ["8.6.0"] },
"cupy-cuda{cuda_v}"),
CudaPackage("mxnet",
{ "100" : ["1.8.0.post0"] },
"mxnet-cu{cuda_v}"),
CudaPackage("tensorflow-gpu",
{ "100" : [
PckgVer("1.15.4", python_max_ver="3.7"),
"2.3.1"],
"110" : [
PckgVer("1.15.4", python_max_ver="3.7"),
"2.3.1",
"2.4.1",
PckgVer("1.15.5+nv21.04", python_min_ver="3.8", python_max_ver="3.8", alias="nvidia-tensorflow")]
}),
CudaHttpPackage("torch",
{ "100" : ["http://download.pytorch.org/whl/cu{cuda_v}/torch-1.4.0+cu{cuda_v}-{platform}.whl"] }),
CudaHttpPackage("torchvision",
{ "100" : ["https://download.pytorch.org/whl/cu{cuda_v}/torchvision-0.5.0+cu{cuda_v}-{platform}.whl"] }),
CudaPackage("paddlepaddle-gpu",
{ "100" : ["2.0.2"],
"110" : ["2.0.2"]})
]
all_packages_keys = [pckg.key for pckg in all_packages]
parser = argparse.ArgumentParser(description='Env setup helper')
parser.add_argument('--list', '-l', help='list configs', action='store_true', default=False)
parser.add_argument('--num', '-n', help='return number of all configurations possible', action='store_true', default=False)
parser.add_argument('--install', '-i', dest='install', type=int, help="get Nth configuration", default=-1)
parser.add_argument('--all', '-a', dest='getall', action='store_true', help='return packages in all versions')
parser.add_argument('--remove', '-r', dest='remove', help="list packages to remove", action='store_true', default=False)
parser.add_argument('--cuda', dest='cuda', default="90", help="CUDA version to use")
parser.add_argument('--use', '-u', dest='use', default=[], help="provide only packages from this list", nargs='*')
args = parser.parse_args()
def print_configs(cuda_version):
"""Prints all available configurations"""
for pckg in all_packages:
print("{}:".format(pckg.get_name(cuda_version)))
for v in pckg.get_all_versions(cuda_version):
alias = BasePackage.get_alias(v)
if alias is not None:
v = "{}=={}".format(alias, v)
print("\t{}".format(v))
def cal_num_of_configs(packages, cuda_version):
"""Calculates how many different version configurations are available for given
packages and cuda version"""
ret = 1
for pckg in all_packages:
if pckg.key in packages:
ret *= pckg.get_num_of_version(cuda_version)
return ret
def for_all_pckg(packages, fun):
"""Iterates over all packages, executes a fun returns all fun results as a list"""
ret = []
for pckg in all_packages:
if pckg.key in packages:
ret.append(fun(pckg))
# add all remaining used packages with default versions
additional = [v for v in packages if v not in all_packages_keys]
return ret + additional
def get_remove_string(packages, cuda_version):
"""Creates pip remove string for given cuda version and package list"""
# Remove only these which version we want to change
ret = for_all_pckg(packages, lambda pckg: pckg.get_uninstall_names(cuda_version))
return " ".join(ret)
def get_all_strings(packages, cuda_version):
"""Prints all available configurations for given package list and cuda version"""
ret = for_all_pckg(packages, lambda pckg: pckg.get_all_install_strings(cuda_version))
return " ".join(ret)
def get_install_string(idx, packages, cuda_version):
"""Creates pip install string for given cuda version, variant number and package list"""
ret = for_all_pckg(packages, lambda pckg: pckg.get_install_string(idx, cuda_version))
# add all remaining used packages with default versions
return " ".join(ret)
def main():
global args
if args.list:
print_configs(args.cuda)
elif args.num:
print (cal_num_of_configs(args.use, args.cuda) - 1)
elif args.remove:
print (get_remove_string(args.use, args.cuda))
elif args.getall:
print(get_all_strings(args.use, args.cuda))
elif args.install >= 0:
print (get_install_string(args.install, args.use, args.cuda))
if __name__ == "__main__":
main()
| 39.916115 | 129 | 0.600265 |
79459efa2905d758735d069595e90b9f5f7cb775 | 3,245 | py | Python | demo/settings.py | asedeno/django-coverage6-breakage-demo | c723cd16b240330778b7ec1491ae7b5d2300c0ac | [
"MIT"
] | null | null | null | demo/settings.py | asedeno/django-coverage6-breakage-demo | c723cd16b240330778b7ec1491ae7b5d2300c0ac | [
"MIT"
] | null | null | null | demo/settings.py | asedeno/django-coverage6-breakage-demo | c723cd16b240330778b7ec1491ae7b5d2300c0ac | [
"MIT"
] | null | null | null | """
Django settings for demo project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-mopf8%tus+s%varv@^t7&44m!j(os5(lc_u77knm5n92$13#u@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'demo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.551181 | 91 | 0.699538 |
79459f36cc4bbcb50bab3fe5b746955131830700 | 1,647 | py | Python | examples/projects/trajectory_optimisation/direct_collocation/double_stage.py | SherbyRobotics/PyRobotics | 86eb1189258f6f41642a149c813dd2fd6853bcc1 | [
"MIT"
] | 9 | 2016-05-10T22:23:03.000Z | 2020-10-12T20:53:54.000Z | examples/projects/trajectory_optimisation/direct_collocation/double_stage.py | SherbyRobotics/PyRobotics | 86eb1189258f6f41642a149c813dd2fd6853bcc1 | [
"MIT"
] | 3 | 2016-08-05T15:19:21.000Z | 2019-04-08T15:12:49.000Z | examples/projects/trajectory_optimisation/direct_collocation/double_stage.py | SherbyRobotics/PyRobotics | 86eb1189258f6f41642a149c813dd2fd6853bcc1 | [
"MIT"
] | 5 | 2018-10-15T15:45:10.000Z | 2021-11-16T01:55:47.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 12:05:08 2018
@author: Alexandre
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import pendulum
from pyro.control import nonlinear
from pyro.planning import trajectoryoptimisation
from pyro.analysis import simulation
###############################################################################
sys = pendulum.DoublePendulum()
#Max/Min torque
sys.u_ub[0] = +20
sys.u_ub[1] = +20
sys.u_lb[0] = -20
sys.u_lb[1] = -20
tf = 4.0
# Coarse traj optimization
n = 20
dt = tf/n
planner = trajectoryoptimisation.DirectCollocationTrajectoryOptimisation( sys , dt , n )
planner.x_start = np.array([-3.14,0,0,0])
planner.x_goal = np.array([0,0,0,0])
planner.maxiter = 500
planner.compute_optimal_trajectory()
planner.show_solution()
# Fine traj optimization
n = 100
dt = tf/n
planner2 = trajectoryoptimisation.DirectCollocationTrajectoryOptimisation( sys , dt , n )
planner2.x_start = np.array([-3.14,0,0,0])
planner2.x_goal = np.array([0,0,0,0])
planner2.set_initial_trajectory_guest( planner.traj )
planner2.maxiter = 500
planner2.compute_optimal_trajectory()
planner2.show_solution()
planner2.save_solution( 'double_pendulum_directcollocation_hires.npy' )
# Controller
ctl = nonlinear.ComputedTorqueController( sys , planner2.traj )
ctl.rbar = np.array([0,0])
ctl.w0 = 5
ctl.zeta = 1
# New cl-dynamic
cl_sys = ctl + sys
# Simulation
cl_sys.x0 = np.array([-3.14,0,0,0])
cl_sys.plot_trajectory('xu')
cl_sys.animate_simulation() | 23.869565 | 89 | 0.632665 |
79459f67348991a567239b03b4e5761954752c38 | 3,115 | py | Python | appsupport_apps.py | arubdesu/EAs | 4f688e928f54728c9de4c240c18bf479edf7db5e | [
"Apache-2.0"
] | 30 | 2015-08-10T15:00:15.000Z | 2020-03-31T17:08:02.000Z | appsupport_apps.py | arubdesu/EAs | 4f688e928f54728c9de4c240c18bf479edf7db5e | [
"Apache-2.0"
] | null | null | null | appsupport_apps.py | arubdesu/EAs | 4f688e928f54728c9de4c240c18bf479edf7db5e | [
"Apache-2.0"
] | 7 | 2015-09-29T20:04:25.000Z | 2019-05-10T06:26:50.000Z | #!/usr/bin/python
"""Given a whitelist of legit helper apps, report on stuff to investigate."""
import json
import os
import subprocess
import sys
def osquery_check():
"""bail early if osquery not installed"""
if not os.path.exists('/usr/local/bin/osqueryi'):
result = 'wha? no osquery? bro, do you even lift?'
print "<result>%s</result>" % result
sys.exit(0)
def run_osquery(sql):
"""take sql command you'd like json output for from osquery"""
cmd = ['/usr/local/bin/osqueryi', '--json', sql]
jsony_out = subprocess.check_output(cmd)
try:
jsony_dictlist = json.loads(jsony_out)
except ValueError:
sys.exit(1)
return jsony_dictlist
def check_app(app):
"""rather than doing 'starts/endswith' tomfoolery, check in function"""
crappy_paths = ["CitrixOnline/",
"GoToMyPC Viewer",
"Hewlett-Packard",
"Java/",
"TextExpander",
"Web Applications",]
for path in crappy_paths:
if path in app:
return
return app
def main():
"""gimme some main"""
osquery_check()
allowed = ["Android File Transfer Agent.app",
"asannotation2.app",
"aswatcher.app",
"atmsupload.app",
"Box Edit.app",
"Box Local Com Server.app",
"Cisco WebEx Start.app",
"CitrixOnlineLauncher.app",
"CocoaDialog.app",
"CommitWindow.app",
"convertpdf.app",
"crash_report_sender.app",
"Dropbox.app",
"Event Center.app",
"InstallBoxEdit.app",
#"iSkysoft Helper Compact.app", same sketchy 'Wondershare' company as below
"Meeting Center.app",
"Network Recording Player.app",
"org.eclipse.equinox.app",# smooth, IBM, bundling eclipse(!) w/ SPSS
"SharedPackageExtensions.app",# something apple-related
"TextExpander Helper.app",
"TextExpander.app",
"Training Center.app",# webex-related
#"TunesGoWatch.app", sketchy senuti-like product
#"Wondershare Helper Compact.app", see what I mean by sketchy?
"XTrace.app",]
#pylint: disable=line-too-long
all_users_app_support_dicts = run_osquery("select path from apps where path like '/Users/%/%Library/Application Support%'")
just_paths, to_investigate = [], []
for path_dict in all_users_app_support_dicts:
if os.path.basename(path_dict['path']) not in allowed:
just_paths.append(path_dict['path'])
for path in just_paths:
got_caught = check_app(path)
if got_caught:
to_investigate.append(got_caught)
if to_investigate:
result = "Not in whitelist, investigate:\n" + "\n".join(*[to_investigate])
else:
result = "No strange apps in ~/*/Lib/AppSupport."
print "<result>%s</result>" % result
if __name__ == '__main__':
main()
| 34.611111 | 127 | 0.577849 |
79459fae829a021b3be2a8b1997280b24824a1d8 | 195 | py | Python | config/urls.py | devmedtz/nmb-python | c5f49050e232bcd9a4efb753cceb6df5bd8b5ed1 | [
"MIT"
] | 6 | 2021-06-23T16:37:40.000Z | 2021-10-11T13:54:44.000Z | config/urls.py | isayaeli/django-nmb | c5f49050e232bcd9a4efb753cceb6df5bd8b5ed1 | [
"MIT"
] | 1 | 2021-10-07T11:12:30.000Z | 2021-10-07T11:12:30.000Z | config/urls.py | isayaeli/django-nmb | c5f49050e232bcd9a4efb753cceb6df5bd8b5ed1 | [
"MIT"
] | 2 | 2021-06-25T06:57:32.000Z | 2021-11-27T07:42:28.000Z | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('example/', include('example.urls', namespace='example')),
]
| 24.375 | 67 | 0.702564 |
7945a0fb3a07ec1c2f3c71f396c4287deeeb9ebf | 3,337 | py | Python | src/crawl_quora.py | so3500/similarity-of-two-question | 8770754081828ceee30441a2e629b79df750c2a1 | [
"MIT"
] | null | null | null | src/crawl_quora.py | so3500/similarity-of-two-question | 8770754081828ceee30441a2e629b79df750c2a1 | [
"MIT"
] | null | null | null | src/crawl_quora.py | so3500/similarity-of-two-question | 8770754081828ceee30441a2e629b79df750c2a1 | [
"MIT"
] | null | null | null | import csv
from GoogleScraper import scrape_with_config, GoogleSearchError
# Function to inspect the search results
def test_google_search(search):
# let's inspect what we got
for serp in search.serps:
print('=' * 50)
print('serp', serp)
print('serp.search_engine_name: ', serp.search_engine_name)
print('serp.scrape_method: ', serp.scrape_method)
print('serp.page_number: ', serp.page_number)
print('serp.requested_at: ', serp.requested_at)
print('serp.num_results: ', serp.num_results)
# ... more attributes ...
for link in serp.links:
print(link)
print('=' * 50)
# Remove the subtitle from the title
def preprocess_title(title):
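# e.g. "What is the best way to learn Python? - Quora" -> "What is the best way to learn Python? "
# (only a trailing " Quora" segment, as produced by Google result titles, is stripped)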
splited_title = title.split('-')
p_title = ''
if len(splited_title) > 1:
if splited_title[len(splited_title)-1] == ' Quora':
# p_title = splited_title[0]
for i in range(len(splited_title)-1):
p_title += splited_title[i]
else:
p_title = title
else:
p_title = title
# elif len(title.split('|')) > 1:
# title = title.split('|')[0]
return p_title
def get_config(**kwargs):
config = {
'use_own_ip': True,
'keyword': kwargs['keyword'],
'search_engines': ['google'],
'num_pages_for_keyword': kwargs['num_pages'],
'scrape_method': 'selenium',
'sel_browser': 'chrome',
'do_caching': False,
'output_filename': kwargs['filename'],
}
return config
# keyword = input('input question :') + ' site:www.quora.com'
# keyword = "What is the best way to make money online? site:www.quora.com"
def crawl_data(keyword):
file_num = 0
output_filename = './crawling_output/output_{}.csv'.format(file_num)
params = {
'keyword': keyword + ' site:www.quora.com',
'num_pages': 2,
'filename': output_filename,
}
config = get_config(**params)
title_list = []
title_origin_list = []
similarity_list = []
link_list = []
dict_idx = 0
output_dict = {}
try:
search = scrape_with_config(config)
except GoogleSearchError as e:
print(e)
else:
# function that inspects the search results
# test_google_search(search)
# open scv file
with open(output_filename, 'r', newline='') as csv_file:
# csv_reader = csv.reader(csv_file, delimiter=',')
csv_reader = csv.DictReader(csv_file, delimiter=',')
for row in csv_reader:
title_origin = row['title']
title = row['title']
link = row['link']
# remove the subtitle from the title
# if the title contains - or | like 'title - src site', cut it off
title = preprocess_title(title)
# build a dictionary element and add it
dict_element = {
'title' : title,
'title_origin' : title_origin,
'similarity' : 0.0,
'link' : link,
}
output_dict[dict_idx] = dict_element
title_list.append(title)
title_origin_list.append(title_origin)
link_list.append(row['link'])
dict_idx += 1
# if nothing matches, keep the sentence as-is
csv_file.close()
return title_list,link_list | 29.530973 | 75 | 0.549296 |
7945a18d21c56ba76f4dee062c6c0d266bbcd1a5 | 13,172 | py | Python | nephoria/testcases/s3/osg_load_test.py | Corymbia/nephoria | fe8ad7beba479e7a8de70f09cf1fdb4d640b4987 | [
"BSD-2-Clause"
] | 5 | 2016-08-31T06:51:18.000Z | 2018-04-25T20:50:25.000Z | nephoria/testcases/s3/osg_load_test.py | Corymbia/nephoria | fe8ad7beba479e7a8de70f09cf1fdb4d640b4987 | [
"BSD-2-Clause"
] | 49 | 2016-04-19T07:01:46.000Z | 2017-11-17T23:57:18.000Z | nephoria/testcases/s3/osg_load_test.py | Corymbia/nephoria | fe8ad7beba479e7a8de70f09cf1fdb4d640b4987 | [
"BSD-2-Clause"
] | 14 | 2016-04-14T17:01:35.000Z | 2021-07-13T13:35:38.000Z | #!/usr/bin/env python
from __future__ import division
from cStringIO import StringIO
import tempfile
import os
from concurrent.futures.thread import ThreadPoolExecutor
from math import ceil
from nephoria.testcase_utils.cli_test_runner import CliTestRunner, SkipTestException
from nephoria.testcontroller import TestController
import copy
import time
class OSGConcurrentTests(CliTestRunner):
_DEFAULT_CLI_ARGS = copy.copy(CliTestRunner._DEFAULT_CLI_ARGS)
_DEFAULT_CLI_ARGS['buckets'] = {
'args': ['--buckets'],
'kwargs': {'dest': 'buckets', 'help': 'Number of buckets', 'default': 5, 'type': int}
}
_DEFAULT_CLI_ARGS['objects'] = {
'args': ['--objects'],
'kwargs': {'dest': 'objects', 'help': 'Number of objects', 'default': 5, 'type': int}
}
_DEFAULT_CLI_ARGS['threads'] = {
'args': ['--threads'],
'kwargs': {'dest': 'threads', 'help': 'Number of threads', 'default': 3, 'type': int}
}
_DEFAULT_CLI_ARGS['object_size'] = {
'args': ['--object-size'],
'kwargs': {'dest': 'object_size', 'help': 'Size of the objects to upload', 'default': 5, 'type': int}
}
_DEFAULT_CLI_ARGS['mpu_threshold'] = {
'args': ['--mpu-threshold'],
'kwargs': {'dest': 'mpu_threshold', 'default': 5120, 'type': int,
'help': 'Multipart upload is used when the object size is bigger than the mpu-threshold'
'value in Kilobyte. Any value less than 5120KB will result single file upload. '
'Default value is used when not passed as an argument.'}
}
bucket_list = []
temp_files = []
total_put_latency = 0
total_get_latency = 0
total_del_latency = 0
@property
def tc(self):
tc = getattr(self, '__tc', None)
if not tc:
tc = TestController(self.args.clc,
password=self.args.password,
clouduser_name=self.args.test_user,
clouduser_account=self.args.test_account,
log_level=self.args.log_level)
setattr(self, '__tc', tc)
return tc
@property
def user(self):
user = getattr(self, '__user', None)
if not user:
try:
user = self.tc.get_user_by_name(aws_account_name=self.args.test_account,
aws_user_name=self.args.test_user)
except:
user = self.tc.create_user_using_cloudadmin(aws_account_name=self.args.test_account,
aws_user_name=self.args.test_user)
setattr(self, '__user', user)
return user
@property
def bucket_prefix(self):
bucket_prefix = getattr(self, '__bucket_prefix', None)
if not bucket_prefix:
bucket_prefix = "nephoria-bucket-" + str(int(time.time()))
return bucket_prefix
@bucket_prefix.setter
def bucket_prefix(self, value):
setattr(self, '__bucket_prefix', value)
def create_file(self, size_in_kb, file_name="nephoria-object"):
temp_file = tempfile.NamedTemporaryFile(mode='w+b', prefix=file_name)
self.temp_files.append(temp_file)
temp_file.write(os.urandom(1024 * size_in_kb))
return temp_file.name
def single_upload(self, bucket, key_name, file_path):
key = bucket.new_key(key_name)
key.set_contents_from_filename(file_path)
self.log.debug("Uploaded key '" + key_name + "' to bucket '" + bucket.name + "'")
return key
def multipart_upload(self, bucket, key_name, eufile):
part_size = 1024 * self.args.mpu_threshold
eufile.seek(0, os.SEEK_END)
eufile_size = eufile.tell()
num_parts = int(ceil(eufile_size / part_size))
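# e.g. with the default --mpu-threshold of 5120 KB the part size is 5 MB, so a 12 MB
# object yields ceil(12 / 5) = 3 parts (true division thanks to the __future__ import above).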
mpu = bucket.initiate_multipart_upload(key_name)
self.log.debug("Initiated MPU. Using MPU Id: " + mpu.id)
for i in range(num_parts):
start = part_size * i
file_part = open(eufile.name, 'rb')
file_part.seek(start)
data = file_part.read(part_size)
file_part.close()
mpu.upload_part_from_file(StringIO(data), i + 1)
self.log.debug("Uploaded part " + str(i + 1) + " of '" + key_name + "' to bucket '" + bucket.name + "'")
self.log.debug("Completing multipart upload of '" + key_name + "' to bucket '" +
bucket.name + "'" + " using mpu id: " + mpu.id)
mpu.complete_upload()
self.log.debug("Completed multipart upload of '" + key_name + "' to bucket '" + bucket.name + "'")
def put_get_check(self, bucket_name, key_name, eu_file):
"""
PUT objects, GET objects and then verify objects with object hash
5MB is a hard-coded limit for MPU in OSG
"""
bucket = self.tc.admin.s3.get_bucket_by_name(bucket_name)
if (os.path.getsize(eu_file.name) > (5 * 1024 * 1024)) and (self.args.mpu_threshold >= (5 * 1024)):
self.multipart_upload(bucket, key_name, eu_file)
else:
upload_time = self.time_to_exec(self.single_upload, bucket, key_name, eu_file.name)
with open('osg_perf.log', 'a') as f:
f.write('PUT\t' + str(upload_time) + '\n')
return True
def time_to_exec(self, method, *args, **kwargs):
start_time = time.time()
method_name = method.__name__
try:
result = method(*args, **kwargs)
except Exception as e:
self.log.error(e)
end_time = time.time()
total_time = end_time - start_time
self.log.error("Failed to run method: " + method_name)
else:
end_time = time.time()
total_time = end_time - start_time
return total_time
def create_buckets(self, num):
for i in range(num):
bucket_name = self.bucket_prefix + '-' + str(i)
self.log.debug('creating bucket: ' + bucket_name)
self.bucket_list.append(bucket_name)
bucket = self.tc.admin.s3.connection.create_bucket(bucket_name)
self.log.debug(self.tc.admin.s3.connection.get_all_buckets())
def get_content(self, key):
self.log.debug("Getting content as string for: " + key.name)
content = key.get_contents_as_string()
def put_objects(self, bucket_name, key_name, eu_file=None):
"""
Args:
bucket_name: existing bucket_name to put objects
key_name: name of the key
eu_file: file to put into bucket
"""
bucket = self.tc.admin.s3.get_bucket_by_name(bucket_name)
if (os.path.getsize(eu_file.name) > (5 * 1024 * 1024)) and (self.args.mpu_threshold >= (5 * 1024)):
self.multipart_upload(bucket, key_name, eu_file)
else:
upload_time = self.time_to_exec(self.single_upload, bucket, key_name, eu_file.name)
self.total_put_latency = self.total_put_latency + upload_time
with open('osg_perf.log', 'a') as f:
f.write('PUT\t\t' + str(upload_time) + '\n')
return True
def test1_concurrent_upload(self):
with open('osg_perf.log', 'w') as f:
f.write(str('OPS\t\t Time ') + '\n')
f.write(str('---\t\t----------') + '\n')
self.log.debug("Creating buckets..")
self.create_buckets(self.args.buckets)
self.log.debug("Creating object of " + str(self.args.object_size) + "KB")
eu_file = open(self.create_file(self.args.object_size))
thread_pool = []
with ThreadPoolExecutor(max_workers=self.args.threads) as executor:
for bucket_name in self.bucket_list:
for k in range(self.args.objects):
thread_pool.append(executor.submit(self.put_objects,
bucket_name=bucket_name,
key_name=eu_file.name + '-' + str(k),
eu_file=eu_file))
lock_time = 2
self.log.debug("len(thread_pool): " + str(len(thread_pool)))
while len(thread_pool) < (self.args.buckets * self.args.objects):
self.log.debug("len(thread_pool): " + str(len(thread_pool)))
self.log.warning("Uncanny lock, sleeping for " + str(lock_time) + " seconds.")
time.sleep(lock_time)
for tp in thread_pool:
try:
if not tp.result():
self.log.error("[CRITICAL] failed upload in thread")
except Exception as e:
self.log.error("Found exception in thread-pool: " + e.message)
def get_objects(self, key):
download_time = self.time_to_exec(self.get_content, key)
self.total_get_latency = self.total_get_latency + download_time
with open('osg_perf.log', 'a') as f:
f.write('GET\t\t' + str(download_time) + '\n')
def test2_get_objects(self):
get_thread_pool = []
with ThreadPoolExecutor(max_workers=self.args.threads) as executor:
for bucket_name in self.bucket_list:
bucket = self.tc.admin.s3.get_bucket_by_name(bucket_name)
max_keys = 10
keys = bucket.get_all_keys(max_keys=max_keys)
for key in keys:
get_thread_pool.append(executor.submit(self.get_objects, key))
while keys.next_marker:
self.log.debug("found keys.next_marker: " + keys.next_marker)
keys = bucket.get_all_keys(marker=keys.next_marker)
for key in keys:
get_thread_pool.append(executor.submit(self.get_objects, key))
self.log.debug("len(get_thread_pool): " + str(len(get_thread_pool)))
lock_time = 2
while len(get_thread_pool) < (self.args.buckets * self.args.objects):
self.log.debug("len(get_thread_pool): " + str(len(get_thread_pool)))
self.log.warning("Uncanny lock, sleeping for " + str(lock_time) + " seconds.")
time.sleep(lock_time)
def delete_key(self, key):
self.log.debug('deleting key: ' + key.name)
delete_time = self.time_to_exec(key.delete)
self.total_del_latency = self.total_del_latency + delete_time
with open('osg_perf.log', 'a') as f:
f.write('DEL\t\t' + str(delete_time) + '\n')
return True
def test3_delete_objects(self):
clean_thread_pool = []
with ThreadPoolExecutor(max_workers=self.args.threads) as executor:
for bucket_name in self.bucket_list:
bucket = self.tc.admin.s3.get_bucket_by_name(bucket_name)
max_keys = 10
keys = bucket.get_all_keys(max_keys=max_keys)
for key in keys:
clean_thread_pool.append(executor.submit(self.delete_key, key))
while keys.next_marker:
self.log.debug("found keys.next_marker: " + keys.next_marker)
keys = bucket.get_all_keys(marker=keys.next_marker)
for key in keys:
clean_thread_pool.append(executor.submit(self.delete_key, key))
self.log.debug("len(clean_thread_pool): " + str(len(clean_thread_pool)))
lock_time = 2
while len(clean_thread_pool) < (self.args.buckets * self.args.objects):
self.log.debug("len(clean_thread_pool): " + str(len(clean_thread_pool)))
self.log.warning("Uncanny lock, sleeping for " + str(2) + " seconds.")
time.sleep(lock_time)
for ctp in clean_thread_pool:
try:
if not ctp.result():
self.log.error("[CRITICAL] failed delete in thread")
except Exception as e:
self.log.error("Found exception in clean_thread_pool: " + e.message)
for bucket_name in self.bucket_list:
self.tc.admin.s3.connection.delete_bucket(bucket_name)
for tf in self.temp_files:
tf.close()
def test4_calculate_average_latency(self):
with open('osg_perf.log', 'a') as f:
f.write('\n\n')
f.write(' Average Latency ' + '\n')
f.write('-------------------' + '\n')
avg_put = self.total_put_latency / (self.args.objects * self.args.buckets)
with open('osg_perf.log', 'a') as f:
f.write('Avg PUT\t\t' + str(avg_put) + '\n')
avg_get = self.total_get_latency / (self.args.objects * self.args.buckets)
with open('osg_perf.log', 'a') as f:
f.write('Avg GET\t\t' + str(avg_get) + '\n')
avg_del = self.total_del_latency / (self.args.objects * self.args.buckets)
with open('osg_perf.log', 'a') as f:
f.write('Avg DEL\t\t' + str(avg_del) + '\n')
def clean_method(self):
pass
if __name__ == "__main__":
test = OSGConcurrentTests()
test_result = test.run()
exit(test_result)
| 42.217949 | 116 | 0.583586 |
7945a31ec5626cdfd7b412f6ee1d836d9ae9bf13 | 2,048 | py | Python | open_set_data.py | ivanchenzx/CEVT | 635301a0864115a1f95e01627dd29b005463c7ae | [
"MIT"
] | 5 | 2022-01-17T18:14:55.000Z | 2022-01-29T01:18:04.000Z | open_set_data.py | ivanchenzx/CEVT | 635301a0864115a1f95e01627dd29b005463c7ae | [
"MIT"
] | 1 | 2022-01-20T19:08:21.000Z | 2022-01-25T14:35:54.000Z | open_set_data.py | ivanchenzx/CEVT | 635301a0864115a1f95e01627dd29b005463c7ae | [
"MIT"
] | null | null | null |
def open_setting(source_file_org, source_file_new,
num_known, target_file_org=None, target_file_new=None,
validation_file_org=None, validation_file_new=None):
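# Assumed annotation-line format, inferred from the splits below: "<path> <num_frames> <label>".
# e.g. with num_known=3 a target/validation line "clips/v_0001 120 5" is rewritten as
# "clips/v_0001 120 3", collapsing every unknown class onto the single extra label num_known.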
# keep known source samples only
for line in source_file_org:
if int(line.strip().split(' ')[2]) < num_known:
source_file_new.write(line)
for line in target_file_org:
if int(line.strip().split(' ')[2]) >= num_known:
target_file_new.write(
line.strip().split(' ')[0] +
" " +
line.strip().split(' ')[1] +
" " +
str(num_known) +
"\n"
)
else:
target_file_new.write(line)
for line in validation_file_org:
if int(line.strip().split(' ')[2]) >= num_known:
validation_file_new.write(
line.strip().split(' ')[0] +
" " +
line.strip().split(' ')[1] +
" " +
str(num_known) +
"\n"
)
else:
validation_file_new.write(line)
# e.g. Olympic -> UCF
source_file_org = open("dataset/olympic/" \
"list_olympic_train_ucf_olympic-feature_org.txt", "r")
source_file_new = open("dataset/olympic/" \
"list_olympic_train_ucf_olympic-feature.txt", "w+")
target_file_org = open("dataset/ucf101/" \
"list_ucf101_train_ucf_olympic-feature_org.txt", "r")
target_file_new = open("dataset/ucf101/" \
"list_ucf101_train_ucf_olympic-feature.txt", "w+")
validation_file_org = open("dataset/ucf101/" \
"list_ucf101_val_ucf_olympic-feature_org.txt", "r")
validation_file_new = open("dataset/ucf101/" \
"list_ucf101_val_ucf_olympic-feature.txt", "w+")
open_setting(source_file_org, source_file_new, 3, target_file_org=target_file_org, target_file_new=target_file_new, validation_file_org=validation_file_org, validation_file_new=validation_file_new)
| 39.384615 | 197 | 0.578125 |
7945a5478c5efb58f38eacee7667271a7908a7ec | 1,584 | py | Python | galleries/annotations_parsers/file_name_parser.py | mnicolas94/galleries | e1563764f7db699a94884f9dbae1799a0f05c4c0 | [
"MIT"
] | null | null | null | galleries/annotations_parsers/file_name_parser.py | mnicolas94/galleries | e1563764f7db699a94884f9dbae1799a0f05c4c0 | [
"MIT"
] | null | null | null | galleries/annotations_parsers/file_name_parser.py | mnicolas94/galleries | e1563764f7db699a94884f9dbae1799a0f05c4c0 | [
"MIT"
] | null | null | null | import os
from typing import Optional, Dict
from galleries.annotations_parsers.gallery_annots_parsers import GalleryAnnotationsParser
class FileNameSepParser(GalleryAnnotationsParser):
"""
Parser that extracts annotations from an image's file name.
The file name is split with a separator and each resulting element is an annotation.
Example:
fp = FileNameSepParser(('label', 'age', 'sex'), sep='_')
annots = fp('C:/dir/Fulano_32_M.jpg')
annots will be equal to:
{ 'label': 'Fulano', 'age': '32', 'sex': 'M' }
"""
def __init__(self, annot_names=None, sep='-'):
self.annot_names = annot_names or []
self.sep = sep
def __call__(self, img_path: str) -> dict:
return self.get_annotations_by_image_index(img_path)
def get_annotations_by_image_index(self, img_index: str) -> dict:
_, file = os.path.split(img_index)
filename, _ = os.path.splitext(file)
tokens = self._split_tokens(filename)
annots = {}
for i, token in enumerate(tokens):
if i == len(self.annot_names):
break
annot_name = self.annot_names[i]
annots[annot_name] = token
return annots
def get_annotations_types(self) -> Optional[Dict[str, type]]:
return {annot_name: str for annot_name in self.annot_names}
def _split_tokens(self, filename: str):
if len(self.sep) == 1:
return filename.split(sep=self.sep)
else:
tokens = []
string = filename
for separator in self.sep:
token, string = string.split(separator, 1)
tokens.append(token)
return tokens
def get_discrete_annotations_values(self) -> Dict[str, list]:
return {}
| 29.333333 | 94 | 0.717803 |
7945a58fc0d3608d539b5fbd23429fdce8b70c28 | 23,578 | py | Python | sdk/python/pulumi_aws/opsworks/haproxy_layer.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/opsworks/haproxy_layer.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/opsworks/haproxy_layer.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['HaproxyLayer']
class HaproxyLayer(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_assign_elastic_ips: Optional[pulumi.Input[bool]] = None,
auto_assign_public_ips: Optional[pulumi.Input[bool]] = None,
auto_healing: Optional[pulumi.Input[bool]] = None,
custom_configure_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_deploy_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_instance_profile_arn: Optional[pulumi.Input[str]] = None,
custom_json: Optional[pulumi.Input[str]] = None,
custom_security_group_ids: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_setup_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_shutdown_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_undeploy_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
drain_elb_on_shutdown: Optional[pulumi.Input[bool]] = None,
ebs_volumes: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['HaproxyLayerEbsVolumeArgs']]]]] = None,
elastic_load_balancer: Optional[pulumi.Input[str]] = None,
healthcheck_method: Optional[pulumi.Input[str]] = None,
healthcheck_url: Optional[pulumi.Input[str]] = None,
install_updates_on_boot: Optional[pulumi.Input[bool]] = None,
instance_shutdown_timeout: Optional[pulumi.Input[float]] = None,
name: Optional[pulumi.Input[str]] = None,
stack_id: Optional[pulumi.Input[str]] = None,
stats_enabled: Optional[pulumi.Input[bool]] = None,
stats_password: Optional[pulumi.Input[str]] = None,
stats_url: Optional[pulumi.Input[str]] = None,
stats_user: Optional[pulumi.Input[str]] = None,
system_packages: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
use_ebs_optimized_instances: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides an OpsWorks haproxy layer resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
lb = aws.opsworks.HaproxyLayer("lb",
stack_id=aws_opsworks_stack["main"]["id"],
stats_password="foobarbaz")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_assign_elastic_ips: Whether to automatically assign an elastic IP address to the layer's instances.
:param pulumi.Input[bool] auto_assign_public_ips: For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
:param pulumi.Input[bool] auto_healing: Whether to enable auto-healing for the layer.
:param pulumi.Input[str] custom_instance_profile_arn: The ARN of an IAM profile that will be used for the layer's instances.
:param pulumi.Input[str] custom_json: Custom JSON attributes to apply to the layer.
:param pulumi.Input[List[pulumi.Input[str]]] custom_security_group_ids: Ids for a set of security groups to apply to the layer's instances.
:param pulumi.Input[bool] drain_elb_on_shutdown: Whether to enable Elastic Load Balancing connection draining.
:param pulumi.Input[List[pulumi.Input[pulumi.InputType['HaproxyLayerEbsVolumeArgs']]]] ebs_volumes: `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
:param pulumi.Input[str] elastic_load_balancer: Name of an Elastic Load Balancer to attach to this layer
:param pulumi.Input[str] healthcheck_method: HTTP method to use for instance healthchecks. Defaults to "OPTIONS".
:param pulumi.Input[str] healthcheck_url: URL path to use for instance healthchecks. Defaults to "/".
:param pulumi.Input[bool] install_updates_on_boot: Whether to install OS and package updates on each instance when it boots.
:param pulumi.Input[float] instance_shutdown_timeout: The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
:param pulumi.Input[str] name: A human-readable name for the layer.
:param pulumi.Input[str] stack_id: The id of the stack the layer will belong to.
:param pulumi.Input[bool] stats_enabled: Whether to enable HAProxy stats.
:param pulumi.Input[str] stats_password: The password to use for HAProxy stats.
:param pulumi.Input[str] stats_url: The HAProxy stats URL. Defaults to "/haproxy?stats".
:param pulumi.Input[str] stats_user: The username for HAProxy stats. Defaults to "opsworks".
:param pulumi.Input[List[pulumi.Input[str]]] system_packages: Names of a set of system packages to install on the layer's instances.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
:param pulumi.Input[bool] use_ebs_optimized_instances: Whether to use EBS-optimized instances.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['auto_assign_elastic_ips'] = auto_assign_elastic_ips
__props__['auto_assign_public_ips'] = auto_assign_public_ips
__props__['auto_healing'] = auto_healing
__props__['custom_configure_recipes'] = custom_configure_recipes
__props__['custom_deploy_recipes'] = custom_deploy_recipes
__props__['custom_instance_profile_arn'] = custom_instance_profile_arn
__props__['custom_json'] = custom_json
__props__['custom_security_group_ids'] = custom_security_group_ids
__props__['custom_setup_recipes'] = custom_setup_recipes
__props__['custom_shutdown_recipes'] = custom_shutdown_recipes
__props__['custom_undeploy_recipes'] = custom_undeploy_recipes
__props__['drain_elb_on_shutdown'] = drain_elb_on_shutdown
__props__['ebs_volumes'] = ebs_volumes
__props__['elastic_load_balancer'] = elastic_load_balancer
__props__['healthcheck_method'] = healthcheck_method
__props__['healthcheck_url'] = healthcheck_url
__props__['install_updates_on_boot'] = install_updates_on_boot
__props__['instance_shutdown_timeout'] = instance_shutdown_timeout
__props__['name'] = name
if stack_id is None:
raise TypeError("Missing required property 'stack_id'")
__props__['stack_id'] = stack_id
__props__['stats_enabled'] = stats_enabled
if stats_password is None:
raise TypeError("Missing required property 'stats_password'")
__props__['stats_password'] = stats_password
__props__['stats_url'] = stats_url
__props__['stats_user'] = stats_user
__props__['system_packages'] = system_packages
__props__['tags'] = tags
__props__['use_ebs_optimized_instances'] = use_ebs_optimized_instances
__props__['arn'] = None
super(HaproxyLayer, __self__).__init__(
'aws:opsworks/haproxyLayer:HaproxyLayer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
auto_assign_elastic_ips: Optional[pulumi.Input[bool]] = None,
auto_assign_public_ips: Optional[pulumi.Input[bool]] = None,
auto_healing: Optional[pulumi.Input[bool]] = None,
custom_configure_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_deploy_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_instance_profile_arn: Optional[pulumi.Input[str]] = None,
custom_json: Optional[pulumi.Input[str]] = None,
custom_security_group_ids: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_setup_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_shutdown_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
custom_undeploy_recipes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
drain_elb_on_shutdown: Optional[pulumi.Input[bool]] = None,
ebs_volumes: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['HaproxyLayerEbsVolumeArgs']]]]] = None,
elastic_load_balancer: Optional[pulumi.Input[str]] = None,
healthcheck_method: Optional[pulumi.Input[str]] = None,
healthcheck_url: Optional[pulumi.Input[str]] = None,
install_updates_on_boot: Optional[pulumi.Input[bool]] = None,
instance_shutdown_timeout: Optional[pulumi.Input[float]] = None,
name: Optional[pulumi.Input[str]] = None,
stack_id: Optional[pulumi.Input[str]] = None,
stats_enabled: Optional[pulumi.Input[bool]] = None,
stats_password: Optional[pulumi.Input[str]] = None,
stats_url: Optional[pulumi.Input[str]] = None,
stats_user: Optional[pulumi.Input[str]] = None,
system_packages: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
use_ebs_optimized_instances: Optional[pulumi.Input[bool]] = None) -> 'HaproxyLayer':
"""
Get an existing HaproxyLayer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name(ARN) of the layer.
:param pulumi.Input[bool] auto_assign_elastic_ips: Whether to automatically assign an elastic IP address to the layer's instances.
:param pulumi.Input[bool] auto_assign_public_ips: For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
:param pulumi.Input[bool] auto_healing: Whether to enable auto-healing for the layer.
:param pulumi.Input[str] custom_instance_profile_arn: The ARN of an IAM profile that will be used for the layer's instances.
:param pulumi.Input[str] custom_json: Custom JSON attributes to apply to the layer.
:param pulumi.Input[List[pulumi.Input[str]]] custom_security_group_ids: Ids for a set of security groups to apply to the layer's instances.
:param pulumi.Input[bool] drain_elb_on_shutdown: Whether to enable Elastic Load Balancing connection draining.
:param pulumi.Input[List[pulumi.Input[pulumi.InputType['HaproxyLayerEbsVolumeArgs']]]] ebs_volumes: `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
:param pulumi.Input[str] elastic_load_balancer: Name of an Elastic Load Balancer to attach to this layer
:param pulumi.Input[str] healthcheck_method: HTTP method to use for instance healthchecks. Defaults to "OPTIONS".
:param pulumi.Input[str] healthcheck_url: URL path to use for instance healthchecks. Defaults to "/".
:param pulumi.Input[bool] install_updates_on_boot: Whether to install OS and package updates on each instance when it boots.
:param pulumi.Input[float] instance_shutdown_timeout: The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
:param pulumi.Input[str] name: A human-readable name for the layer.
:param pulumi.Input[str] stack_id: The id of the stack the layer will belong to.
:param pulumi.Input[bool] stats_enabled: Whether to enable HAProxy stats.
:param pulumi.Input[str] stats_password: The password to use for HAProxy stats.
:param pulumi.Input[str] stats_url: The HAProxy stats URL. Defaults to "/haproxy?stats".
:param pulumi.Input[str] stats_user: The username for HAProxy stats. Defaults to "opsworks".
:param pulumi.Input[List[pulumi.Input[str]]] system_packages: Names of a set of system packages to install on the layer's instances.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
:param pulumi.Input[bool] use_ebs_optimized_instances: Whether to use EBS-optimized instances.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["auto_assign_elastic_ips"] = auto_assign_elastic_ips
__props__["auto_assign_public_ips"] = auto_assign_public_ips
__props__["auto_healing"] = auto_healing
__props__["custom_configure_recipes"] = custom_configure_recipes
__props__["custom_deploy_recipes"] = custom_deploy_recipes
__props__["custom_instance_profile_arn"] = custom_instance_profile_arn
__props__["custom_json"] = custom_json
__props__["custom_security_group_ids"] = custom_security_group_ids
__props__["custom_setup_recipes"] = custom_setup_recipes
__props__["custom_shutdown_recipes"] = custom_shutdown_recipes
__props__["custom_undeploy_recipes"] = custom_undeploy_recipes
__props__["drain_elb_on_shutdown"] = drain_elb_on_shutdown
__props__["ebs_volumes"] = ebs_volumes
__props__["elastic_load_balancer"] = elastic_load_balancer
__props__["healthcheck_method"] = healthcheck_method
__props__["healthcheck_url"] = healthcheck_url
__props__["install_updates_on_boot"] = install_updates_on_boot
__props__["instance_shutdown_timeout"] = instance_shutdown_timeout
__props__["name"] = name
__props__["stack_id"] = stack_id
__props__["stats_enabled"] = stats_enabled
__props__["stats_password"] = stats_password
__props__["stats_url"] = stats_url
__props__["stats_user"] = stats_user
__props__["system_packages"] = system_packages
__props__["tags"] = tags
__props__["use_ebs_optimized_instances"] = use_ebs_optimized_instances
return HaproxyLayer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name(ARN) of the layer.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="autoAssignElasticIps")
def auto_assign_elastic_ips(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to automatically assign an elastic IP address to the layer's instances.
"""
return pulumi.get(self, "auto_assign_elastic_ips")
@property
@pulumi.getter(name="autoAssignPublicIps")
def auto_assign_public_ips(self) -> pulumi.Output[Optional[bool]]:
"""
For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
"""
return pulumi.get(self, "auto_assign_public_ips")
@property
@pulumi.getter(name="autoHealing")
def auto_healing(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to enable auto-healing for the layer.
"""
return pulumi.get(self, "auto_healing")
@property
@pulumi.getter(name="customConfigureRecipes")
def custom_configure_recipes(self) -> pulumi.Output[Optional[List[str]]]:
return pulumi.get(self, "custom_configure_recipes")
@property
@pulumi.getter(name="customDeployRecipes")
def custom_deploy_recipes(self) -> pulumi.Output[Optional[List[str]]]:
return pulumi.get(self, "custom_deploy_recipes")
@property
@pulumi.getter(name="customInstanceProfileArn")
def custom_instance_profile_arn(self) -> pulumi.Output[Optional[str]]:
"""
The ARN of an IAM profile that will be used for the layer's instances.
"""
return pulumi.get(self, "custom_instance_profile_arn")
@property
@pulumi.getter(name="customJson")
def custom_json(self) -> pulumi.Output[Optional[str]]:
"""
Custom JSON attributes to apply to the layer.
"""
return pulumi.get(self, "custom_json")
@property
@pulumi.getter(name="customSecurityGroupIds")
def custom_security_group_ids(self) -> pulumi.Output[Optional[List[str]]]:
"""
Ids for a set of security groups to apply to the layer's instances.
"""
return pulumi.get(self, "custom_security_group_ids")
@property
@pulumi.getter(name="customSetupRecipes")
def custom_setup_recipes(self) -> pulumi.Output[Optional[List[str]]]:
return pulumi.get(self, "custom_setup_recipes")
@property
@pulumi.getter(name="customShutdownRecipes")
def custom_shutdown_recipes(self) -> pulumi.Output[Optional[List[str]]]:
return pulumi.get(self, "custom_shutdown_recipes")
@property
@pulumi.getter(name="customUndeployRecipes")
def custom_undeploy_recipes(self) -> pulumi.Output[Optional[List[str]]]:
return pulumi.get(self, "custom_undeploy_recipes")
@property
@pulumi.getter(name="drainElbOnShutdown")
def drain_elb_on_shutdown(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to enable Elastic Load Balancing connection draining.
"""
return pulumi.get(self, "drain_elb_on_shutdown")
@property
@pulumi.getter(name="ebsVolumes")
def ebs_volumes(self) -> pulumi.Output[Optional[List['outputs.HaproxyLayerEbsVolume']]]:
"""
`ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
"""
return pulumi.get(self, "ebs_volumes")
@property
@pulumi.getter(name="elasticLoadBalancer")
def elastic_load_balancer(self) -> pulumi.Output[Optional[str]]:
"""
Name of an Elastic Load Balancer to attach to this layer
"""
return pulumi.get(self, "elastic_load_balancer")
@property
@pulumi.getter(name="healthcheckMethod")
def healthcheck_method(self) -> pulumi.Output[Optional[str]]:
"""
HTTP method to use for instance healthchecks. Defaults to "OPTIONS".
"""
return pulumi.get(self, "healthcheck_method")
@property
@pulumi.getter(name="healthcheckUrl")
def healthcheck_url(self) -> pulumi.Output[Optional[str]]:
"""
URL path to use for instance healthchecks. Defaults to "/".
"""
return pulumi.get(self, "healthcheck_url")
@property
@pulumi.getter(name="installUpdatesOnBoot")
def install_updates_on_boot(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to install OS and package updates on each instance when it boots.
"""
return pulumi.get(self, "install_updates_on_boot")
@property
@pulumi.getter(name="instanceShutdownTimeout")
def instance_shutdown_timeout(self) -> pulumi.Output[Optional[float]]:
"""
The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
"""
return pulumi.get(self, "instance_shutdown_timeout")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A human-readable name for the layer.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="stackId")
def stack_id(self) -> pulumi.Output[str]:
"""
The id of the stack the layer will belong to.
"""
return pulumi.get(self, "stack_id")
@property
@pulumi.getter(name="statsEnabled")
def stats_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to enable HAProxy stats.
"""
return pulumi.get(self, "stats_enabled")
@property
@pulumi.getter(name="statsPassword")
def stats_password(self) -> pulumi.Output[str]:
"""
The password to use for HAProxy stats.
"""
return pulumi.get(self, "stats_password")
@property
@pulumi.getter(name="statsUrl")
def stats_url(self) -> pulumi.Output[Optional[str]]:
"""
The HAProxy stats URL. Defaults to "/haproxy?stats".
"""
return pulumi.get(self, "stats_url")
@property
@pulumi.getter(name="statsUser")
def stats_user(self) -> pulumi.Output[Optional[str]]:
"""
The username for HAProxy stats. Defaults to "opsworks".
"""
return pulumi.get(self, "stats_user")
@property
@pulumi.getter(name="systemPackages")
def system_packages(self) -> pulumi.Output[Optional[List[str]]]:
"""
Names of a set of system packages to install on the layer's instances.
"""
return pulumi.get(self, "system_packages")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="useEbsOptimizedInstances")
def use_ebs_optimized_instances(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to use EBS-optimized instances.
"""
return pulumi.get(self, "use_ebs_optimized_instances")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.70614 | 220 | 0.6744 |
7945a622d98db3f4700a8fc112b9b8f9ea8122f2 | 835 | py | Python | api/serializers/file.py | altest-com/siice-api | 322098b91a8dc22de1dddb5102a4c8beddd9a009 | [
"MIT"
] | null | null | null | api/serializers/file.py | altest-com/siice-api | 322098b91a8dc22de1dddb5102a4c8beddd9a009 | [
"MIT"
] | null | null | null | api/serializers/file.py | altest-com/siice-api | 322098b91a8dc22de1dddb5102a4c8beddd9a009 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from ._abstracts import TrackTimeSerializer, MaskFieldSerializer
from .. import models
class ImageSerializer(TrackTimeSerializer, MaskFieldSerializer):
size_bytes = serializers.IntegerField(read_only=True)
height = serializers.IntegerField(read_only=True)
width = serializers.IntegerField(read_only=True)
class Meta:
model = models.Image
fields = TrackTimeSerializer.Meta.fields + (
'image',
'size_bytes',
'height',
'width'
)
class FileSerializer(TrackTimeSerializer, MaskFieldSerializer):
size_bytes = serializers.IntegerField(read_only=True)
class Meta:
model = models.File
fields = TrackTimeSerializer.Meta.fields + (
'file',
'size_bytes'
)
| 26.09375 | 64 | 0.668263 |
7945a6282744b41505a77580d14dfb1361c9d785 | 291 | py | Python | src/api/domain/operation/CreateDataOperation/CreateDataOperationCommand.py | PythonDataIntegrator/pythondataintegrator | 6167778c36c2295e36199ac0d4d256a4a0c28d7a | [
"MIT"
] | 14 | 2020-12-19T15:06:13.000Z | 2022-01-12T19:52:17.000Z | src/api/domain/operation/CreateDataOperation/CreateDataOperationCommand.py | PythonDataIntegrator/pythondataintegrator | 6167778c36c2295e36199ac0d4d256a4a0c28d7a | [
"MIT"
] | 43 | 2021-01-06T22:05:22.000Z | 2022-03-10T10:30:30.000Z | src/api/domain/operation/CreateDataOperation/CreateDataOperationCommand.py | PythonDataIntegrator/pythondataintegrator | 6167778c36c2295e36199ac0d4d256a4a0c28d7a | [
"MIT"
] | 4 | 2020-12-18T23:10:09.000Z | 2021-04-02T13:03:12.000Z | from dataclasses import dataclass
from infrastructure.cqrs.ICommand import ICommand
from domain.operation.CreateDataOperation.CreateDataOperationRequest import CreateDataOperationRequest
@dataclass
class CreateDataOperationCommand(ICommand):
request: CreateDataOperationRequest = None
| 32.333333 | 102 | 0.876289 |
7945a8acbc93d4653987b97c08b7f1863f996b70 | 3,082 | py | Python | ucscentralsdk/mometa/compute/ComputeResourceAggrEp.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | ucscentralsdk/mometa/compute/ComputeResourceAggrEp.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | ucscentralsdk/mometa/compute/ComputeResourceAggrEp.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for ComputeResourceAggrEp ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class ComputeResourceAggrEpConsts():
POLLING_INTERVAL_10_MIN = "10 min"
POLLING_INTERVAL_15_MIN = "15 min"
POLLING_INTERVAL_20_MIN = "20 min"
POLLING_INTERVAL_30_MIN = "30 min"
POLLING_INTERVAL_45_MIN = "45 min"
POLLING_INTERVAL_5_MIN = "5 min"
POLLING_INTERVAL_60_MIN = "60 min"
class ComputeResourceAggrEp(ManagedObject):
"""This is ComputeResourceAggrEp class."""
consts = ComputeResourceAggrEpConsts()
naming_props = set([])
mo_meta = MoMeta("ComputeResourceAggrEp", "computeResourceAggrEp", "compute", VersionMeta.Version101a, "InputOutput", 0x1f, [], ["admin"], [u'topRoot'], [u'computeFaultUpgradeFlag', u'computeGroupMembership', u'computeProfile', u'computeResourceSetManager', u'computeSystem', u'faultGlobalSeverityHolder', u'faultGlobalTypedHolder'], ["Get", "Set"])
prop_meta = {
"available_physical_cnt": MoPropertyMeta("available_physical_cnt", "availablePhysicalCnt", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"polling_interval": MoPropertyMeta("polling_interval", "pollingInterval", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["10 min", "15 min", "20 min", "30 min", "45 min", "5 min", "60 min"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"total_physical_cnt": MoPropertyMeta("total_physical_cnt", "totalPhysicalCnt", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
}
prop_map = {
"availablePhysicalCnt": "available_physical_cnt",
"childAction": "child_action",
"dn": "dn",
"pollingInterval": "polling_interval",
"rn": "rn",
"status": "status",
"totalPhysicalCnt": "total_physical_cnt",
}
def __init__(self, **kwargs):
self._dirty_mask = 0
self.available_physical_cnt = None
self.child_action = None
self.polling_interval = None
self.status = None
self.total_physical_cnt = None
ManagedObject.__init__(self, "ComputeResourceAggrEp", **kwargs)
| 55.035714 | 353 | 0.693056 |
7945a935965eac4912399856a4a2582a9562172d | 545 | py | Python | address_book/forms.py | Pack144/py_packman | 5e34cb02f89008cc363440b115410d63462bc610 | [
"MIT"
] | 1 | 2019-05-04T05:21:01.000Z | 2019-05-04T05:21:01.000Z | address_book/forms.py | Pack144/py_packman | 5e34cb02f89008cc363440b115410d63462bc610 | [
"MIT"
] | 10 | 2020-06-05T21:02:10.000Z | 2022-03-11T23:50:18.000Z | address_book/forms.py | Pack144/py_packman | 5e34cb02f89008cc363440b115410d63462bc610 | [
"MIT"
] | null | null | null | from django.forms import ModelForm
from .models import Address, PhoneNumber
class AddressForm(ModelForm):
class Meta:
model = Address
fieldsets = (
(None, {'fields': ('street', 'street2', ('city', 'state', 'zip_code'))}),
('Permissions', {'fields': ('published',)}),
)
class PhoneNumberForm(ModelForm):
class Meta:
model = PhoneNumber
fieldsets = (
(None, {'fields': ('type', 'number')}),
('Permissions', {'fields': ('published',)}),
)
| 24.772727 | 85 | 0.53945 |
7945a94750f0b2dd5e09ca20c2e28d9c32bd377a | 1,349 | py | Python | setup.py | RakhithJK/ExtractTable-py | c4e555c5bd8ce3e8b184bcf91e8b3277519943bf | [
"Apache-2.0"
] | 136 | 2019-10-07T01:24:43.000Z | 2022-03-29T12:18:48.000Z | setup.py | RakhithJK/ExtractTable-py | c4e555c5bd8ce3e8b184bcf91e8b3277519943bf | [
"Apache-2.0"
] | 15 | 2019-10-10T14:03:43.000Z | 2021-11-27T07:14:58.000Z | setup.py | RakhithJK/ExtractTable-py | c4e555c5bd8ce3e8b184bcf91e8b3277519943bf | [
"Apache-2.0"
] | 20 | 2019-10-10T21:28:47.000Z | 2022-02-24T09:44:58.000Z | # -*- coding: utf-8 -*-
import os
from setuptools import find_packages
about = {}
with open(os.path.join('ExtractTable', '__version__.py'), 'r') as f:
exec(f.read(), about)
with open('README.md', 'r') as f:
readme = f.read()
with open("requirements.txt") as fh:
requires = [x.strip() for x in fh.readlines()]
def setup_package():
metadata = dict(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=readme,
long_description_content_type="text/markdown",
url=about['__url__'],
author=about['__author__'],
author_email=about['__author_email__'],
license=about['__license__'],
packages=find_packages(),
install_requires=requires,
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
])
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(**metadata)
if __name__ == '__main__':
setup_package()
| 27.530612 | 80 | 0.610823 |
7945a9a49bace20f8276a8859ec11c789d8b88a6 | 707 | py | Python | setup.py | wally-yu/selenium-framework | c1c85ec7268b3fc51087ec0a8a5300271687b4c7 | [
"MIT"
] | 1 | 2018-12-05T09:32:45.000Z | 2018-12-05T09:32:45.000Z | setup.py | wally-yu/selenium-framework | c1c85ec7268b3fc51087ec0a8a5300271687b4c7 | [
"MIT"
] | null | null | null | setup.py | wally-yu/selenium-framework | c1c85ec7268b3fc51087ec0a8a5300271687b4c7 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
version = '0.2.0'
with open('README.md') as readme_file:
long_description = readme_file.read()
setup(
name='pyselenium_framework',
version=version,
packages=find_packages(exclude=['AUT', 'Execution', 'driver_binary_files']),
author=u'Wally Yu',
install_requires=['selenium==3.14.1'],
url='https://github.com/wally-yu/selenium-framework',
include_package_data=True,
license='MIT License',
description='A Python Selenium Framework Which Makes Code More Easy to Maintain and Read',
long_description=long_description,
long_description_content_type='text/markdown',
)
# build: python3 setup.py sdist bdist_wheel | 33.666667 | 94 | 0.729844 |
7945aa3d29032c80b5b4086211911cf6f42349f4 | 4,415 | py | Python | src/logpipe/backend/kafka.py | twentythirdz/django-logpipe | 05d90225fc993e6dc8fea27f41c56e8e8debf340 | [
"ISC"
] | null | null | null | src/logpipe/backend/kafka.py | twentythirdz/django-logpipe | 05d90225fc993e6dc8fea27f41c56e8e8debf340 | [
"ISC"
] | null | null | null | src/logpipe/backend/kafka.py | twentythirdz/django-logpipe | 05d90225fc993e6dc8fea27f41c56e8e8debf340 | [
"ISC"
] | null | null | null | from django.apps import apps
from ..exceptions import MissingTopicError
from .. import settings
from . import RecordMetadata, Record, get_offset_backend
import kafka
import logging
logger = logging.getLogger(__name__)
class ModelOffsetStore(object):
def commit(self, consumer, message):
KafkaOffset = apps.get_model(app_label='logpipe', model_name='KafkaOffset')
logger.debug('Commit offset "%s" for topic "%s", partition "%s" to %s' % (
message.offset, message.topic, message.partition, self.__class__.__name__))
obj, created = KafkaOffset.objects.get_or_create(
topic=message.topic,
partition=message.partition)
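        # Store the next offset to consume (last processed + 1) so a restarted consumer resumes after this message.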
obj.offset = message.offset + 1
obj.save()
def seek(self, consumer, topic, partition):
KafkaOffset = apps.get_model(app_label='logpipe', model_name='KafkaOffset')
tp = kafka.TopicPartition(topic=topic, partition=partition)
try:
obj = KafkaOffset.objects.get(topic=topic, partition=partition)
logger.debug('Seeking to offset "%s" on topic "%s", partition "%s"' % (obj.offset, topic, partition))
consumer.client.seek(tp, obj.offset)
except KafkaOffset.DoesNotExist:
logger.debug('Seeking to beginning of topic "%s", partition "%s"' % (topic, partition))
consumer.client.seek_to_beginning(tp)
class KafkaOffsetStore(object):
def commit(self, consumer, message):
logger.debug('Commit offset "%s" for topic "%s", partition "%s" to %s' % (
message.offset, message.topic, message.partition, self.__class__.__name__))
consumer.client.commit()
def seek(self, consumer, topic, partition):
pass
class Consumer(object):
_client = None
def __init__(self, topic_name, **kwargs):
self.topic_name = topic_name
self.client_kwargs = kwargs
@property
def client(self):
if not self._client:
kwargs = self._get_client_config()
self._client = kafka.KafkaConsumer(**kwargs)
tps = self._get_topic_partitions()
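            # Partitions are assigned manually (rather than via group subscription) so the configured offset backend controls where reading starts.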
self._client.assign(tps)
backend = get_offset_backend()
for tp in tps:
backend.seek(self, tp.topic, tp.partition)
self._client.committed(tp)
return self._client
def __iter__(self):
return self
def __next__(self):
r = next(self.client)
record = Record(
topic=r.topic,
partition=r.partition,
offset=r.offset,
timestamp=r.timestamp,
key=r.key,
value=r.value)
return record
def _get_topic_partitions(self):
p = []
partitions = self.client.partitions_for_topic(self.topic_name)
if not partitions:
raise MissingTopicError('Could not find topic %s. Does it exist?' % self.topic_name)
for partition in partitions:
tp = kafka.TopicPartition(self.topic_name, partition=partition)
p.append(tp)
return p
def _get_client_config(self):
kwargs = {
'auto_offset_reset': 'earliest',
'enable_auto_commit': False,
'consumer_timeout_ms': 1000,
}
kwargs.update(settings.get('KAFKA_CONSUMER_KWARGS', {}))
kwargs.update(self.client_kwargs)
kwargs.update({
'bootstrap_servers': settings.get('KAFKA_BOOTSTRAP_SERVERS'),
})
return kwargs
class Producer(object):
_client = None
@property
def client(self):
if not self._client:
kwargs = self._get_client_config()
self._client = kafka.KafkaProducer(**kwargs)
return self._client
def send(self, topic_name, key, value):
key = key.encode()
timeout = settings.get('KAFKA_SEND_TIMEOUT', 10)
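        # send() returns a future; blocking on it makes the publish effectively synchronous and surfaces broker errors within the timeout.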
future = self.client.send(topic_name, key=key, value=value)
metadata = future.get(timeout=timeout)
return RecordMetadata(
topic=topic_name,
partition=metadata.partition,
offset=metadata.offset)
def _get_client_config(self):
kwargs = {
'bootstrap_servers': settings.get('KAFKA_BOOTSTRAP_SERVERS'),
'retries': settings.get('KAFKA_MAX_SEND_RETRIES', 0)
}
kwargs.update(settings.get('KAFKA_PRODUCER_KWARGS', {}))
return kwargs
| 34.224806 | 113 | 0.622197 |
7945aa5ed6df4e096353c1952df16cd3db94243c | 31,162 | py | Python | src/prediction/GAN_Regression.py | IBM/hybrid-expert-intuition-model | e21d7b4233458ebd0c4f73aac43e74d7d64f8cdb | [
"Apache-2.0"
] | 1 | 2020-08-19T00:18:24.000Z | 2020-08-19T00:18:24.000Z | src/prediction/GAN_Regression.py | IBM/hybrid-expert-intuition-model | e21d7b4233458ebd0c4f73aac43e74d7d64f8cdb | [
"Apache-2.0"
] | null | null | null | src/prediction/GAN_Regression.py | IBM/hybrid-expert-intuition-model | e21d7b4233458ebd0c4f73aac43e74d7d64f8cdb | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib import animation
import seaborn as sns
from prediction.LR_Categorized import *
import time
import pickle
from util.function_plot import *
from preprocessing import *
class InputGenerator(object):
"""
    InputGenerator generates (x, s) pairs for the GAN
    x: deal attributes, s: price
"""
def __init__(self, feature):
"""
to init generator
:param feature: input (x, s) : [N, (num_attr+num_pricedim)]
"""
self.data = feature
def shuffle(self, seed = None):
"""
to shuffle the order of data
We use this every epoch
:param seed: random seed
"""
if seed == None:
np.random.seed(seed=int(time.time()))
# np.random.seed(seed=11)
else:
np.random.seed(seed)
id_data = list(range(len(self.data)))
np.random.shuffle(id_data)
self.data = self.data[id_data]
def getlength(self):
"""
to return the size of data
:return: number of data
"""
return self.data.shape[0]
def sample(self, N):
"""
to sample N samples from data
        :param N: number of samples to draw
:return: [N, (num_attr+num_pricedim)]
"""
self.shuffle()
return self.data[:N]
def generator(self, batch_size):
"""
        To generate batches of (batch_size) samples for training the GAN
        :param batch_size: number of samples per batch
        :return: a batch [batch_size, (num_attr+num_pricedim)]
"""
samples_per_epoch = self.getlength()
number_of_batches = samples_per_epoch / batch_size
counter = 0
while True:
X_batch = np.array(self.data[batch_size * counter:batch_size * (counter + 1)]).astype('float32')
counter += 1
yield X_batch
            # restart counter to yield data in the next epoch as well
if counter >= number_of_batches:
counter = 0
self.shuffle()
def linear(input, output_dim, scope=None, stddev=1.0, randseed=None):
"""
To add a fully-connected layer
:param input: input tensor
:param output_dim: the dimension of output
:param scope: scope of vars
:param stddev: for init of w
    :param randseed: seed for initialization
:return: output of this layer [N, output_dim]
"""
if randseed == None:
randseed = int(time.time())
# randseed = 12
with tf.variable_scope(scope or 'linear'):
w = tf.get_variable(
'w',
[input.get_shape()[1], output_dim],
initializer=tf.random_normal_initializer(stddev=stddev, seed=randseed)
)
b = tf.get_variable(
'b',
[output_dim],
initializer=tf.constant_initializer(0.0)
)
return tf.matmul(input, w) + b
def generator(input, h_dim, pricedim = 1, featdim = 45):
"""
Generator in GAN (# G(x) -> s*)
    :param input: input vector [N, num of deal attributes + pricedim]
    :param h_dim: num of neurons in the hidden layers of the generator
:param pricedim: the number of possible categorized values
    :param featdim: total input dimension (price + deal attributes)
:return: output of generator
"""
    # [price, x] -> to get x by splitting
price, deal_attr_only = tf.split(input, [pricedim, featdim - pricedim], 1)
h0 = tf.nn.relu(linear(deal_attr_only, h_dim, 'g0'))
h1 = tf.nn.relu(linear(h0, h_dim, 'g1'))
h2 = linear(h1, pricedim, 'g2')
generated_price = tf.nn.sigmoid(h2)
# attach again with the new generated price [price*, x]
output_generator = tf.concat([generated_price, deal_attr_only], 1)
return output_generator
def discriminator(input, h_dim):
"""
Discriminator for GAN
    :param input: input of discriminator [N, num of deal attributes + pricedim]
:param h_dim: # of linear layer's hidden nodes
    :return: output of discriminator [N, 1]
"""
h0 = tf.nn.relu(linear(input, h_dim * 2, 'd0'))
    h1 = tf.nn.relu(linear(h0, h_dim, 'd1'))
h2 = tf.nn.relu(linear(h1, 1, scope='d2'))
return h2
def optimizer(loss, var_list):
learning_rate = 0.001
step = tf.Variable(0, trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
loss,
global_step=step,
var_list=var_list
)
return optimizer
def log(x):
'''
    Sometimes discriminator outputs can reach values close to
(or even slightly less than) zero due to numerical rounding.
This just makes sure that we exclude those values so that we don't
end up with NaNs during optimisation.
'''
return tf.log(tf.maximum(x, 1e-5))
class GAN(object):
def __init__(self, params, featdim = 1, pricedim = 1):
with tf.variable_scope('G'):
# input feature
self.z = tf.placeholder(tf.float32, shape=(params.batch_size, featdim))
# generated price
self.G = generator(self.z, params.hidden_size, pricedim=pricedim, featdim=featdim)
# for test (batch=1)
with tf.variable_scope('G', reuse=True):
self.test_z = tf.placeholder(tf.float32, shape=(1, featdim))
self.G_test = generator(self.test_z, params.hidden_size, pricedim=pricedim, featdim=featdim)
# Here we create two copies of the discriminator network
# that share parameters, as you cannot use the same network with
# different inputs in TensorFlow.
self.x = tf.placeholder(tf.float32, shape=(params.batch_size, featdim))
with tf.variable_scope('D'):
self.D1 = discriminator(
self.x,
params.hidden_size
)
with tf.variable_scope('D', reuse=True):
self.D2 = discriminator(
self.G,
params.hidden_size
)
# Define the loss for discriminator and generator networks
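        # D1 scores real (price, x) pairs and D2 scores generated (price*, x) pairs.
        # The 1.1 factor slightly up-weights the real-sample term of the discriminator loss
        # (kept as written in the original implementation).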
self.loss_d = tf.reduce_mean(-1.1 * log(self.D1) + log(self.D2))
self.loss_g = tf.reduce_mean(-log(self.D2))
vars = tf.trainable_variables()
self.d_params = [v for v in vars if v.name.startswith('D/')]
self.g_params = [v for v in vars if v.name.startswith('G/')]
self.opt_d = optimizer(self.loss_d, self.d_params)
self.opt_g = optimizer(self.loss_g, self.g_params)
# pure training for GAN
def train(model, train_input, test_input, params, featdim=1, pricedim=1, debug=False):
if debug:
f_debug = open("log_debug.txt", "w")
with tf.Session() as session:
tf.local_variables_initializer().run()
tf.global_variables_initializer().run()
train_sample = train_input.generator(params.batch_size) # batch generator
test_sample = test_input.generator(1) # batch generator
for step in range(params.num_steps + 1):
# 1. update discriminator
x = next(train_sample)
            if x.size != params.batch_size * featdim:
                print("x does not contain a full batch. Size: ", x.size)
continue
z = x # using same feature for generator and discriminator
loss_d, _, = session.run([model.loss_d, model.opt_d], {
model.x: np.reshape(x, (params.batch_size, featdim)),
model.z: np.reshape(z, (params.batch_size, featdim))
})
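            # The generator is only updated after the first 10% of steps, presumably to give the discriminator a warm-up period.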
if step > (params.num_steps * 0.1):
# 2. update generator
z = next(train_sample)
loss_g, _ = session.run([model.loss_g, model.opt_g], {
model.z: np.reshape(z, (params.batch_size, featdim))
})
if debug:
# if step % params.log_every == 0:
dis_1, dis_2, train_price = session.run([model.D1, model.D2, model.G], {
model.x: np.reshape(x, (params.batch_size, featdim)),
model.z: np.reshape(z, (params.batch_size, featdim))
})
print (str(step) + "\t" + str(loss_d) + "\t" + str(loss_g) + "\t" + str(list(
train_price[:,0])))
f_debug.write((str(step) + "\t" + str(loss_d) + "\t" + str(loss_g) + "\t" + str(list(
np.reshape(dis_1, [1, -1])[0])) + "\t\t" + str(list(np.reshape(dis_2, [1, -1])[0])) + "\t" + str(list(
train_price[:,0]))) + "\n")
np_test_output = np.empty([0, pricedim])
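        # After training, generate a price s* for each test sample one at a time (the test graph is built with batch size 1).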
for i in range (int(test_input.getlength())):
z = next(test_sample)
output = session.run([model.G_test], {
model.test_z: np.reshape(z, (1, featdim))
})
np_test_output = np.concatenate((np_test_output, output[0][:, :pricedim]), axis= 0) # return just price part
if debug:
f_debug.close()
return np_test_output
def GANRegression(args, train_feature, test_feature, pricedim = 1, debug=False):
"""
To train GAN for regression
    :param args: input arguments
:param train_feature: [N, 36]
:param test_feature: [N, 36]
    :param pricedim: the number of categorized values for price
:param debug: debug option (True: ON)
:return: testing data's regression output for another classifier
"""
tf.reset_default_graph()
# 2. define graph
model = GAN(args, featdim=(train_feature.shape[1]), pricedim=pricedim)
# 3. define generator
    train_input = InputGenerator(train_feature)
    test_input = InputGenerator(test_feature) # this is for making output after training (NOT USED FOR TRAINING)
# 4. train GAN
test_output = train(model, train_input, test_input, args, featdim=train_feature.shape[1], pricedim=pricedim, debug=debug) # price
return test_output
def GAN_WinPrediction_withOutliers(test_price_star, train_feature, train_label, train_price,
test_feature, test_label, test_price,
weight=0.5, op_prior=0, op_plot=False, op_diff=0.1, n_bins=12, op_valid=0, op_classifier=0 , debug=False):
"""
To train and test classifier using prior and regression
    :param test_price_star: regressed prices (s*) produced by the GAN
    :param train_feature: [N, 36] (e.g., 36 IBM deal attributes)
    :param train_label: [N, 1]
    :param train_price: [N, 1]
    :param test_feature: [M, 36] (e.g., 36 IBM deal attributes)
    :param test_label: [M, 1]
    :param test_price: [M, 1]
    :param op_valid: validation-fold index, used only to name the exported plot
    :param op_classifier: classifier index, used only to name the exported plot
:param weight: weight of prior knowledge
    :param op_prior: 0 - do not use prior, 1 - use it in a hybrid way (our proposal), 2 - always use the combined prediction with prior, 3 - prior only
:param op_plot: True - export plot / False - Not
    :param op_diff: || s - s* ||_2 threshold for hybrid classification (if op_prior = 1)
:param n_bins: number of total bins
:param debug: debug options
    :return: accuracy on testing data, per-bin mean win probabilities, and per-sample output probabilities
"""
# intuition var
train_price = np.reshape(train_price, (len(train_price), 1))
test_price = np.reshape(test_price, (len(test_price), 1))
# feature: (x, s)
train_feature_all = np.concatenate([train_feature, train_price], axis=-1)
test_feature_all = np.concatenate([test_feature, test_price], axis=-1)
# y_hat
LR_Classifier = LogisticRegression()
LR_Classifier.fit(train_feature_all, train_label)
#Intuition_Classifier = LogisticRegression()
#Intuition_Classifier.fit(intuition_feature_all, intuition_set[1])
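    # ||s - s*||: gap between the quoted price and the GAN-regressed price; a large gap suggests the deal is not well captured by the data-driven model.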
diff = abs(round_decimal(test_price) - round_decimal(test_price_star)) # rounded up
prediction = LR_Classifier.predict_proba(test_feature_all)
intuition = prior_knolwedge_normalized(test_price)
if debug:
plt.clf()
plt.hist(diff, bins=np.linspace(0, 1.0, num=40)) # arguments are passed to np.histogram
plt.xlim(0, 1.0)
plt.title("Histogram of ${||s-s^{*}||}$")
# plt.show()
plt.savefig("gan_regression_histrogram(s-s_star).png")
diff = list(diff)
d_price_prob = {}
l_output_prob = []
for i in range(n_bins):
d_price_prob[i] = []
for i in range(len(diff)):
i_price = test_price[i]
id_price = int(i_price * 10)
if id_price == 10: id_price = 9 # out-of-bin handling
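        # Blend the data-driven win probability (y_hat) with the expert-intuition prior (y_prior); op_prior selects y_hat only, the fixed blend, the prior only, or the difference-based hybrid below.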
y_hat = prediction[i][1] / (prediction[i][0] + prediction[i][1])
y_prior = intuition[i].item()
y_compromised = (1 - weight) * y_hat + weight * y_prior
if op_prior == 0: # y_hat
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif op_prior == 2: # just compromised
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
elif op_prior == 3: # prior only
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else: # conditional
if diff[i] == 0:
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif diff[i] >= op_diff:
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else:
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
mean = []
std = []
x_range = []
# bar plot
# for i in range(n_bins):
# if len(d_price_prob[i]) == 0:
# mean.append(0)
# std.append(0)
# else:
# mean.append(np.mean(d_price_prob[i]))
# std.append(np.std(d_price_prob[i]))
# x_range.append(i * 0.1 + 0.05)
#
# if op_plot:
# # Call the function to create plot
# plt.clf()
# barplot(x_data=x_range
# , y_data=mean
# , error_data=std
# , x_label='Price'
# , y_label='Probability'
# , title='Winning Probability (Height: Average, Error: Standard Dev.)')
#
# plt.xlim(0, 1.0)
# plt.plot([0., 1.], [1., 0], 'k-', marker='o', lw=2) # domain knowledge
# plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
# line plot
for i in range(n_bins):
if len(d_price_prob[i]) == 0:
continue
else:
mean.append(np.mean(d_price_prob[i]))
std.append(np.std(d_price_prob[i]))
x_range.append(i * 0.1 + 0.05)
if op_plot:
plt.clf()
plt.plot(x_range, mean, 'r-', marker='o', lw=1, label='Our Method (LR with Intuition)')
plt.xlabel('Price')
plt.ylabel('Winning Probability')
plt.xlim(0, 1.0)
plt.plot([0., 1.], [1., 0], 'k-', lw=1, label='Expert\'s Intuition')
plt.legend(loc='upper right', shadow=True)
plt.savefig("gan_regression_bar_plot_classfier_" + str(op_classifier) + "_valid_" + str(op_valid) + "_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
l_output_prediction = []
for i in range(len(diff)):
if l_output_prob[i] > 0.5:
l_output_prediction.append(1.0)
else:
l_output_prediction.append(0.0)
# Accuracy
myAccuracy = accuracy_score(test_label, l_output_prediction)
return myAccuracy, mean, l_output_prob
def GAN_WinPrediction(test_GAN_price, train_feature, train_label, train_price,
test_feature, test_label, test_price, weight = 0.5, op_prior = 0, op_plot = False, op_diff = 0.1, n_bins = 12, debug = False):
"""
To train and test classifier using prior and regression
    :param test_GAN_price: regressed prices (s*) produced by the GAN
:param train_feature: [N, 36]
:param train_label: [N, 1]
:param train_price: [N, 1]
:param test_feature: [M, 36]
:param test_label: [M, 1]
:param test_price: [M, 1]
:param weight: weight of prior knowledge
:param op_prior: 0 - do not use prior, 1 - use it in a hybrid way (our proposal), 2- always use the combined prediction with prior, 3- prior only
:param op_plot: True - export plot / False - Not
    :param op_diff: || s - s* ||_2 threshold for hybrid classification (if op_prior = 1)
:param n_bins: number of total bins
:param debug: debug options
:return: accuracy from testing data
"""
train_price = np.reshape(train_price, (len(train_price), 1))
test_price = np.reshape(test_price, (len(test_price), 1))
# feature: (x, s)
train_feature_all = np.concatenate([train_feature, train_price], axis=-1)
test_feature_all = np.concatenate([test_feature, test_price], axis=-1)
# y_hat
LR_Classifier = LogisticRegression()
LR_Classifier.fit(train_feature_all, train_label)
test_price_star = np.reshape(np.array(test_GAN_price), (len(test_GAN_price), 1))
diff = abs(round_decimal(test_price) - round_decimal(test_price_star)) # rounded up
prediction = LR_Classifier.predict_proba(test_feature_all)
if debug:
plt.clf()
plt.hist(diff, bins=np.linspace(0, 1.0, num=40)) # arguments are passed to np.histogram
plt.xlim(0, 1.0)
plt.title("Histogram of ${||s-s^{*}||}$")
# plt.show()
plt.savefig("gan_regression_histrogram(s-s_star).png")
diff = list(diff)
d_price_prob = {}
l_output_prob = []
for i in range(n_bins):
d_price_prob[i] = []
for i in range(len(diff)):
i_price = test_price[i]
id_price = int(i_price * 10)
if id_price == 10: id_price = 9 # out-of-bin handling
y_hat = prediction[i][1] / (prediction[i][0] + prediction[i][1])
y_prior = prior_knolwedge_normalized(i_price)
y_compromised = (1 - weight) * y_hat + weight * y_prior
if op_prior == 0: # y_hat
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif op_prior == 2: # just compromised
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
elif op_prior == 3: # prior only
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else: # conditional
if diff[i] == 0:
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif diff[i] >= op_diff:
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else:
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
mean = []
std = []
x_range = []
# bar plot
# for i in range(n_bins):
# if len(d_price_prob[i]) == 0:
# mean.append(0)
# std.append(0)
# else:
# mean.append(np.mean(d_price_prob[i]))
# std.append(np.std(d_price_prob[i]))
# x_range.append(i * 0.1 + 0.05)
#
# if op_plot:
# # Call the function to create plot
# plt.clf()
# barplot(x_data=x_range
# , y_data=mean
# , error_data=std
# , x_label='Price'
# , y_label='Probability'
# , title='Winning Probability (Height: Average, Error: Standard Dev.)')
#
# plt.xlim(0, 1.0)
# plt.plot([0., 1.], [1., 0], 'k-', marker='o', lw=2) # domain knowledge
# plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
# line plot
for i in range(n_bins):
if len(d_price_prob[i]) == 0:
continue
else:
mean.append(np.mean(d_price_prob[i]))
std.append(np.std(d_price_prob[i]))
x_range.append(i * 0.1 + 0.05)
if op_plot:
plt.clf()
plt.plot(x_range, mean, 'r-', marker='o', lw=1, label='Our Method (LR with Intuition)')
plt.xlabel('Price')
plt.ylabel('Winning Probability')
plt.xlim(0, 1.0)
plt.plot([0., 1.], [1., 0], 'k-', lw=1, label='Expert\'s Intuition')
plt.legend(loc='upper right', shadow=True)
plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
l_output_prediction = []
for i in range(len(diff)):
if l_output_prob[i] > 0.5:
l_output_prediction.append(1.0)
else:
l_output_prediction.append(0.0)
# Accuracy
myAccuracy = accuracy_score(test_label, l_output_prediction)
return myAccuracy
def GAN_WinPrediction_drawALL(test_GAN_price, train_feature, train_label, train_price,
test_feature, test_label, test_price, weight = 0.5, op_prior = 0, op_plot = False, op_diff = 0.1, n_bins = 12, debug = False):
"""
To train and test classifier using prior and regression
    :param test_GAN_price: regressed prices (s*) produced by the GAN
:param train_feature: [N, 36]
:param train_label: [N, 1]
:param train_price: [N, 1]
:param test_feature: [M, 36]
:param test_label: [M, 1]
:param test_price: [M, 1]
:param weight: weight of prior knowledge
:param op_prior: 0 - do not use prior, 1 - use it in a hybrid way (our proposal), 2- always use the combined prediction with prior, 3- prior only
:param op_plot: True - export plot / False - Not
    :param op_diff: || s - s* ||_2 threshold for hybrid classification (if op_prior = 1)
:param n_bins: number of total bins
:param debug: debug options
:return: accuracy from testing data
"""
train_price = np.reshape(train_price, (len(train_price), 1))
test_price = np.reshape(test_price, (len(test_price), 1))
# feature: (x, s)
train_feature_all = np.concatenate([train_feature, train_price], axis=-1)
test_feature_all = np.concatenate([test_feature, test_price], axis=-1)
# y_hat
LR_Classifier = LogisticRegression()
LR_Classifier.fit(train_feature_all, train_label)
test_price_star = np.reshape(np.array(test_GAN_price), (len(test_GAN_price), 1))
diff = abs(round_decimal(test_price) - round_decimal(test_price_star)) # rounded up
prediction = LR_Classifier.predict_proba(test_feature_all)
if debug:
plt.clf()
plt.hist(diff, bins=np.linspace(0, 1.0, num=40)) # arguments are passed to np.histogram
plt.xlim(0, 1.0)
plt.title("Histogram of ${||s-s^{*}||}$")
# plt.show()
plt.savefig("gan_regression_histrogram(s-s_star).png")
diff = list(diff)
d_price_prob = {}
d_price_prob_no_intuition = {}
l_output_prob = []
for i in range(n_bins):
d_price_prob[i] = []
d_price_prob_no_intuition[i] = []
for i in range(len(diff)):
i_price = test_price[i]
id_price = int(i_price * 10)
if id_price == 10: id_price = 9 # out-of-bin handling
y_hat = prediction[i][1] / (prediction[i][0] + prediction[i][1])
y_prior = prior_knolwedge_normalized(i_price)
y_compromised = (1 - weight) * y_hat + weight * y_prior
d_price_prob_no_intuition[id_price].append(y_hat)
if op_prior == 0: # y_hat
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif op_prior == 2: # just compromised
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
elif op_prior == 3: # prior only
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else: # conditional
if diff[i] == 0:
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif diff[i] >= op_diff:
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else:
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
mean = []
std = []
mean_no_intuition = []
std_no_intuition = []
x_range = []
# bar plot
# for i in range(n_bins):
# if len(d_price_prob[i]) == 0:
# mean.append(0)
# std.append(0)
# else:
# mean.append(np.mean(d_price_prob[i]))
# std.append(np.std(d_price_prob[i]))
# x_range.append(i * 0.1 + 0.05)
#
# if op_plot:
# # Call the function to create plot
# plt.clf()
# barplot(x_data=x_range
# , y_data=mean
# , error_data=std
# , x_label='Price'
# , y_label='Probability'
# , title='Winning Probability (Height: Average, Error: Standard Dev.)')
#
# plt.xlim(0, 1.0)
# plt.plot([0., 1.], [1., 0], 'k-', marker='o', lw=2) # domain knowledge
# plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
# line plot
for i in range(n_bins):
if len(d_price_prob[i]) == 0:
continue
else:
mean.append(np.mean(d_price_prob[i]))
std.append(np.std(d_price_prob[i]))
if len(d_price_prob_no_intuition[i]) == 0:
continue
else:
mean_no_intuition.append(np.mean(d_price_prob_no_intuition[i]))
std_no_intuition.append(np.std(d_price_prob_no_intuition[i]))
x_range.append(i * 0.1 + 0.05)
print(mean)
print(x_range)
if op_plot:
plt.clf()
plt.plot(x_range, mean, 'g-', marker='o', lw=1, label='Intuition Only')
plt.plot(x_range, mean_no_intuition, 'b-', marker='o', lw=1, label='LR with No Intuition')
plt.xlabel('Price')
plt.ylabel('Winning Probability')
plt.xlim(0, 1.0)
plt.plot([0., 1.], [1., 0], 'k-', lw=1, label='Expert\'s Intuition')
plt.legend(loc='upper right', shadow=True)
plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
l_output_prediction = []
for i in range(len(diff)):
if l_output_prob[i] > 0.5:
l_output_prediction.append(1.0)
else:
l_output_prediction.append(0.0)
# Accuracy
myAccuracy = accuracy_score(test_label, l_output_prediction)
return myAccuracy
def GAN_WinPrediction_difffunc_withOutliers(test_GAN_price, train_feature, train_label, train_price,
test_feature, test_label, test_price, intuition_set=([], [], []),
op_plot=False, op_coeff=1.0,
op_valid=0, n_bins=12, debug=False):
"""
To train and test classifier using prior, regression, and "functional weight"
:param test_GAN_price: regeressioned prices
:param train_feature: [N, 36]
:param train_label: [N, 1]
:param train_price: [N, 1]
:param test_feature: [M, 36]
:param test_label: [M, 1]
:param test_price: [M, 1]
:param intuition_set: in the set, [0]-intuition all other feature, [1]-intuition label, [2]-intuition var (THEY ARE FROM OUTLIER)
:param op_coeff: coefficient for sigmoid
:param op_plot: True - export plot / False - Not
:param n_bins: number of total bins
:param debug: debug options
    :return: accuracy on testing data, per-bin mean win probabilities, and per-sample output probabilities
"""
train_price = np.reshape(train_price, (len(train_price), 1))
test_price = np.reshape(test_price, (len(test_price), 1))
intuition_price = np.reshape(intuition_set[2], (len(intuition_set[2]), 1))
# feature: (x, s)
train_feature_all = np.concatenate([train_feature, train_price], axis=-1)
test_feature_all = np.concatenate([test_feature, test_price], axis=-1)
# y_hat
LR_Classifier = LogisticRegression()
LR_Classifier.fit(train_feature_all, train_label)
test_price_star = np.reshape(np.array(test_GAN_price), (len(test_GAN_price), 1))
prediction = LR_Classifier.predict_proba(test_feature_all)
    # test_price_star is the price reconstructed by the GAN.
    # If the difference between the quoted price and the reconstructed price is large, there is a good chance the
    # point is an outlier that the data-driven model does not represent well, so the data-driven prediction should
    # get less weight (and vice versa).
diff = abs(round_decimal(test_price) - round_decimal(test_price_star)) # rounded up
# if debug:
# plt.hist(diff, bins=10) # arguments are passed to np.histogram
# plt.title("Histogram of ${||s-s^{*}||}^2_2$")
# # plt.show()
# plt.savefig("lr_regression_histrogram(s-s_star).png")
d_price_prob = {}
l_output_prob = []
for i in range(n_bins):
d_price_prob[i] = []
diff = np.reshape(np.array(diff), [len(diff), 1])
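    # Functional weighting: the intuition weight grows with the regression residual ||s - s*|| via a sigmoid, so deals the data-driven model explains poorly rely more on the expert prior.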
weight = sigmoid(diff, beta=op_coeff)
for i in range(len(diff)):
i_price = test_price[i]
id_price = int(i_price * 10)
if id_price == 10: id_price = 9 # out-of-bin handling
y_hat = prediction[i][1] / (prediction[i][0] + prediction[i][1])
y_prior = prior_knolwedge_normalized(i_price)
y_compromised = (1 - weight[i]) * y_hat + weight[i] * y_prior
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised[0])
mean = []
std = []
x_range = []
for i in range(n_bins):
if len(d_price_prob[i]) == 0:
continue
else:
mean.append(np.mean(d_price_prob[i]))
std.append(np.std(d_price_prob[i]))
x_range.append(i * 0.1 + 0.05)
if op_plot:
# Call the function to create plot
plt.clf()
plt.plot(x_range, mean, 'r-', marker='o', lw=1, label='Our Method (LR with Intuition)')
plt.xlabel('Price')
plt.ylabel('Winning Probability')
plt.xlim(0, 1.0)
plt.plot([0., 1.], [1., 0], 'k-', lw=1, label='Expert\'s Intuition')
plt.legend(loc='upper right', shadow=True)
plt.savefig(
"gan_diff_reg_valid_" + str(op_valid) + "_func_" + str(op_coeff) + ".png")
l_output_prediction = []
for i in range(len(diff)):
if l_output_prob[i] > 0.5:
l_output_prediction.append(1.0)
else:
l_output_prediction.append(0.0)
# Accuracy
myAccuracy = accuracy_score(test_label, l_output_prediction)
return myAccuracy, mean, l_output_prob | 35.818391 | 178 | 0.600828 |
7945aa902e00b7dee1fcc17f19f8f93b65db0335 | 1,222 | py | Python | apps/purchases/urls.py | jorgesaw/kstore | 4ec6612eeeb96edb7b7bd374fd0520733c58451c | [
"MIT"
] | null | null | null | apps/purchases/urls.py | jorgesaw/kstore | 4ec6612eeeb96edb7b7bd374fd0520733c58451c | [
"MIT"
] | 5 | 2021-03-19T10:16:00.000Z | 2022-02-10T09:16:32.000Z | apps/purchases/urls.py | jorgesaw/kstore | 4ec6612eeeb96edb7b7bd374fd0520733c58451c | [
"MIT"
] | null | null | null | """Purchases app URLs."""
# Django
from django.urls import path, include
# Views
from .views import suppliers as suppliers_views
from .views import purchases as purchases_views
urlpatterns = [
path('suppliers/', suppliers_views.SupplierListView.as_view(), name='suppliers'),
path('suppliers/create/', suppliers_views.SupplierCreateView.as_view(), name='supplier-create'),
path('suppliers/<int:pk>/', suppliers_views.SupplierDetailView.as_view(), name='supplier'),
path('suppliers/update/<int:pk>/', suppliers_views.SupplierUpdateView.as_view(), name='supplier-update'),
path('suppliers/delete/<int:pk>/', suppliers_views.SupplierDelete.as_view(), name='supplier-delete'),
path('purchases/', purchases_views.PurchaseListView.as_view(), name='purchases'),
path('purchases/create/', purchases_views.PurchaseCreateView.as_view(), name='purchase-create'),
path('purchases/<int:pk>/', purchases_views.PurchaseDetailView.as_view(), name='purchase'),
path('purchases/update/<int:pk>/', purchases_views.PurchaseUpdateView.as_view(), name='purchase-update'),
path('purchases/delete/<int:pk>/', purchases_views.PurchaseDelete.as_view(), name='purchase-delete'),
]
| 50.916667 | 110 | 0.730769 |
7945aa92e8319f8fca9736935ddcd5d4e29f5dfd | 9,730 | py | Python | biorbd_optim/path_conditions.py | Dangzilla/BiorbdOptim | adb1898b86282c87b3fd186b13e00a8202ceb21c | [
"Apache-2.0"
] | null | null | null | biorbd_optim/path_conditions.py | Dangzilla/BiorbdOptim | adb1898b86282c87b3fd186b13e00a8202ceb21c | [
"Apache-2.0"
] | 1 | 2022-02-18T19:03:13.000Z | 2022-02-18T19:03:13.000Z | biorbd_optim/path_conditions.py | Dangzilla/BiorbdOptim | adb1898b86282c87b3fd186b13e00a8202ceb21c | [
"Apache-2.0"
] | null | null | null | import numpy as np
from .mapping import BidirectionalMapping, Mapping
from .enums import InterpolationType
class PathCondition(np.ndarray):
def __new__(cls, input_array, interpolation_type=InterpolationType.CONSTANT):
# Check and reinterpret input
input_array = np.asarray(input_array, dtype=float)
if len(input_array.shape) == 0:
input_array = input_array[np.newaxis, np.newaxis]
if interpolation_type == InterpolationType.CONSTANT:
if len(input_array.shape) == 1:
input_array = input_array[:, np.newaxis]
if input_array.shape[1] != 1:
raise RuntimeError("Value for InterpolationType.CONSTANT must have exactly one column")
elif interpolation_type == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT:
if len(input_array.shape) == 1:
input_array = input_array[:, np.newaxis]
            if input_array.shape[1] != 1 and input_array.shape[1] != 3:
                raise RuntimeError("Value for InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT must have exactly one or three columns")
if input_array.shape[1] == 1:
input_array = np.repeat(input_array, 3, axis=1)
elif interpolation_type == InterpolationType.LINEAR:
if input_array.shape[1] != 2:
raise RuntimeError("Value for InterpolationType.LINEAR must have exactly two columns")
elif interpolation_type == InterpolationType.EACH_FRAME:
if input_array.shape[1] < 2:
raise RuntimeError("Value for InterpolationType.EACH_FRAME must exactly match the number of points")
else:
raise RuntimeError(f"InterpolationType is not implemented yet")
obj = np.asarray(input_array).view(cls)
# Additional information
obj.nb_shooting = None
obj.type = interpolation_type
return obj
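    # Expected input shapes per interpolation type (illustrative sketch, not exhaustive):
    #   CONSTANT                                -> (n, 1)  one value per element
    #   CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT  -> (n, 3)  first node, intermediate nodes, last node
    #                                              (a single column is repeated to three columns)
    #   LINEAR                                  -> (n, 2)  start and end values, interpolated in between
    #   EACH_FRAME                              -> (n, nb_shooting + 1)  one column per node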
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None:
return
self.nb_shooting = getattr(obj, "nb_shooting", None)
self.type = getattr(obj, "type", None)
def __reduce__(self):
pickled_state = super(PathCondition, self).__reduce__()
new_state = pickled_state[2] + (self.nb_shooting, self.type)
return (pickled_state[0], pickled_state[1], new_state)
def __setstate__(self, state):
self.nb_shooting = state[-2]
self.type = state[-1]
# Call the parent's __setstate__ with the other tuple elements.
super(PathCondition, self).__setstate__(state[0:-2])
def check_and_adjust_dimensions(self, nb_elements, nb_shooting, condition_type):
if (
self.type == InterpolationType.CONSTANT
or self.type == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT
or self.type == InterpolationType.LINEAR
):
self.nb_shooting = nb_shooting
elif self.type == InterpolationType.EACH_FRAME:
self.nb_shooting = nb_shooting + 1
else:
if self.nb_shooting != nb_shooting:
raise RuntimeError(
f"Invalid number of shooting ({self.nb_shooting}), the expected number is {nb_shooting}"
)
if self.shape[0] != nb_elements:
raise RuntimeError(
f"Invalid number of {condition_type} ({self.shape[0] }), the expected size is {nb_elements}"
)
if self.type == InterpolationType.CONSTANT:
if self.shape[1] != 1:
raise RuntimeError(
f"Invalid number of {condition_type} for InterpolationType.CONSTANT (ncols = {self.shape[1]}), "
f"the expected number of column is 1"
)
elif self.type == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT:
            if self.shape[1] != 3:
                raise RuntimeError(
                    f"Invalid number of {condition_type} for InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT "
                    f"(ncols = {self.shape[1]}), the expected number of columns is 3"
)
elif self.type == InterpolationType.LINEAR:
if self.shape[1] != 2:
raise RuntimeError(
f"Invalid number of {condition_type} for InterpolationType.LINEAR (ncols = {self.shape[1]}), "
f"the expected number of column is 2"
)
elif self.type == InterpolationType.EACH_FRAME:
            if self.shape[1] != self.nb_shooting:
                raise RuntimeError(
                    f"Invalid number of {condition_type} for InterpolationType.EACH_FRAME "
                    f"(ncols = {self.shape[1]}), the expected number of columns is {self.nb_shooting}"
)
else:
raise RuntimeError(f"InterpolationType is not implemented yet")
def evaluate_at(self, shooting_point):
if self.nb_shooting is None:
raise RuntimeError(f"check_and_adjust_dimensions must be called at least once before evaluating at")
if self.type == InterpolationType.CONSTANT:
return self[:, 0]
elif self.type == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT:
if shooting_point == 0:
return self[:, 0]
elif shooting_point == self.nb_shooting:
return self[:, 2]
else:
return self[:, 1]
elif self.type == InterpolationType.LINEAR:
return self[:, 0] + (self[:, 1] - self[:, 0]) * shooting_point / self.nb_shooting
elif self.type == InterpolationType.EACH_FRAME:
return self[:, shooting_point]
else:
raise RuntimeError(f"InterpolationType is not implemented yet")
class Bounds:
"""
Organizes bounds of states("X"), controls("U") and "V".
"""
def __init__(
self,
min_bound=(),
max_bound=(),
interpolation_type=InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT,
**parameters,
):
if isinstance(min_bound, PathCondition):
self.min = min_bound
else:
self.min = PathCondition(min_bound, interpolation_type=interpolation_type, **parameters)
if isinstance(max_bound, PathCondition):
self.max = max_bound
else:
self.max = PathCondition(max_bound, interpolation_type=interpolation_type, **parameters)
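    # Minimal usage sketch (assuming one degree of freedom):
    #   bounds = Bounds(min_bound=[-10], max_bound=[10])
    # builds two PathConditions with the default CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT interpolation,
    # so the same limits apply to the first node, the intermediate nodes and the last node.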
def check_and_adjust_dimensions(self, nb_elements, nb_shooting):
"""
Detects if bounds are not correct (wrong size of list: different than degrees of freedom).
Detects if first or last nodes are not complete, in that case they have same bounds than intermediates nodes.
:param nb_elements: Length of each list.
"""
self.min.check_and_adjust_dimensions(nb_elements, nb_shooting, "Bound min")
self.max.check_and_adjust_dimensions(nb_elements, nb_shooting, "Bound max")
def concatenate(self, other):
self.min = PathCondition(np.concatenate((self.min, other.min)), interpolation_type=self.min.type)
self.max = PathCondition(np.concatenate((self.max, other.max)), interpolation_type=self.max.type)
class QAndQDotBounds(Bounds):
def __init__(self, biorbd_model, all_generalized_mapping=None, q_mapping=None, q_dot_mapping=None):
if all_generalized_mapping is not None:
if q_mapping is not None or q_dot_mapping is not None:
raise RuntimeError("all_generalized_mapping and a specified mapping cannot be used along side")
q_mapping = all_generalized_mapping
q_dot_mapping = all_generalized_mapping
if not q_mapping:
q_mapping = BidirectionalMapping(Mapping(range(biorbd_model.nbQ())), Mapping(range(biorbd_model.nbQ())))
if not q_dot_mapping:
q_dot_mapping = BidirectionalMapping(
Mapping(range(biorbd_model.nbQdot())), Mapping(range(biorbd_model.nbQdot()))
)
QRanges = []
QDotRanges = []
for i in range(biorbd_model.nbSegment()):
segment = biorbd_model.segment(i)
QRanges += [q_range for q_range in segment.QRanges()]
QDotRanges += [qdot_range for qdot_range in segment.QDotRanges()]
x_min = [QRanges[i].min() for i in q_mapping.reduce.map_idx] + [
QDotRanges[i].min() for i in q_dot_mapping.reduce.map_idx
]
x_max = [QRanges[i].max() for i in q_mapping.reduce.map_idx] + [
QDotRanges[i].max() for i in q_dot_mapping.reduce.map_idx
]
super(QAndQDotBounds, self).__init__(min_bound=x_min, max_bound=x_max)
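    # Typical construction (illustrative; assumes a loaded biorbd model, e.g. biorbd.Model("model.bioMod")):
    #   x_bounds = QAndQDotBounds(biorbd_model)
    # stacks the generalized coordinate ranges (Q) and velocity ranges (Qdot) of every segment into a single
    # min/max bound vector, optionally reduced through the provided mappings.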
class InitialConditions:
def __init__(self, initial_guess=(), interpolation_type=InterpolationType.CONSTANT, **parameters):
if isinstance(initial_guess, PathCondition):
self.init = initial_guess
else:
self.init = PathCondition(initial_guess, interpolation_type=interpolation_type, **parameters)
def check_and_adjust_dimensions(self, nb_elements, nb_shooting):
"""
Detects if initial values are not given, in that case "0" is given for all degrees of freedom.
Detects if initial values are not correct (wrong size of list: different than degrees of freedom).
Detects if first or last nodes are not complete, in that case they have same values than intermediates nodes.
"""
self.init.check_and_adjust_dimensions(nb_elements, nb_shooting, "InitialConditions")
def concatenate(self, other):
self.init = PathCondition(np.concatenate((self.init, other.init)), interpolation_type=self.init.type,)
| 45.680751 | 118 | 0.639466 |
7945ac8baac9eac58ab5ac37ac661989a16ed3f1 | 15,762 | py | Python | Main.py | connorhess/Report-Counter | 2ddd3782dd0dba5af751092502f5e564bd433415 | [
"MIT"
] | null | null | null | Main.py | connorhess/Report-Counter | 2ddd3782dd0dba5af751092502f5e564bd433415 | [
"MIT"
] | null | null | null | Main.py | connorhess/Report-Counter | 2ddd3782dd0dba5af751092502f5e564bd433415 | [
"MIT"
] | null | null | null | from tkinter import messagebox
import sqlite3
from tkinter import *
from tkinter import filedialog
import webbrowser
import requests  # required by the message fetch below; failures there are caught and fall back to "Offline/No Message"
from pynput.keyboard import Key, KeyCode, Listener
import threading
import time
from tkinter.font import Font
from functools import partial
from tkinter import ttk
conn = sqlite3.connect('Report_Counter.db', check_same_thread=False)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS Stats(ID INT, Name TEXT, Detail REAL)''')
c.execute('''CREATE TABLE IF NOT EXISTS Commands(ID INT, Name TEXT, Detail REAL)''')
# c.execute("SELECT COUNT(ID) FROM Stats WHERE ID=1")
# if (c.fetchall()[0][0]) == 0:
# c.execute('''INSERT INTO Stats(ID, Name, Detail) VALUES(?, ? ,?)''',(1, 'Report Count', 0))
# c.execute('''INSERT INTO Stats(ID, Name, Detail) VALUES(?, ? ,?)''',(2, '+1 KeyBind', 105))
# c.execute('''INSERT INTO Stats(ID, Name, Detail) VALUES(?, ? ,?)''',(3, '-1 KeyBind', 102))
# conn.commit()
# else:
# print("No setup had to be done")
c.execute("SELECT COUNT(ID) FROM Stats WHERE ID=1")
if (c.fetchall()[0][0]) == 0:
c.execute('''INSERT INTO Stats(ID, Name, Detail) VALUES(?, ? ,?)''',(1, 'Report Count', 0))
conn.commit()
print("added Report Count")
else:
print("No setup had to be done")
c.execute("SELECT COUNT(ID) FROM Stats WHERE ID=2")
if (c.fetchall()[0][0]) == 0:
c.execute('''INSERT INTO Stats(ID, Name, Detail) VALUES(?, ? ,?)''',(2, '+1 KeyBind', 105))
conn.commit()
print("added +1 KeyBind")
else:
print("No setup had to be done")
c.execute("SELECT COUNT(ID) FROM Stats WHERE ID=3")
if (c.fetchall()[0][0]) == 0:
c.execute('''INSERT INTO Stats(ID, Name, Detail) VALUES(?, ? ,?)''',(3, '-1 KeyBind', 102))
conn.commit()
print("added -1 KeyBind")
else:
print("No setup had to be done")
global Count
c.execute("SELECT Detail FROM Stats WHERE ID=1")
Count = (c.fetchall()[0][0])
STOP = 0
def Run():
def Main():
Page1 = Tk()
Page1.title("Report Counter")
Page1.configure(background="#BEBEBE")
Page1.geometry("+200+200")
# Page1.attributes("-fullscreen", True)
# Page1.attributes("-topmost", True)
myFont = Font(family="Times New Roman", size=20)
def Edit_KeyBind():
Page2 = Toplevel()
Page2.title("Report Counter Keybind Editor")
Page2.configure(background="#BEBEBE")
Page2.geometry("+250+250")
Page2.transient([Page1])
Keys = {"a": "65", "b": "66", "c": "67", "d": "68", "e": "69", "f": "70", "g": "71", "h": "72", "i": "73", "j": "74", "k": "75", "l": "76", "m": "77", "n": "78", "o": "79", "p": "80", "q": "81", "r": "82", "s": "83", "t": "84", "u": "85", "v": "86", "w": "87", "x": "88", "y": "89", "z": "90", "f1": "112", "f2": "113", "f3": "114", "f4": "115", "f5": "116", "f6": "117", "f7": "118", "f8": "119", "f9": "120", "f10": "121", "f11": "122", "f12": "123", "-": "189", "=": "187", "1": "49", "2": "50", "3": "51", "4": "52", "5": "53", "6": "44", "7": "45", "8": "46", "9": "47", "0": "48", "kp_end": "97", "kp_downarrow": "98", "kp_pgdn": "99", "kp_leftarrow": "100", "kp_5": "101", "kp_rightarrow": "102", "kp_home": "103", "kp_uparrow": "104", "kp_pgup": "105"}
key_list = list(Keys.keys())
val_list = list(Keys.values())
Label(Page2, text="KeyBind +1", bd=2).grid(row=0,column=0,pady=2,sticky='e')
combo1 = ttk.Combobox(Page2, values=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', '-', '=', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'kp_end', 'kp_downarrow', 'kp_pgdn', 'kp_leftarrow', 'kp_5', 'kp_rightarrow', 'kp_home', 'kp_uparrow', 'kp_pgup'])
combo1.grid(column=1, row=0)
Label(Page2, text="KeyBind -1", bd=2).grid(row=1,column=0,pady=2,sticky='e')
combo2 = ttk.Combobox(Page2, values=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', '-', '=', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'kp_end', 'kp_downarrow', 'kp_pgdn', 'kp_leftarrow', 'kp_5', 'kp_rightarrow', 'kp_home', 'kp_uparrow', 'kp_pgup'])
combo2.grid(column=1, row=1)
c.execute("SELECT Detail FROM Stats WHERE ID=2")
Val1 = (str(c.fetchall()[0][0]))
Val11 = (list(Keys).index(str(key_list[val_list.index(Val1)])))
c.execute("SELECT Detail FROM Stats WHERE ID=3")
Val2 = (str(c.fetchall()[0][0]))
Val21 = (list(Keys).index(str(key_list[val_list.index(Val2)])))
combo1.current(Val11)
combo2.current(Val21)
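            # The two lookups above invert the Keys mapping (illustrative): for a stored virtual-key code such
            # as "105", val_list.index("105") finds its position, key_list[...] recovers the name "kp_pgup",
            # and list(Keys).index(...) turns that name into the combobox index passed to .current().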
def Update():
Num1 = Keys[(combo1.get())]
c.execute("UPDATE Stats SET Detail=? WHERE ID=2",(Num1,))
Num2 = Keys[(combo2.get())]
c.execute("UPDATE Stats SET Detail=? WHERE ID=3",(Num2,))
conn.commit()
Page2.destroy()
Page1.destroy()
Run()
B01 = Button(Page2, text="Done", font=myFont, fg="Black", bg="Green", command=Update, bd=2)
B01.grid(row=2,column=1)
menubar = Menu(Page1)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Edit KeyBinds", command=Edit_KeyBind)
menubar.add_cascade(label="File", menu=filemenu)
filemenu.add_separator()
Page1.config(menu=menubar)
        def Close():
            global STOP  # module-level stop flag read by the Animate thread
            STOP = 1
Page1.destroy()
conn.close()
# thread.exit()
# listener.exit()
Message_var = StringVar()
label0 = Label(Page1, textvariable=Message_var, width=29)
label0.grid(row=1,column=0)
label0.configure(font=myFont)
try:
response = requests.get('https://raw.githubusercontent.com/connorhess/Report-Counter/master/Message.txt')
Message = response.text
Message_var.set(Message)
except:
Message_var.set("Offline/No Message")
B0 = Button(Page1, text="Exit (Use This One to Close Program)", font=myFont, width=29, height=1, fg="Black", bg="Red", command=Close, bd=2)
B0.grid(row=0,column=0)
frame_width = 445
frame_height1 = 100
frame_height2 = 260
F1 = Frame(Page1, height=frame_height1, width=frame_width, bg="#E9E9E9", relief="raise")
F1.grid(row=2,column=0)
F1.grid_propagate(0)
label1 = Label(F1, text='Total Sits: ', anchor='e', pady=4)
label1.grid(row=0,column=0,sticky='e', pady=(0, 5))
label1.configure(font=myFont)
Stats = StringVar()
label2 = Label(F1, textvariable=Stats, anchor='w', pady=4)
label2.grid(row=0,column=1,sticky='w', pady=(0, 5))
label2.configure(font=myFont)
label3 = Label(F1, text='Sits Needed: ', anchor='e', pady=4)
label3.grid(row=1,column=0,sticky='e', pady=5)
label3.configure(font=myFont)
Sits_from_80 = StringVar()
label4 = Label(F1, textvariable=Sits_from_80, anchor='w', pady=4)
label4.grid(row=1,column=1,sticky='w', pady=5)
label4.configure(font=myFont)
c.execute("SELECT Detail FROM Stats")
count = (c.fetchall()[0][0])
Stats.set(count)
Sits_from_80.set((80-int(count)))
def update():
# c.execute("SELECT Detail FROM Stats")
# count = (c.fetchall()[0][0])
print(Count)
Stats.set(Count)
def Web_browser_forums():
new = 2
webbrowser.open("https://elitelupus.com/forums/",new=new)
def WB_open(Url):
new = 2
webbrowser.open(Url,new=new)
B1 = Button(F1, text="Search Ban", font=myFont, width=12, height=1, fg="white", bg="green", command=partial(WB_open,"https://elitelupus.com/bans/search/"), bd=2)
B1.grid(row=0,column=3)
B2 = Button(F1, text="Forums", font=myFont, width=12, height=1, fg="white", bg="green", command=Web_browser_forums, bd=2)
B2.grid(row=1,column=3)
F2 = Frame(Page1, height=frame_height2, width=frame_width, bg="#E9E9E9", relief="raise")
F2.grid(row=3,column=0)
F2.grid_propagate(0)
myFont2 = Font(family="Times New Roman", size=14)
width2 = 14
BG = "light green"
FG = "black"
FG2 = "Black"
BG2 = "White"
FG3 = "Red"
BG3 = "Grey"
B3 = Button(F2, text="Ban Appeal", font=myFont2, width=width2, height=1, fg=FG, bg=BG, command=partial(WB_open,"https://elitelupus.com/forums/forumdisplay.php?fid=15"), bd=2)
B3.grid(row=0,column=0)
B4 = Button(F2, text="Warn Appeal", font=myFont2, width=width2, height=1, fg=FG, bg=BG, command=partial(WB_open,"https://elitelupus.com/forums/forumdisplay.php?fid=25"), bd=2)
B4.grid(row=0,column=1)
B5 = Button(F2, text="Staff Applications", font=myFont2, width=width2, height=1, fg=FG, bg=BG, command=partial(WB_open,"https://elitelupus.com/forums/forumdisplay.php?fid=14"), bd=2)
B5.grid(row=0,column=2)
B6 = Button(F2, text="Player Reports", font=myFont2, width=width2, height=1, fg=FG, bg=BG, command=partial(WB_open,"https://elitelupus.com/forums/forumdisplay.php?fid=16"), bd=2)
B6.grid(row=1,column=0)
B7 = Button(F2, text="Rules", font=myFont2, width=width2, height=1, fg=FG, bg=BG, command=partial(WB_open,"https://elitelupus.com/forums/showthread.php?tid=6355"), bd=2)
B7.grid(row=1,column=1)
B8 = Button(F2, text="Job Rules", font=myFont2, width=width2, height=1, fg=FG, bg=BG, command=partial(WB_open,"https://elitelupus.com/forums/showthread.php?tid=8627"), bd=2)
B8.grid(row=1,column=2)
B9 = Button(F2, text="Staff Reports", font=myFont2, width=width2, height=1, fg=FG, bg=BG, command=partial(WB_open,"https://elitelupus.com/forums/forumdisplay.php?fid=17"), bd=2)
B9.grid(row=2,column=0)
B10 = Button(F2, text="Report Issue", font=myFont2, width=width2, height=1, fg=FG2, bg=BG2, command=partial(WB_open,"https://github.com/connorhess/Report-Counter/issues/new?labels=bug"), bd=2)
B10.grid(row=3,column=0)
B11 = Button(F2, text="New Suggestion", font=myFont2, width=width2, height=1, fg=FG2, bg=BG2, command=partial(WB_open,"https://github.com/connorhess/Report-Counter/issues/new?labels=enhancement"), bd=2)
B11.grid(row=3,column=1)
B12 = Button(F2, text="Info", font=myFont2, width=width2, height=1, fg=FG2, bg=BG2, command=partial(WB_open,"https://www.node-s.co.za/products/report-counter"), bd=2)
B12.grid(row=3,column=2)
# B13 = Button(F2, text="Donate", font=myFont2, width=width2, height=1, fg=FG2, bg=BG2, command=partial(WB_open,"https://pay.yoco.com/node-s?reference=Donate"), bd=2)
# B13.grid(row=4,column=0)
B14 = Button(F2, text="Join Server 1", font=myFont2, width=width2, height=1, fg=FG3, bg=BG3, command=partial(WB_open,"steam://connect/54.37.246.36:27015"), bd=2)
B14.grid(row=5,column=0)
B15 = Button(F2, text="Join Server 2", font=myFont2, width=width2, height=1, fg=FG3, bg=BG3, command=partial(WB_open,"steam://connect/54.37.246.36:27016"), bd=2)
B15.grid(row=5,column=1)
B16 = Button(F2, text="Join Server 3", font=myFont2, width=width2, height=1, fg=FG3, bg=BG3, command=partial(WB_open,"steam://connect/54.37.246.36:27017"), bd=2)
B16.grid(row=5,column=2)
B17 = Button(F2, text="Join Server 4", font=myFont2, width=width2, height=1, fg=FG3, bg=BG3, command=partial(WB_open,"steam://connect/gmod-drp1-usa.elitelupus.com:27015"), bd=2)
B17.grid(row=6,column=0)
B18 = Button(F2, text="Join Server 5", font=myFont2, width=width2, height=1, fg=FG3, bg=BG3, command=partial(WB_open,"steam://connect/gmod-drp2-usa.elitelupus.com:27015"), bd=2)
B18.grid(row=6,column=1)
B19 = Button(F2, text="Join Server 6", font=myFont2, width=width2, height=1, fg=FG3, bg=BG3, command=partial(WB_open,"steam://connect/gmod-drp3-usa.elitelupus.com:27015"), bd=2)
B19.grid(row=6,column=2)
B20 = Button(F2, text="Join Server 7", font=myFont2, width=width2, height=1, fg=FG3, bg=BG3, command=partial(WB_open,"steam://connect/139.99.233.75:27015"), bd=2)
B20.grid(row=7,column=0)
def Animate():
while True:
if STOP == 1:
                    return  # threading.Thread has no exit(); returning ends the loop and lets the thread finish
time.sleep(0.2)
# c.execute("SELECT Detail FROM Stats")
# count = (c.fetchall()[0][0])
Sits_from_80.set((80-int(Count)))
Stats.set(Count)
thread = threading.Thread(target=Animate)
thread.setDaemon(True)
thread.start()
Page1.mainloop()
thread = threading.Thread(target=Main)
thread.setDaemon(True)
thread.start()
def Add_count():
c.execute("SELECT Detail FROM Stats WHERE ID=1")
Current = (c.fetchall()[0][0])
global Count
Count = Current
if Current >= 0:
New = Current + 1
else:
New = 1
Count += 1
c.execute("UPDATE Stats SET Detail=? WHERE ID=1",(New,))
conn.commit()
def Remove_count():
c.execute("SELECT Detail FROM Stats WHERE ID=1")
Current = (c.fetchall()[0][0])
global Count
Count = Current
if Current >= 0:
New = Current - 1
else:
New = 0
Count -= 1
c.execute("UPDATE Stats SET Detail=? WHERE ID=1",(New,))
conn.commit()
# Create a mapping of keys to function (use frozenset as sets/lists are not hashable - so they can't be used as keys)
# Note the missing `()` after function_1 and function_2 as want to pass the function, not the return value of the function
c.execute("SELECT Detail FROM Stats WHERE ID=2")
Keybind1 = (c.fetchall()[0][0])
c.execute("SELECT Detail FROM Stats WHERE ID=3")
Keybind2 = (c.fetchall()[0][0])
combination_to_function = {
frozenset([KeyCode(vk=Keybind1)]): Add_count, # Add 1
frozenset([KeyCode(vk=Keybind2)]): Remove_count, # Remove 1
}
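# A combination may also hold several keys (illustrative sketch, not part of the stored keybinds):
#   combination_to_function[frozenset([Key.ctrl_l, KeyCode(vk=Keybind1)])] = Add_count
# would require Left-Ctrl to be held together with the "+1" key before Add_count fires.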
# The currently pressed keys (initially empty)
pressed_vks = set()
def get_vk(key):
"""
Get the virtual key code from a key.
These are used so case/shift modifications are ignored.
"""
return key.vk if hasattr(key, 'vk') else key.value.vk
def is_combination_pressed(combination):
""" Check if a combination is satisfied using the keys pressed in pressed_vks """
return all([get_vk(key) in pressed_vks for key in combination])
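# For example (illustrative): with pressed_vks == {105}, a combination containing only KeyCode(vk=105)
# is satisfied, while one containing KeyCode(vk=102) is not, so only Add_count would fire.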
def on_press(key):
""" When a key is pressed """
vk = get_vk(key) # Get the key's vk
pressed_vks.add(vk) # Add it to the set of currently pressed keys
for combination in combination_to_function: # Loop through each combination
if is_combination_pressed(combination): # Check if all keys in the combination are pressed
combination_to_function[combination]() # If so, execute the function
def on_release(key):
""" When a key is released """
vk = get_vk(key) # Get the key's vk
pressed_vks.remove(vk) # Remove it from the set of currently pressed keys
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
Run()
| 40.005076 | 772 | 0.582287 |
7945acb65793dadb9a347c1c47265ca2752f8e82 | 349 | py | Python | pc.py | simryang/python-study | 3e358e3470629130eea71df20bd9b468c7679bf7 | [
"MIT"
] | null | null | null | pc.py | simryang/python-study | 3e358e3470629130eea71df20bd9b468c7679bf7 | [
"MIT"
] | null | null | null | pc.py | simryang/python-study | 3e358e3470629130eea71df20bd9b468c7679bf7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
class pc():
def __init__(self):
self.n = 0
self.loop = False
def countloop(self):
while self.loop:
self.n += 1
self.result()
def result(self):
print(f'result of pc = {self.loop}')
if __name__ == '__main__':
ipc = pc()
print(f'c={os.getpid()}')
ipc.loop = True
ipc.countloop() | 15.863636 | 40 | 0.587393 |
7945ad15a98d464acc89088bf4d9d6cb8e333dac | 533 | py | Python | output/models/ms_data/additional/isdefault073_xsd/isdefault073.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/additional/isdefault073_xsd/isdefault073.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/additional/isdefault073_xsd/isdefault073.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
@dataclass
class Ct:
class Meta:
name = "ct"
a: object = field(
default="default",
metadata={
"type": "Element",
"namespace": "",
"required": True,
}
)
b: str = field(
init=False,
default="fixed",
metadata={
"type": "Element",
"namespace": "",
"required": True,
}
)
@dataclass
class Root(Ct):
class Meta:
name = "root"
| 16.65625 | 40 | 0.448405 |
7945ad89b662b58ca7616d2f2d8fab21cf0f4051 | 4,396 | py | Python | tests/integration/__init__.py | stobrien89/s3transfer | c5307a76c45f42f8d0688298f8c1ddb3a8c0fbce | [
"Apache-2.0"
] | null | null | null | tests/integration/__init__.py | stobrien89/s3transfer | c5307a76c45f42f8d0688298f8c1ddb3a8c0fbce | [
"Apache-2.0"
] | null | null | null | tests/integration/__init__.py | stobrien89/s3transfer | c5307a76c45f42f8d0688298f8c1ddb3a8c0fbce | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import botocore
import botocore.session
from botocore.exceptions import WaiterError
from s3transfer.manager import TransferManager
from s3transfer.subscribers import BaseSubscriber
from tests import FileCreator, random_bucket_name, unittest
def recursive_delete(client, bucket_name):
# Ensure the bucket exists before attempting to wipe it out
exists_waiter = client.get_waiter('bucket_exists')
exists_waiter.wait(Bucket=bucket_name)
page = client.get_paginator('list_objects')
# Use pages paired with batch delete_objects().
for page in page.paginate(Bucket=bucket_name):
keys = [{'Key': obj['Key']} for obj in page.get('Contents', [])]
if keys:
client.delete_objects(Bucket=bucket_name, Delete={'Objects': keys})
for _ in range(5):
try:
client.delete_bucket(Bucket=bucket_name)
break
except client.exceptions.NoSuchBucket:
exists_waiter.wait(Bucket=bucket_name)
except Exception:
# We can sometimes get exceptions when trying to
# delete a bucket. We'll let the waiter make
# the final call as to whether the bucket was able
# to be deleted.
not_exists_waiter = client.get_waiter('bucket_not_exists')
try:
not_exists_waiter.wait(Bucket=bucket_name)
except botocore.exceptions.WaiterError:
continue
class BaseTransferManagerIntegTest(unittest.TestCase):
"""Tests for the high level s3transfer module."""
@classmethod
def setUpClass(cls):
cls.region = 'us-west-2'
cls.session = botocore.session.get_session()
cls.client = cls.session.create_client('s3', cls.region)
cls.bucket_name = random_bucket_name()
cls.client.create_bucket(
Bucket=cls.bucket_name,
CreateBucketConfiguration={'LocationConstraint': cls.region},
)
def setUp(self):
self.files = FileCreator()
def tearDown(self):
self.files.remove_all()
@classmethod
def tearDownClass(cls):
recursive_delete(cls.client, cls.bucket_name)
def delete_object(self, key):
self.client.delete_object(Bucket=self.bucket_name, Key=key)
def object_exists(self, key, extra_args=None):
try:
self.wait_object_exists(key, extra_args)
return True
except WaiterError:
return False
def object_not_exists(self, key, extra_args=None):
if extra_args is None:
extra_args = {}
try:
self.client.get_waiter('object_not_exists').wait(
Bucket=self.bucket_name, Key=key, **extra_args
)
return True
except WaiterError:
return False
def wait_object_exists(self, key, extra_args=None):
if extra_args is None:
extra_args = {}
for _ in range(5):
self.client.get_waiter('object_exists').wait(
Bucket=self.bucket_name, Key=key, **extra_args
)
def create_transfer_manager(self, config=None):
return TransferManager(self.client, config=config)
def upload_file(self, filename, key, extra_args=None):
transfer = self.create_transfer_manager()
with open(filename, 'rb') as f:
transfer.upload(f, self.bucket_name, key, extra_args)
self.wait_object_exists(key, extra_args)
self.addCleanup(self.delete_object, key)
class WaitForTransferStart(BaseSubscriber):
def __init__(self, bytes_transfer_started_event):
self._bytes_transfer_started_event = bytes_transfer_started_event
def on_progress(self, **kwargs):
if not self._bytes_transfer_started_event.is_set():
self._bytes_transfer_started_event.set()
| 36.330579 | 79 | 0.668335 |
7945ae60abfea11a4ac1f97c7538d3c901276a50 | 1,931 | py | Python | api/python/log_book_post_encoder.py | AndreyShamis/lbook | ab5b01966dc8b279ab850d0e579203c15da0c360 | [
"MIT"
] | 2 | 2018-03-10T08:08:03.000Z | 2020-11-30T17:10:43.000Z | api/python/log_book_post_encoder.py | AndreyShamis/lbook | ab5b01966dc8b279ab850d0e579203c15da0c360 | [
"MIT"
] | 10 | 2018-03-04T06:51:42.000Z | 2022-03-14T15:12:52.000Z | api/python/log_book_post_encoder.py | AndreyShamis/lbook | ab5b01966dc8b279ab850d0e579203c15da0c360 | [
"MIT"
] | null | null | null | import sys
import uuid
import codecs
import io
import mimetypes
class LogBookPostEncoder:
def __init__(self):
self.frontier = uuid.uuid4().hex
self.content_type = f'multipart/form-data; boundary={self.frontier}'
def encode(self, fields: list, files: list):
tmp_body = io.BytesIO()
for part in self.iterate_on_fields(fields, files):
tmp_body.write(part[0])
return self.content_type, tmp_body.getvalue()
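    # Usage sketch (illustrative field/file layout, matching iterate_on_fields below):
    #   encoder = LogBookPostEncoder()
    #   content_type, body = encoder.encode(
    #       fields=[("build", 42), ("status", "PASS")],
    #       files=[("log", "console.log", "/tmp/console.log")],
    #   )
    #   # `content_type` goes into the Content-Type header, `body` is the raw multipart payload.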
@classmethod
def convert(cls, s):
if isinstance(s, bytes) and sys.hexversion >= 0x03000000:
s = s.decode('utf-8')
return s
def iterate_on_fields(self, fields: list, files: list):
nl = '\r\n'
encode_func = codecs.getencoder('utf-8')
for src_key, value in fields:
key = self.convert(src_key)
yield encode_func(f'--{self.frontier}{nl}')
yield encode_func(self.convert(f'Content-Disposition: form-data; name="{key}"{nl}'))
yield encode_func(nl)
if isinstance(value, int) or isinstance(value, float):
value = str(value)
yield encode_func(self.convert(value))
yield encode_func(nl)
for src_key, src_file_name, file_path in files:
file_name = self.convert(src_file_name)
key = self.convert(src_key)
yield encode_func(f'--{self.frontier}{nl}')
yield encode_func(self.convert(f'Content-Disposition: form-data; name="{key}"; filename="{file_name}"{nl}'))
content_type = mimetypes.guess_type(file_name)[0] or 'application/octet-stream'
yield encode_func(f'Content-Type: {content_type}{nl}')
yield encode_func(nl)
with open(file_path, 'rb') as fd:
buff = fd.read()
yield buff, len(buff)
yield encode_func(nl)
yield encode_func(f'--{self.frontier}--{nl}')
| 37.862745 | 120 | 0.605904 |
7945af03aa428be0f38e55566ee98853eeeff03b | 816 | py | Python | tests/generator/anomaly_generator/templater/create_template.py | fredmontet/timeatlas | 9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e | [
"MIT"
] | 10 | 2020-08-25T09:23:02.000Z | 2021-01-12T14:00:35.000Z | tests/generator/anomaly_generator/templater/create_template.py | fredmontet/timeatlas | 9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e | [
"MIT"
] | 140 | 2020-06-30T11:59:47.000Z | 2021-08-23T20:58:43.000Z | tests/generator/anomaly_generator/templater/create_template.py | fredmontet/timeatlas | 9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e | [
"MIT"
] | null | null | null | from timeatlas import AnomalyGeneratorTemplate
def create_template(filename, seed, functions, threshold, num_animalies, anomaly_name):
template = AnomalyGeneratorTemplate(filename=filename,
seed=seed,
functions=functions,
threshold=threshold,
num_anomalies=num_animalies,
anomaly_name=anomaly_name)
template.write()
if __name__ == '__main__':
filename = "test"
seed = 1234
functions = ['flatline', 'hard_knee']
threshold = 1234
num_animalies = None
write = True
anomaly_name = "ANOMALY"
create_template(filename, seed, functions, threshold, num_animalies, anomaly_name)
| 32.64 | 87 | 0.567402 |
7945af19c4828d20d33fe1ebf8e4cb0288dbb3a5 | 4,477 | py | Python | myriad/config.py | nikihowe/myriad_temp | 4c2576cfef4eded96a64778675f1d2404eb52369 | [
"Apache-2.0"
] | 17 | 2021-05-14T15:02:59.000Z | 2022-03-17T13:03:51.000Z | myriad/config.py | nikihowe/myriad_temp | 4c2576cfef4eded96a64778675f1d2404eb52369 | [
"Apache-2.0"
] | null | null | null | myriad/config.py | nikihowe/myriad_temp | 4c2576cfef4eded96a64778675f1d2404eb52369 | [
"Apache-2.0"
] | null | null | null | # (c) 2021 Nikolaus Howe
from typing import Tuple
import jax
from dataclasses import dataclass
from enum import Enum
from myriad.systems import SystemType
class OptimizerType(Enum):
"""Parser argument. Optimizing strategy used to solve the OCP"""
# _settings_ = NoAlias
COLLOCATION = "COLLOCATION"
SHOOTING = "SHOOTING"
FBSM = "FBSM"
class SamplingApproach(Enum):
UNIFORM = 'UNIFORM'
TRUE_OPTIMAL = 'TRUE_OPTIMAL'
RANDOM_WALK = 'RANDOM_WALK'
CURRENT_OPTIMAL = 'CURRENT_OPTIMAL' # TODO: current optimal is broken at the moment, because we're not
# TODO: the guess around which we are sampling
# RANDOM_GRID = 'RANDOM_GRID'
# This ^ isn't implemented yet. It's unclear how helpful it would be
# FULL_GRID = 'FULL_GRID'
# We're not doing the FULL GRID anymore because it breaks the idea of generating trajectories.
# But it would be interesting to compare performance against, since in some sense this is the
# theoretical best. I wonder how resilient it would be to noise though.
# ENDTOEND = "ENDTOEND"
# ORNSTECK_BLABLA = "snnth"
# Another one we should try to implement
class NLPSolverType(Enum):
SLSQP = "SLSQP" # Scipy's SLSQP
TRUST = "TRUST" # Scipy's trust-constr
IPOPT = "IPOPT" # ipopt
# INEXACTNEWTON="INEXACTNEWTON"
EXTRAGRADIENT = "EXTRAGRADIENT" # an extragradient-based solver
class IntegrationMethod(Enum):
EULER = "CONSTANT"
HEUN = "LINEAR"
MIDPOINT = "MIDPOINT"
RK4 = "RK4"
class QuadratureRule(Enum):
TRAPEZOIDAL = "TRAPEZOIDAL"
HERMITE_SIMPSON = "HERMITE_SIMPSON"
# Hyperparameters which change experiment results
@dataclass(eq=True, frozen=False) # or frozen == False
class HParams:
"""The hyperparameters of the experiment. Modifying those should change the results"""
seed: int = 2019
system: SystemType = SystemType.CANCERTREATMENT
optimizer: OptimizerType = OptimizerType.SHOOTING
nlpsolver: NLPSolverType = NLPSolverType.IPOPT
integration_method: IntegrationMethod = IntegrationMethod.HEUN
quadrature_rule: QuadratureRule = QuadratureRule.TRAPEZOIDAL
max_iter: int = 1000 # maxiter for NLP solver (usually 1000)
intervals: int = 1 # used by COLLOCATION and SHOOTING
controls_per_interval: int = 100 # used by SHOOTING
fbsm_intervals: int = 1000 # used by FBSM
sampling_approach: SamplingApproach = SamplingApproach.RANDOM_WALK
train_size: int = 10
val_size: int = 3
test_size: int = 3
sample_spread: float = 0.05
start_spread: float = 0.1
noise_level: float = 0.01 * 0.
to_smooth: bool = False
learning_rate: float = 0.001
minibatch_size: int = 16
num_epochs: int = 1_000_001
num_experiments: int = 5
loss_recording_frequency: int = 1000
plot_progress_frequency: int = 10_000
early_stop_threshold: int = 30_000 # 70 for cartpole, 1 for cancertreatment
early_stop_check_frequency: int = 1000
hidden_layers: Tuple[int] = (100, 100)
num_unrolled: int = 5
eta_x: float = 1e-1
eta_lmbda: float = 1e-3
adam_lr: float = 1e-4
def __post_init__(self):
if self.optimizer == OptimizerType.COLLOCATION:
self.controls_per_interval = 1
if self.nlpsolver == NLPSolverType.EXTRAGRADIENT:
self.max_iter *= 10
# For convenience, record number of steps and stepsize
system = self.system()
self.num_steps = self.intervals * self.controls_per_interval
self.stepsize = system.T / self.num_steps
self.key = jax.random.PRNGKey(self.seed)
self.state_size = system.x_0.shape[0]
self.control_size = system.bounds.shape[0] - self.state_size
# Fix the minibatch size if we're working with small datasets
self.minibatch_size = min([self.minibatch_size, self.train_size, self.val_size, self.test_size])
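    # Worked example of the bookkeeping above (illustrative): with intervals = 1 and
    # controls_per_interval = 100, num_steps = 1 * 100 = 100 and, for a system horizon T = 20,
    # stepsize = 20 / 100 = 0.2; the default minibatch_size of 16 is then capped by the smallest
    # of train/val/test sizes (10, 3, 3), giving minibatch_size = 3.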
@dataclass(eq=True, frozen=False)
class Config:
"""Secondary configurations that should not change experiment results
and should be largely used for debugging"""
verbose: bool = True
"""Verbose mode; default to `True`"""
jit: bool = True
"""Enable [`@jit`](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html#using-jit-to-speed-up-functions) compilation; default to `True`"""
plot: bool = True
"""Plot progress during (and results after) the experiment; default to `True`"""
pretty_plotting: bool = True
"""Only plot the true trajectory, ignoring the solver state output"""
load_params_if_saved: bool = True
figsize: Tuple[float, float] = (8, 6)
file_extension: str = 'png' # pdf, pgf, png
| 34.438462 | 150 | 0.725262 |
7945b0a79cff6a6072205a46eaa6cc0b77bb872a | 3,749 | py | Python | datasets/lm/tiny_shakespeare/data.py | chetanchougle/chatbot2 | 489d63c2f91b095728ab50f3708b6cf6279f6a20 | [
"MIT"
] | 2 | 2020-12-12T14:21:05.000Z | 2020-12-14T03:08:05.000Z | conciencia/datasets/lm/tiny_shakespeare/data.py | dsapandora/nao_ros_api | 99aece058e43b57e535146a0ee7b917b3eab7a2d | [
"MIT"
] | 1 | 2021-03-19T22:49:56.000Z | 2021-03-19T22:49:56.000Z | conciencia/datasets/lm/tiny_shakespeare/data.py | dsapandora/nao_ros_api | 99aece058e43b57e535146a0ee7b917b3eab7a2d | [
"MIT"
] | null | null | null | EN_WHITELIST = '0123456789abcdefghijklmnopqrstuvwxyz ' # space is included in whitelist
EN_BLACKLIST = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\''
FILENAME = 'input.txt'
VOCAB_SIZE = 8000
SEQ_LEN = 10
import random
import sys
import nltk
import numpy as np
import pickle
'''
read lines from file
return [list of lines]
'''
def read_lines(filename):
content = ''
with open(filename) as f:
for line in f:
if line.strip():
if not line.strip()[-1] == ':':
content += line
return content.split('\n')[:-1]
'''
split sentences in one line
into multiple lines
return [list of lines]
'''
def split_line(line):
return line.split('.')
'''
remove anything that isn't in the vocabulary
return str(pure ta/en)
'''
def filter_line(line, whitelist):
return ''.join([ ch for ch in line if ch in whitelist ])
'''
read list of words, create index to word,
word to index dictionaries
return tuple( vocab->(word, count), idx2w, w2idx )
'''
def index_(tokenized_sentences, vocab_size):
# get frequency distribution
freq_dist = nltk.FreqDist(tokenized_sentences)
# get vocabulary of 'vocab_size' most used words
vocab = freq_dist.most_common(vocab_size)
# index2word
index2word = [ x[0] for x in vocab ]
# word2index
word2index = dict([(w,i) for i,w in enumerate(index2word)] )
return index2word, word2index, freq_dist
def to_array(tokenized, seqlen, w2idx):
num_words = len(tokenized)
# calc data_len
data_len = num_words//seqlen
# create numpy arrays
X = np.zeros([data_len, seqlen])
Y = np.zeros([data_len, seqlen])
# fill in
for i in range(data_len):
X[i] = np.array([ w2idx[w] for w in tokenized[i*seqlen:(i+1)*seqlen] ])
Y[i] = np.array([ w2idx[w] for w in tokenized[(i*seqlen) + 1 : ((i+1)*seqlen) + 1] ])
# return ndarrays
return X.astype(np.int32), Y.astype(np.int32)
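# Worked example for to_array (illustrative): with tokenized = ['a','b','c','d','e','f','g'] and seqlen = 3,
# data_len = 7 // 3 = 2, X = [['a','b','c'], ['d','e','f']] and Y = [['b','c','d'], ['e','f','g']] (stored as
# w2idx indices), i.e. each target row is the input row shifted by one word; this assumes at least one
# leftover token remains after the last full window so the shifted slice is full length.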
def process_data():
print('\n>> Read lines from file')
lines = read_lines(filename=FILENAME)
# change to lower case (just for en)
lines = [ line.lower() for line in lines ]
print('\n:: Sample from read(p) lines')
print(lines[121:125])
# filter out unnecessary characters
print('\n>> Filter lines')
lines = [ filter_line(line, EN_WHITELIST) for line in lines ]
print(lines[121:125])
# convert list of [lines of text] into list of [list of words ]
print('\n>> Segment lines into words')
tokenized = [ w for wordlist in lines for w in wordlist.split(' ') ]
print('\n:: Sample from segmented list of words')
print(tokenized[60])
# indexing -> idx2w, w2idx : en/ta
print('\n >> Index words')
idx2w, w2idx, freq_dist = index_( tokenized, vocab_size=VOCAB_SIZE)
#
# remove unknowns
tokenized = [ w for w in tokenized if w in idx2w ]
#
# convert to ndarray
X, Y = to_array(tokenized, SEQ_LEN, w2idx)
print('\n >> Save numpy arrays to disk')
# save them
np.save('idx_x.npy', X)
np.save('idx_y.npy', Y)
# let us now save the necessary dictionaries
metadata = {
'w2idx' : w2idx,
'idx2w' : idx2w,
'seqlen' : SEQ_LEN,
'freq_dist' : freq_dist
}
# write to disk : data control dictionaries
with open('metadata.pkl', 'wb') as f:
pickle.dump(metadata, f)
def load_data(PATH=''):
# read data control dictionaries
with open(PATH + 'metadata.pkl', 'rb') as f:
metadata = pickle.load(f)
# read numpy arrays
    idx_x = np.load(PATH + 'idx_x.npy')  # matches the filenames written by process_data()
    idx_y = np.load(PATH + 'idx_y.npy')
    return metadata, idx_x, idx_y
if __name__ == '__main__':
process_data()
| 24.993333 | 93 | 0.617498 |
7945b187b6e0be1871cb32e04bb7b494e93d85bd | 10,805 | py | Python | _database/models/meetingnotes.py | marcoEDU/HackerspaceWebsiteTemplate | 29621a5f5daef7a8073f368b7d95a1df654c8ba9 | [
"MIT"
] | 9 | 2019-11-04T04:46:08.000Z | 2019-12-29T22:24:38.000Z | _database/models/meetingnotes.py | marcoEDU/HackerspaceWebsiteTemplate | 29621a5f5daef7a8073f368b7d95a1df654c8ba9 | [
"MIT"
] | 27 | 2020-02-17T17:57:00.000Z | 2020-04-23T20:25:44.000Z | _database/models/meetingnotes.py | marcoEDU/HackerspaceWebsiteTemplate | 29621a5f5daef7a8073f368b7d95a1df654c8ba9 | [
"MIT"
] | 4 | 2020-02-17T13:39:18.000Z | 2020-04-12T07:56:45.000Z | from django.db import models
from pyprintplus import Log
from _setup.models import Config
class MeetingNoteSet(models.QuerySet):
def remove_empty_notes(self):
self.filter(text_notes__isnull=True).delete()
print('Deleted all empty notes')
def current(self):
return self.filter(text_notes__isnull=True).order_by('-int_UNIXtime_created').first()
def past(self, older_then=None):
if older_then:
self = self.filter(
text_notes__isnull=False,
int_UNIXtime_created__lt=older_then.int_UNIXtime_created)
return self.filter(text_notes__isnull=False).order_by('-int_UNIXtime_created')
def import_all_from_wiki(self, WIKI_API_URL=Config('BASICS.WIKI.API_URL').value, test=False):
import requests
if not WIKI_API_URL:
Log().print('--> BASICS.WIKI.API_URL not found in config.json -> BASICS - Please add your WIKI_API_URL first.')
return
response_json = requests.get(WIKI_API_URL +
'?action=query&list=categorymembers&cmtitle=Category:Meeting_Notes&cmlimit=500&format=json').json()
all_wiki_pages = [
x['title'] for x in response_json['query']['categorymembers'] if 'Meeting Notes 20' in x['title']]
if test:
all_wiki_pages = all_wiki_pages[:4]
else:
while 'continue' in response_json and 'cmcontinue' in response_json['continue']:
response_json = requests.get(WIKI_API_URL +
'?action=query&list=categorymembers&cmcontinue='+response_json['continue']['cmcontinue']+'&cmtitle=Category:Meeting_Notes&cmlimit=500&format=json').json()
all_wiki_pages += [
x['title'] for x in response_json['query']['categorymembers'] if 'Meeting Notes 20' in x['title']]
for meeting in all_wiki_pages:
MeetingNote().import_from_wiki(meeting, WIKI_API_URL)
print('Imported all meeting notes from wiki')
def LIST__search_results(self):
results_list = []
results = self.all()
for result in results:
results_list.append({
'icon': 'meetingnote',
'name': 'Meeting notes - '+str(result),
'url': '/meeting/'+result.text_date,
'menu_heading': 'menu_h_meetings'
})
return results_list
class MeetingNote(models.Model):
objects = MeetingNoteSet.as_manager()
text_date = models.TextField(blank=True, null=True)
text_notes = models.TextField(blank=True, null=True)
text_main_topics = models.TextField(blank=True, null=True)
many_consensus_items = models.ManyToManyField(
'Consensus', related_name="m_consensus_items", blank=True)
text_keywords = models.TextField(blank=True, null=True)
int_UNIXtime_created = models.IntegerField(blank=True, null=True)
int_UNIXtime_updated = models.IntegerField(blank=True, null=True)
@property
def date(self):
import pytz
from datetime import datetime
from _setup.models import Config
local_timezone = pytz.timezone(
Config('PHYSICAL_SPACE.TIMEZONE_STRING').value)
local_time = datetime.fromtimestamp(
self.int_UNIXtime_created, local_timezone)
return local_time.date()
@property
def str_menu_heading(self):
return 'menu_h_meetings'
@property
def list_main_topics(self):
return self.text_main_topics.split(',') if self.text_main_topics else None
@property
def running_since(self):
import time
# reduce 30 seconds, considering time it takes to create notes
seconds_ago = time.time()-self.int_UNIXtime_created-30
minutes = round(seconds_ago/60)
hours = round(minutes/60) if minutes > 60 else 0
if minutes > 60:
minutes = minutes-(hours*60)
return '{}h {}min'.format(hours, minutes)
def openMeetingNotes(self, riseuppad_meeting_path=Config('MEETINGS.RISEUPPAD_MEETING_PATH').value):
import time
from PyWebScraper import Scraper
browser = Scraper('https://pad.riseup.net/p/' + riseuppad_meeting_path,
scraper_type='selenium', auto_close_selenium=False).selenium
time.sleep(5)
browser.switch_to.frame(browser.find_element_by_name("ace_outer"))
browser.switch_to.frame(browser.find_element_by_name("ace_inner"))
return browser
def start(self,
riseuppad_meeting_path=Config(
'MEETINGS.RISEUPPAD_MEETING_PATH').value,
hackspace_name=Config('BASICS.NAME').value,
timezone=Config('PHYSICAL_SPACE.TIMEZONE_STRING').value
):
print('Starting...')
import os
import sys
import time
from datetime import datetime
import pytz
from _setup.models import Config
from django.template.loader import get_template
browser = self.openMeetingNotes(
riseuppad_meeting_path=riseuppad_meeting_path)
# copy template for new meeting into riseup pad
meeting_template = get_template('meeting_notes_template.txt').render({
'MeetingNumber': MeetingNote.objects.count()+1,
'HackspaceName': hackspace_name,
'Date': str(
datetime.now(pytz.timezone(timezone)).date())
})
input_field = browser.find_element_by_id('innerdocbody')
input_field.clear()
input_field.send_keys(meeting_template)
print('Done: https://pad.riseup.net/p/' + riseuppad_meeting_path)
browser.close()
def end(self, riseuppad_meeting_path=Config('MEETINGS.RISEUPPAD_MEETING_PATH').value):
# save meeting notes
browser = self.openMeetingNotes(
riseuppad_meeting_path=riseuppad_meeting_path)
self.text_notes = browser.find_element_by_id('innerdocbody').text
self.save()
browser.close()
# to do: auto notify via slack
print('Done: Ended & saved meeting')
def STR__get_keywords(self):
import re
keywords = re.findall('#(\w+)', self.text_notes)
keywords = [
x.replace('#', '')
.replace('_', ' ')
for x in keywords if
x != 'summary'
]
filtered = []
for keyword in keywords:
if keyword not in filtered:
filtered.append(keyword)
return ','.join(filtered)
def get_main_topics(self):
import os
import sys
import re
from _setup.models import Config
try:
# find main topics via heading in note template
main_topics = re.findall('(?<==).*', open(os.path.join(
sys.path[0].split('HackspaceOS')[0]+'HackspaceOS', '_database/templates/meeting_notes__'+Config('BASICS.NAME').value+'.txt'), 'r').read())
main_topics = [
x.replace('==', '')
.replace('= ', '')
.replace(' =', '')
.replace('=', '')
.replace('[[', '')
.replace(']]', '')
.strip()
for x in main_topics if
x != ''
and 'Meeting Summary' not in x
and 'End of Meeting' not in x
and 'Discussion Item' not in x
]
return ','.join(main_topics)
except:
return ''
def keyword_add(self, keyword):
if self.text_keywords and keyword != '':
self.text_keywords += ','+keyword
else:
self.text_keywords = keyword
super(MeetingNote, self).save()
print('Saved keyword - '+keyword)
def keyword_remove(self, keyword):
if self.text_keywords and keyword != '':
if self.text_keywords == keyword:
self.text_keywords = ''
else:
self.text_keywords = self.text_keywords.replace(
','+keyword, '').replace(keyword+',', '')
super(MeetingNote, self).save()
print('Removed keyword - '+keyword)
def __str__(self):
if self.text_date:
return self.text_date
else:
return 'New MeetingNote'
def updateCreatedBasedOnName(self):
import time
from datetime import datetime
try:
self.int_UNIXtime_created = int(time.mktime(
datetime.strptime(self.text_date, "%Y-%m-%d").timetuple()))
super(MeetingNote, self).save()
except:
print('Failed for '+self.text_date)
def import_from_local(self):
import os
import sys
self.text_notes = open(os.path.join(
sys.path[0].split('HackspaceOS')[0]+'HackspaceOS', '_database/meeting_notes/'+self.text_date+'.txt'), 'r').read()
self.updateCreatedBasedOnName()
self.save()
def import_from_wiki(self, page, wiki_api_url=Config('BASICS.WIKI.API_URL').value):
import requests
if not wiki_api_url:
Log().print('--> BASICS.WIKI.API_URL not found in config.json -> BASICS - Please add your WIKI_API_URL first.')
return
self.text_date = page.split('Notes ')[1].replace(' ', '-')
# see if notes already exist, else, create
if MeetingNote.objects.filter(text_date=self.text_date).exists() == False:
# remove all links
from bs4 import BeautifulSoup
response_json = requests.get(
wiki_api_url+'?action=parse&page='+page+'&format=json').json()['parse']
soup = BeautifulSoup(str(response_json['text']).replace(
"{\'*\': \'", "").replace("'}", "").replace("\\n", "").replace("\\\'", "\'"), 'html.parser')
for a in soup.findAll('a'):
del a['href']
self.text_notes = str(soup)
self.updateCreatedBasedOnName()
self.save()
print('Imported from wiki - '+self.text_date)
else:
print('Skipped - Already exists. '+self.text_date)
def save(self, *args, **kwargs):
from _database.models import Helper
self = Helper().RESULT__updateTime(self)
if not self.text_date:
self.text_date = str(self.date)
super(MeetingNote, self).save(*args, **kwargs)
if not self.text_main_topics:
self.text_main_topics = self.get_main_topics()
if self.text_notes and not self.text_keywords:
self.text_keywords = self.STR__get_keywords()
else:
self.start()
super(MeetingNote, self).save(*args, **kwargs)
| 36.258389 | 199 | 0.596761 |
7945b1cb2dd12a29e1ff23484e2e0dd55f4b315e | 11,327 | py | Python | tests/integration/states/git.py | jandd/salt | c046a1069e54b953b91dfe67a719f49aed2fa2a8 | [
"Apache-2.0"
] | 1 | 2020-02-22T07:11:24.000Z | 2020-02-22T07:11:24.000Z | tests/integration/states/git.py | jandd/salt | c046a1069e54b953b91dfe67a719f49aed2fa2a8 | [
"Apache-2.0"
] | null | null | null | tests/integration/states/git.py | jandd/salt | c046a1069e54b953b91dfe67a719f49aed2fa2a8 | [
"Apache-2.0"
] | 1 | 2020-02-22T07:11:26.000Z | 2020-02-22T07:11:26.000Z | # -*- coding: utf-8 -*-
'''
Tests for the Git state
'''
# Import python libs
from __future__ import absolute_import
import os
import shutil
import socket
import subprocess
import tempfile
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath, skip_if_binaries_missing
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
@skip_if_binaries_missing('git')
class GitTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
'''
Validate the git state
'''
def setUp(self):
super(GitTest, self).setUp()
self.__domain = 'github.com'
try:
if hasattr(socket, 'setdefaulttimeout'):
# 10 second dns timeout
socket.setdefaulttimeout(10)
socket.gethostbyname(self.__domain)
except socket.error:
msg = 'error resolving {0}, possible network issue?'
self.skipTest(msg.format(self.__domain))
def test_latest(self):
'''
git.latest
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
target=name
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_with_rev_and_submodules(self):
'''
git.latest
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
rev='develop',
target=name,
submodules=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_failure(self):
'''
git.latest
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.latest',
name='https://youSpelledGitHubWrong.com/saltstack/salt-test-repo.git',
rev='develop',
target=name,
submodules=True
)
self.assertSaltFalseReturn(ret)
self.assertFalse(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_empty_dir(self):
'''
git.latest
'''
name = os.path.join(integration.TMP, 'salt_repo')
if not os.path.isdir(name):
os.mkdir(name)
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
rev='develop',
target=name,
submodules=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_unless_no_cwd_issue_6800(self):
'''
cwd=target was being passed to _run_check which blew up if
target dir did not already exist.
'''
name = os.path.join(integration.TMP, 'salt_repo')
if os.path.isdir(name):
shutil.rmtree(name)
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
rev='develop',
target=name,
unless='test -e {0}'.format(name),
submodules=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_numeric_rev(self):
'''
git.latest with numeric revision
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
rev=0.11,
target=name,
submodules=True,
timeout=120
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_with_local_changes(self):
'''
Ensure that we fail the state when there are local changes and succeed
when force_reset is True.
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
# Clone repo
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
target=name
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
# Make change to LICENSE file.
with salt.utils.fopen(os.path.join(name, 'LICENSE'), 'a') as fp_:
fp_.write('Lorem ipsum dolor blah blah blah....\n')
# Make sure that we now have uncommitted changes
self.assertTrue(self.run_function('git.diff', [name, 'HEAD']))
# Re-run state with force_reset=False, this should fail
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
target=name,
force_reset=False
)
self.assertSaltFalseReturn(ret)
# Now run the state with force_reset=True, this should succeed
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
target=name,
force_reset=True
)
self.assertSaltTrueReturn(ret)
# Make sure that we no longer have uncommitted changes
self.assertFalse(self.run_function('git.diff', [name, 'HEAD']))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_fast_forward(self):
'''
Test running git.latest state a second time after changes have been
made to the remote repo.
'''
def _head(cwd):
return self.run_function('git.rev_parse', [cwd, 'HEAD'])
repo_url = 'https://{0}/saltstack/salt-test-repo.git'.format(self.__domain)
mirror_dir = os.path.join(integration.TMP, 'salt_repo_mirror')
mirror_url = 'file://' + mirror_dir
admin_dir = os.path.join(integration.TMP, 'salt_repo_admin')
clone_dir = os.path.join(integration.TMP, 'salt_repo')
try:
# Mirror the repo
self.run_function('git.clone',
[mirror_dir, repo_url, None, '--mirror'])
# Make sure the directory for the mirror now exists
self.assertTrue(os.path.exists(mirror_dir))
# Clone the mirror twice, once to the admin location and once to
# the clone_dir
ret = self.run_state('git.latest', name=mirror_url, target=admin_dir)
self.assertSaltTrueReturn(ret)
ret = self.run_state('git.latest', name=mirror_url, target=clone_dir)
self.assertSaltTrueReturn(ret)
# Make a change to the repo by editing the file in the admin copy
# of the repo and committing.
head_pre = _head(admin_dir)
with open(os.path.join(admin_dir, 'LICENSE'), 'a') as fp_:
fp_.write('Hello world!')
self.run_function('git.commit', [admin_dir, 'Added a line', '-a'])
# Make sure HEAD is pointing to a new SHA so we know we properly
# committed our change.
head_post = _head(admin_dir)
self.assertNotEqual(head_pre, head_post)
# Push the change to the mirror
# NOTE: the test will fail if the salt-test-repo's default branch
# is changed.
self.run_function('git.push', [admin_dir, 'origin', 'develop'])
# Re-run the git.latest state on the clone_dir
ret = self.run_state('git.latest', name=mirror_url, target=clone_dir)
self.assertSaltTrueReturn(ret)
# Make sure that the clone_dir now has the correct SHA
self.assertEqual(head_post, _head(clone_dir))
finally:
for path in (mirror_dir, admin_dir, clone_dir):
shutil.rmtree(path, ignore_errors=True)
def test_present(self):
'''
git.present
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.present',
name=name,
bare=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(os.path.join(name, 'HEAD')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_present_failure(self):
'''
git.present
'''
name = os.path.join(integration.TMP, 'salt_repo')
if not os.path.isdir(name):
os.mkdir(name)
try:
fname = os.path.join(name, 'stoptheprocess')
with salt.utils.fopen(fname, 'a') as fh_:
fh_.write('')
ret = self.run_state(
'git.present',
name=name,
bare=True
)
self.assertSaltFalseReturn(ret)
self.assertFalse(os.path.isfile(os.path.join(name, 'HEAD')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_present_empty_dir(self):
'''
git.present
'''
name = os.path.join(integration.TMP, 'salt_repo')
if not os.path.isdir(name):
os.mkdir(name)
try:
ret = self.run_state(
'git.present',
name=name,
bare=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(os.path.join(name, 'HEAD')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_config_set_value_with_space_character(self):
'''
git.config
'''
name = tempfile.mkdtemp(dir=integration.TMP)
self.addCleanup(shutil.rmtree, name, ignore_errors=True)
subprocess.check_call(['git', 'init', '--quiet', name])
ret = self.run_state(
'git.config_set',
name='user.name',
value='foo bar',
repo=name,
**{'global': False})
self.assertSaltTrueReturn(ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(GitTest)
| 33.913174 | 86 | 0.550366 |
7945b2660ea17663f5689179a4713d38b99c03b4 | 6,126 | py | Python | snake.py | KacpiW/snake-game | 798decad6bd8db98c0c4c95b6fa58ecc2be3d7fd | [
"MIT"
] | null | null | null | snake.py | KacpiW/snake-game | 798decad6bd8db98c0c4c95b6fa58ecc2be3d7fd | [
"MIT"
] | null | null | null | snake.py | KacpiW/snake-game | 798decad6bd8db98c0c4c95b6fa58ecc2be3d7fd | [
"MIT"
] | null | null | null | import time
import random
import pygame
from pygame.locals import *
from pygame.sprite import RenderUpdates
SIZE = 40
BACKGROUND_COLOR = (110, 110, 5)
class Apple:
def __init__(self, parent_screen) -> None:
self.apple = pygame.image.load("resources/apple.jpg").convert()
self.parent_screen = parent_screen
self.x = SIZE * 3
self.y = SIZE * 3
def draw(self):
self.parent_screen.blit(self.apple, (self.x, self.y))
pygame.display.flip()
def move(self):
self.x = random.randint(0, 24) * SIZE
self.y = random.randint(0, 19) * SIZE
class Snake:
def __init__(self, parent_screen, length) -> None:
self.length = length
self.parent_screen = parent_screen
self.block = pygame.image.load("resources/block.jpg").convert()
self.x = [SIZE]*length
self.y = [SIZE]*length
self.direction = "down"
def increase_length(self):
self.length += 1
self.x.append(-1)
self.y.append(-1)
def draw(self):
for i in range(self.length):
self.parent_screen.blit(self.block, (self.x[i], self.y[i]))
pygame.display.flip()
def walk(self):
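        # shift each body segment into the position of the one ahead of it
        # (tail to head), then move the head one grid cell in the current direction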
for i in range(self.length-1, 0, -1):
self.x[i] = self.x[i-1]
self.y[i] = self.y[i-1]
if self.direction == "up":
self.y[0] -= SIZE
if self.direction == "down":
self.y[0] += SIZE
if self.direction == "left":
self.x[0] -= SIZE
if self.direction == "right":
self.x[0] += SIZE
self.draw()
def move_left(self):
self.direction = "left"
self.draw()
def move_right(self):
self.direction = "right"
self.draw()
def move_up(self):
self.direction = "up"
self.draw()
def move_down(self):
self.direction = "down"
self.draw()
class Game:
def __init__(self) -> None:
pygame.init()
self.surface = pygame.display.set_mode(size=(1000, 800))
pygame.mixer.init()
        self.play_background_music()
self.render_background()
self.snake = Snake(self.surface, 7)
self.snake.draw()
self.apple = Apple(self.surface)
self.apple.draw()
def is_collision(self, x1, y1, x2, y2):
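        # treat (x2, y2) as the top-left corner of a SIZE x SIZE grid cell and
        # report a hit when the point (x1, y1) falls inside that cell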
if x1 >= x2 and x1 < x2 + SIZE:
if y1 >= y2 and y1 < y2 + SIZE:
return True
return False
def is_beyond_screen(self, x1, y1, x_min, x_max, y_min, y_max):
if x1 < x_min or x1 > x_max or y1 < y_min or y1 > y_max:
return True
return False
def render_background(self):
bg = pygame.image.load("resources/background.jpg")
self.surface.blit(bg, (0, 0))
def play_sound(self, sound):
sound = pygame.mixer.Sound(sound)
pygame.mixer.Sound.play(sound)
    def play_background_music(self):
pygame.mixer.music.load("resources/bg_music_1.mp3")
pygame.mixer.music.play()
def play(self):
self.render_background()
self.snake.walk()
self.apple.draw()
self.display_score()
pygame.display.flip()
# logic of snake colliding with apple
if self.is_collision(self.snake.x[0], self.snake.y[0], self.apple.x, self.apple.y):
self.play_sound("resources/1_snake_game_resources_ding.mp3")
self.snake.increase_length()
self.apple.move()
# logic of snake colliding with itself
for i in range(3, self.snake.length):
if self.is_collision(self.snake.x[0], self.snake.y[0], self.snake.x[i], self.snake.y[i]):
self.play_sound("resources/1_snake_game_resources_crash.mp3")
raise "GAME OVER"
# logic of snake if colliding with boundry
if self.is_beyond_screen(self.snake.x[0], self.snake.y[0], x_min=0, y_min=0, x_max=1000, y_max=800):
self.play_sound(
"resources/1_snake_game_resources_crash.mp3")
raise "GAME OVER"
def display_score(self):
font = pygame.font.SysFont('arial', 30)
score = font.render(
f"Score: {self.snake.length}", True, (255, 255, 255))
self.surface.blit(score, (800, 10))
def show_game_over(self):
self.render_background()
font = pygame.font.SysFont('arial', 30)
line1 = font.render(
f"Score: {self.snake.length}", True, (255, 255, 255)
)
self.surface.blit(line1, (200, 300))
line2 = font.render(
f"To play again press Enter. To exit press Escape!", True, (
255, 255, 255)
)
self.surface.blit(line2, (200, 350))
pygame.display.flip()
pygame.mixer.music.pause()
def reset(self):
self.snake = Snake(self.surface, 1)
self.apple = Apple(self.surface)
def run(self):
game_over = True
pause = False
while game_over:
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
                        game_over = False
if event.key == K_RETURN:
pygame.mixer.music.unpause()
pause = False
if not pause:
if event.key == K_UP:
self.snake.move_up()
if event.key == K_DOWN:
self.snake.move_down()
if event.key == K_LEFT:
self.snake.move_left()
if event.key == K_RIGHT:
self.snake.move_right()
elif event.type == QUIT:
game_over = False
try:
if not pause:
self.play()
except Exception as e:
self.show_game_over()
pause = True
self.reset()
time.sleep(0.1)
if __name__ == "__main__":
game = Game()
game.run()
| 28.626168 | 108 | 0.535423 |
7945b2724b4d4cde1f4716320d80f92536d7dddb | 6,468 | py | Python | src/ml/PreProcessing/preprocessing.py | Hakan-er/utma | f7cd6253ec894047b460d4df9b43eeb9b109bae2 | [
"MIT"
] | 1 | 2022-02-02T20:36:27.000Z | 2022-02-02T20:36:27.000Z | src/ml/PreProcessing/preprocessing.py | Hakan-er/utma | f7cd6253ec894047b460d4df9b43eeb9b109bae2 | [
"MIT"
] | null | null | null | src/ml/PreProcessing/preprocessing.py | Hakan-er/utma | f7cd6253ec894047b460d4df9b43eeb9b109bae2 | [
"MIT"
] | 1 | 2021-06-17T20:50:41.000Z | 2021-06-17T20:50:41.000Z | from sklearn.model_selection import train_test_split
import pandas as pd
import re
import xlrd
from sklearn import preprocessing
import numpy as np
class PreProcessing():
def __init__(self, path, sheet_name=0):
self.path = path
self.sheet_name = sheet_name
if re.search('.csv$', self.path) is not None:
self.data = pd.read_csv(self.path)
elif re.search('.xlsx$', self.path):
self.data = pd.read_excel(self.path, sheet_name=self.sheet_name, skiprows=range(self.find_skip_rows()))
elif re.search('.json$', self.path):
self.data = pd.read_json(self.path)
elif re.search('.xml$', self.path):
self.data = pd.read_xml(self.path)
elif re.search('.html$', self.path):
self.data = pd.read_html(self.path)
else:
raise Exception("Veri Girişinde Hata")
def set_predicted_column(self, predicted_column):
self.predicted_column = predicted_column
# self.predicted_column = self.data.columns[predicted_column]
return self.predicted_column
def get_column_names(self):
return self.data.columns
def get_label_names(self):
return self.data[self.predicted_column].unique()
def get_data(self):
return np.asarray(self.data)
    def get_sheet_names(self):
sheet_names = list()
book = xlrd.open_workbook(self.path)
for sheet in book.sheets():
sheet_names.append(sheet.name)
return sheet_names
def find_skip_rows(self):
data = pd.read_excel(self.path)
for index, row in data.iterrows():
if row.isnull().any() == False:
return index + 1
def dropping_operations(self):
dropped_columns = []
dropped_columns_locs = []
dropped_columns_data = []
column_counter = 0
for column in self.data.columns:
if len(self.data[column].unique()) == len(self.data[column]):
dropped_columns_data.append(self.data[column])
dropped_columns_locs.append(column_counter)
self.data.drop(column, axis=1, inplace=True)
dropped_columns.append(column)
elif len(self.data[column].unique()) == 1:
dropped_columns_data.append(self.data[column])
dropped_columns_locs.append(column_counter)
self.data.drop(column, axis=1, inplace=True)
dropped_columns.append(column)
column_counter += 1
return dropped_columns, dropped_columns_data, dropped_columns_locs
def reverse_dropping_operations(self, dropped_columns, dropped_columns_data, dropped_columns_locs):
for column_name, column_data, column_loc in zip(dropped_columns, dropped_columns_data, dropped_columns_locs):
self.data.insert(column_loc, column_name, column_data)
return self.data
def label_encoding(self):
changed_columns = []
columns_data = []
column_counter = 0
self.le = preprocessing.LabelEncoder()
dataTypeSeries = self.data.dtypes
for datatype, column in zip(dataTypeSeries, self.data.columns):
if datatype == "object":
changed_columns.append(column)
columns_data.append(self.data[column])
self.data[column] = self.le.fit_transform(self.data[column])
column_counter += 1
return changed_columns, columns_data
def reverse_label_encoding(self, changed_columns, columns_data):
counter = 0
for column in changed_columns:
self.data = self.data.assign(new=columns_data[counter])
self.data[column] = self.data["new"]
self.data.drop(columns=['new'], inplace=True)
counter += 1
return np.asarray(self.data)
def number_of_records(self):
self.count_row = self.data.shape[0]
return self.count_row
def fill_missing_values(self, categorical_columns):
# categoricals=self.label_encoding()
for column in self.data.columns:
null_count = (self.data[column].isnull().sum() * 100)
if self.data[column].count() == 0:
self.data.drop(column, axis=1, inplace=True)
continue
null_vs_count = (self.data[column].isnull().sum() * 100) / (self.data[column].count())
if (null_vs_count) < 79 and (null_vs_count) > 0:
if column in categorical_columns:
self.data[column] = self.data[column].fillna(self.data[column].value_counts().index[0])
else:
self.data.fillna(self.data.mean(), inplace=True)
elif (null_vs_count) > 95:
self.data.drop(column, axis=1, inplace=True)
def min_max_scaling(self, X_train, X_test=None):
self.min_max_scaler = preprocessing.MinMaxScaler()
X_train_minmax = self.min_max_scaler.fit_transform(X_train)
if X_test is not None:
X_test_minmax = self.min_max_scaler.transform(X_test)
else:
X_test_minmax = None
return X_train_minmax, X_test_minmax
def reverse_min_max_scaling(self):
return self.min_max_scaler.inverse_transform(self.data)
def GaussianTranformation(self, X_train, X_test):
pt = preprocessing.PowerTransformer(method='box-cox', standardize=True)
return pt.fit_transform(X_train), pt.fit_transform(X_test)
def Normalization(self, X_train, X_test):
X_normalized_train = preprocessing.normalize(X_train, norm='l2')
        X_normalized_test = preprocessing.normalize(X_test, norm='l2')
        return X_normalized_train, X_normalized_test
def train_split_test(self, supplied_test_set=None, percentage_split=0.2, train_test_splitt=True):
x = self.data.drop(self.predicted_column, axis=1).values
y = self.data[self.predicted_column].values
if train_test_splitt:
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=percentage_split)
else:
y_train = y
X_train = x
supplied_test_set = pd.read_csv(supplied_test_set)
            y_test = supplied_test_set[self.predicted_column].values
            X_test = supplied_test_set.drop(self.predicted_column, axis=1).values
return X_train, X_test, y_train, y_test
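# Minimal usage sketch (not part of the original module). Assumes a local
# "data.csv" with a categorical "label" column; both names are placeholders.
if __name__ == "__main__":
    prep = PreProcessing("data.csv")
    prep.set_predicted_column("label")
    prep.dropping_operations()        # drop constant / all-unique columns
    prep.label_encoding()             # encode object-typed columns
    X_train, X_test, y_train, y_test = prep.train_split_test(percentage_split=0.2)
    X_train, X_test = prep.min_max_scaling(X_train, X_test)
    print(X_train.shape, X_test.shape)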
| 41.729032 | 117 | 0.641775 |
7945b42bf3d0c9dbe874a27be94e6bdc92cd1452 | 6,202 | py | Python | pinax/projects/social_project/apps/friends_app/views.py | yulka/pinax | 025113cbd1e36eefae375493569bb8817c7095fe | [
"MIT"
] | 1 | 2016-01-07T01:50:40.000Z | 2016-01-07T01:50:40.000Z | pinax/projects/social_project/apps/friends_app/views.py | yulka/pinax | 025113cbd1e36eefae375493569bb8817c7095fe | [
"MIT"
] | null | null | null | pinax/projects/social_project/apps/friends_app/views.py | yulka/pinax | 025113cbd1e36eefae375493569bb8817c7095fe | [
"MIT"
] | null | null | null | from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from friends.models import *
from friends.forms import JoinRequestForm
from friends_app.forms import ImportVCardForm
from account.forms import SignupForm
from friends.importer import import_yahoo, import_google
# @@@ if made more generic these could be moved to django-friends proper
def friends(request, form_class=JoinRequestForm,
template_name="friends_app/invitations.html"):
if request.method == "POST":
invitation_id = request.POST.get("invitation", None)
if request.POST["action"] == "accept":
try:
invitation = FriendshipInvitation.objects.get(id=invitation_id)
if invitation.to_user == request.user:
invitation.accept()
request.user.message_set.create(message=_("Accepted friendship request from %(from_user)s") % {'from_user': invitation.from_user})
except FriendshipInvitation.DoesNotExist:
pass
join_request_form = form_class()
elif request.POST["action"] == "invite": # invite to join
join_request_form = form_class(request.POST)
if join_request_form.is_valid():
join_request_form.save(request.user)
join_request_form = form_class() # @@@
elif request.POST["action"] == "decline":
try:
invitation = FriendshipInvitation.objects.get(id=invitation_id)
if invitation.to_user == request.user:
invitation.decline()
request.user.message_set.create(message=_("Declined friendship request from %(from_user)s") % {'from_user': invitation.from_user})
except FriendshipInvitation.DoesNotExist:
pass
join_request_form = form_class()
else:
join_request_form = form_class()
invites_received = request.user.invitations_to.invitations().order_by("-sent")
invites_sent = request.user.invitations_from.invitations().order_by("-sent")
joins_sent = request.user.join_from.all().order_by("-sent")
return render_to_response(template_name, {
"join_request_form": join_request_form,
"invites_received": invites_received,
"invites_sent": invites_sent,
"joins_sent": joins_sent,
}, context_instance=RequestContext(request))
friends = login_required(friends)
def accept_join(request, confirmation_key, form_class=SignupForm,
template_name="account/signup.html"):
join_invitation = get_object_or_404(JoinInvitation, confirmation_key = confirmation_key.lower())
if request.user.is_authenticated():
return render_to_response("account/signup.html", {
}, context_instance=RequestContext(request))
else:
form = form_class(initial={"email": join_invitation.contact.email, "confirmation_key": join_invitation.confirmation_key })
return render_to_response(template_name, {
"form": form,
}, context_instance=RequestContext(request))
def contacts(request, form_class=ImportVCardForm,
template_name="friends_app/contacts.html"):
if request.method == "POST":
if request.POST["action"] == "upload_vcard":
import_vcard_form = form_class(request.POST, request.FILES)
if import_vcard_form.is_valid():
imported, total = import_vcard_form.save(request.user)
request.user.message_set.create(message=_("%(total)s vCards found, %(imported)s contacts imported.") % {'imported': imported, 'total': total})
import_vcard_form = ImportVCardForm()
else:
import_vcard_form = form_class()
if request.POST["action"] == "import_yahoo":
bbauth_token = request.session.get('bbauth_token')
del request.session['bbauth_token']
if bbauth_token:
imported, total = import_yahoo(bbauth_token, request.user)
request.user.message_set.create(message=_("%(total)s people with email found, %(imported)s contacts imported.") % {'imported': imported, 'total': total})
if request.POST["action"] == "import_google":
authsub_token = request.session.get('authsub_token')
del request.session['authsub_token']
if authsub_token:
imported, total = import_google(authsub_token, request.user)
request.user.message_set.create(message=_("%(total)s people with email found, %(imported)s contacts imported.") % {'imported': imported, 'total': total})
else:
import_vcard_form = form_class()
return render_to_response(template_name, {
"import_vcard_form": import_vcard_form,
"bbauth_token": request.session.get('bbauth_token'),
"authsub_token": request.session.get('authsub_token'),
}, context_instance=RequestContext(request))
contacts = login_required(contacts)
def friends_objects(request, template_name, friends_objects_function, extra_context={}):
"""
Display friends' objects.
This view takes a template name and a function. The function should
take an iterator over users and return an iterator over objects
belonging to those users. This iterator over objects is then passed
to the template of the given name as ``object_list``.
    The template is also passed variables defined in ``extra_context``
which should be a dictionary of variable names to functions taking a
request object and returning the value for that variable.
"""
friends = friend_set_for(request.user)
dictionary = {
"object_list": friends_objects_function(friends),
}
for name, func in extra_context.items():
dictionary[name] = func(request)
return render_to_response(template_name, dictionary, context_instance=RequestContext(request))
friends_objects = login_required(friends_objects)
| 48.834646 | 173 | 0.678168 |
7945b47c22f0756fd6bc24d9db798d74cd9ab7c2 | 9,456 | py | Python | utils/tests/test_database_update_and_cron.py | rafsaf/Plemiona_Planer | 1a0e2da0c4b18f1abd1df876f688c6442cba17ce | [
"Apache-2.0"
] | 2 | 2020-11-15T12:50:07.000Z | 2020-11-17T21:54:54.000Z | utils/tests/test_database_update_and_cron.py | rafsaf/Plemiona_Planer | 1a0e2da0c4b18f1abd1df876f688c6442cba17ce | [
"Apache-2.0"
] | 21 | 2021-11-01T14:04:19.000Z | 2022-03-25T06:31:03.000Z | utils/tests/test_database_update_and_cron.py | rafsaf/Tribal-Wars-Planer | 083af9b1efe814be3abe975b9ac8faccc00ebb09 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Rafał Safin (rafsaf). All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from pathlib import Path
from unittest.mock import patch
import requests
import requests_mock
from base.models import Player, Tribe, VillageModel, World
from base.tests.test_utils.mini_setup import MiniSetup
from utils.database_update import WorldUpdateHandler
GET_CONFIG = Path("utils/tests/database_update/get_config.xml").read_text()
GET_UNIT_INFO = Path("utils/tests/database_update/get_unit_info.xml").read_text()
class WorldUpdateHandlerTest(MiniSetup):
def setUp(self):
super().setUp()
self.world = self.get_world(save=False)
def test_already_added(self):
world = self.world
world.save()
world_query = WorldUpdateHandler(world=world)
assert world_query.check_if_world_exist_and_try_create() == (None, "added")
def test_connection_error_get_config(self):
world = self.world
world_query = WorldUpdateHandler(world=world)
with requests_mock.Mocker() as mock:
mock.get(
world.link_to_game("/interface.php?func=get_config"),
exc=requests.exceptions.ConnectionError,
)
assert world_query.check_if_world_exist_and_try_create() == (None, "error")
def test_connection_bad_status_get_config(self):
world = self.world
world_query = WorldUpdateHandler(world=world)
with requests_mock.Mocker() as mock:
mock.get(
world.link_to_game("/interface.php?func=get_config"), status_code=400
)
assert world_query.check_if_world_exist_and_try_create() == (None, "error")
def test_connection_redirect_get_config(self):
world = self.world
world_query = WorldUpdateHandler(world=world)
with requests_mock.Mocker() as mock:
mock.get(
world.link_to_game("/interface.php?func=get_config"),
[
{
"text": GET_CONFIG,
"status_code": 300,
},
{
"text": GET_CONFIG,
"status_code": 200,
},
],
)
assert world_query.check_if_world_exist_and_try_create() == (None, "error")
def test_connection_error_get_unit_info(self):
world = self.world
world_query = WorldUpdateHandler(world=world)
with requests_mock.Mocker() as mock:
mock.get(
world.link_to_game("/interface.php?func=get_config"),
[
{
"text": GET_CONFIG,
"status_code": 200,
}
],
)
mock.get(
world.link_to_game("/interface.php?func=get_unit_info"),
exc=requests.exceptions.ConnectionError,
)
assert world_query.check_if_world_exist_and_try_create() == (None, "error")
def test_connection_bad_status_get_unit_info(self):
world = self.world
world_query = WorldUpdateHandler(world=world)
with requests_mock.Mocker() as mock:
mock.get(
world.link_to_game("/interface.php?func=get_config"),
[
{
"text": GET_CONFIG,
"status_code": 200,
}
],
)
mock.get(
world.link_to_game("/interface.php?func=get_unit_info"),
[
{
"text": GET_UNIT_INFO,
"status_code": 404,
}
],
)
assert world_query.check_if_world_exist_and_try_create() == (None, "error")
def test_connection_redirect_get_unit_info(self):
world = self.world
world_query = WorldUpdateHandler(world=world)
with requests_mock.Mocker() as mock:
mock.get(
world.link_to_game("/interface.php?func=get_config"),
[
{
"text": GET_CONFIG,
"status_code": 200,
}
],
)
mock.get(
world.link_to_game("/interface.php?func=get_unit_info"),
[
{
"text": GET_UNIT_INFO,
"status_code": 300,
},
{
"text": GET_UNIT_INFO,
"status_code": 200,
},
],
)
assert world_query.check_if_world_exist_and_try_create() == (None, "error")
def test_works_ok(self):
world = self.world
world_query = WorldUpdateHandler(world=world)
assert world_query.world.paladin == "inactive"
assert world_query.world.archer == "inactive"
assert world_query.world.militia == "inactive"
with requests_mock.Mocker() as mock:
mock.get(
self.world.link_to_game("/interface.php?func=get_config"),
[
{
"text": GET_CONFIG,
"status_code": 200,
},
],
)
mock.get(
self.world.link_to_game("/interface.php?func=get_unit_info"),
[
{
"text": GET_UNIT_INFO,
"status_code": 200,
},
],
)
assert world_query.check_if_world_exist_and_try_create() == (
world_query.world,
"success",
)
world_query.world.refresh_from_db()
assert world_query.world.speed_world == 1.6
assert world_query.world.speed_units == 0.625
assert world_query.world.paladin == "active"
assert world_query.world.archer == "active"
assert world_query.world.militia == "active"
    def test_if_world_is_archived1(self):
world = self.world
world.save()
world.refresh_from_db()
world_query = WorldUpdateHandler(world=world)
world_query.check_if_world_is_archived("https://example.com/archive/nt1")
assert not World.objects.filter(pk=world.pk).exists()
    def test_if_world_is_archived2(self):
world = self.world
world.save()
world.refresh_from_db()
world_query = WorldUpdateHandler(world=world)
world_query.check_if_world_is_archived("https://example.com/archive/nope")
assert World.objects.filter(pk=world.pk).exists()
assert world_query.world.connection_errors == 1
def test_last_modified_timestamp(self):
datetime = "Sat, 08 Jan 2022 13:48:44 GMT"
assert WorldUpdateHandler.last_modified_timestamp(datetime) == 1641649724
@patch("time.sleep", return_value=None)
def test_db_update_cron_job(self, patched_time_sleep):
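        # time.sleep is patched to a no-op above so any waits inside
        # WorldUpdateHandler.update_all do not slow this test down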
TRIBES = Path("utils/tests/database_update/ally.txt.gz").read_bytes()
PLAYERS = Path("utils/tests/database_update/player.txt.gz").read_bytes()
VILLAGES = Path("utils/tests/database_update/village.txt.gz").read_bytes()
self.world.save()
with requests_mock.Mocker() as mock:
world_query = WorldUpdateHandler(world=self.world)
mock.get(
world_query.world.link_to_game("/map/player.txt.gz"),
content=PLAYERS,
headers={
"etag": "12345",
"last-modified": "Sun, 08 May 2022 06:15:20 GMT",
},
)
mock.get(
world_query.world.link_to_game("/map/ally.txt.gz"),
content=TRIBES,
headers={
"etag": "12345",
"last-modified": "Sun, 08 May 2022 06:15:20 GMT",
},
)
mock.get(
world_query.world.link_to_game("/map/village.txt.gz"),
content=VILLAGES,
headers={
"etag": "12345",
"last-modified": "Sun, 08 May 2022 06:15:20 GMT",
},
)
world_query.update_all()
self.world.refresh_from_db()
date1 = self.world.last_update
assert VillageModel.objects.count() == 38219
assert Player.objects.count() == 10234
assert Tribe.objects.count() == 534
world_query.update_all()
self.world.refresh_from_db()
date2 = self.world.last_update
assert date2 > date1
| 37.975904 | 87 | 0.542513 |
7945b4cbc5736bd4ae8f83beebb6efb5a981ee92 | 6,226 | py | Python | __init__.py | yuntai-dev/blender-test | cf8b2fabe8ad3c5edd2b81b1191d01dfd06c993f | [
"MIT"
] | null | null | null | __init__.py | yuntai-dev/blender-test | cf8b2fabe8ad3c5edd2b81b1191d01dfd06c993f | [
"MIT"
] | null | null | null | __init__.py | yuntai-dev/blender-test | cf8b2fabe8ad3c5edd2b81b1191d01dfd06c993f | [
"MIT"
] | null | null | null | #######################
# ### bpy imports ### #
import bpy
from bpy.utils import register_class, unregister_class
# ### base class ### #
from bpy.types import Operator, Panel, Context
# ### prop types ### #
from bpy.props import StringProperty, EnumProperty, CollectionProperty
# ### prop group ### #
from bpy.types import PropertyGroup, Scene
#######################
# ### Add_on info ### #
bl_info = {
"category": "Test",
"name": "Test",
"author": "aneCrow",
"version": (0, 0, 1),
"blender": (2, 92, 0),
"description": "",
"warning": "demo test",
}
# Main panel
class TEST_PT_main(Panel):
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "TEST"
bl_label = "Test"
    def draw(self, context: Context):  # draw callback
        # UI draw code must not create or modify data directly;
        # keep panels display-only and move destructive actions into operator classes
seleted_objs = context.selected_objects
layout = self.layout
if len(seleted_objs) == 0:
layout.label(text="选择一个对象")
# Sub panels
class sub_Panel(Panel):  # base class for the sub panels
bl_space_type = TEST_PT_main.bl_space_type
bl_region_type = TEST_PT_main.bl_region_type
bl_parent_id = TEST_PT_main.__name__
    # static/class-level method: the first argument refers to the class itself
    @classmethod  # classmethod decorator
    def poll(cls, context):  # availability check
seleted_objs = context.selected_objects
check = len(seleted_objs) > 0
        return check  # an empty (or False) return value stops this class from running or being instantiated
    # instance method, unavailable before instantiation; the first argument refers to the instance
def get_selected_objects_collection(self, context: Context):
"""获取已选object对应collection列表"""
from .utils import get_objects_collection
selected_objects = context.selected_objects
obj_coll_list = get_objects_collection(context, selected_objects)
        # keep only the collection item of each (object, collection) pair
        obj_coll_list = [obj_coll[1] for obj_coll in obj_coll_list]
        return set(obj_coll_list)  # use a set to drop duplicates
class TEST_PT_selected(sub_Panel):  # inherit the base class and implement the concrete panel
bl_label = "selected_objects"
def draw(self, context: Context):
seleted_objs = context.selected_objects
layout = self.layout
for obj in seleted_objs:
layout.label(text=obj.name)
class TEST_PT_collections(sub_Panel):
bl_label = "collections"
def draw(self, context: Context):
collections = self.get_selected_objects_collection(context)
layout = self.layout
        # use a list comprehension to run the loop in one line
[layout.label(text=coll.name) for coll in collections]
# Feature panel 01
# passing parameters through the operator
class TEST_PT_op01(sub_Panel):
bl_label = "operator01"
bl_options = {"DEFAULT_CLOSED"}
def draw(self, context: Context):
active_obj = context.active_object
seleted_objs = context.selected_objects
root_collection = context.scene.collection
        # filter to the collections that contain the currently active object
        active_obj_colls = self.get_activeObj_collections(active_obj, root_collection)
        active_obj_coll = active_obj_colls[0]  # take only the first match; any extra matches are ignored
        layout = self.layout
        layout.label(text="Selected objects: %d" % len(seleted_objs))  # % operator
        layout.prop(active_obj_coll, "name", text="Target collection")
        op = layout.operator(MoveObjectToCollection.bl_idname)
        # pass parameters to the operator
        op.target_name = active_obj_coll.name
        for obj in seleted_objs:
            # for a CollectionProperty, add() appends and returns the new element
op.obj_names.add().name = obj.name
def get_activeObj_collections(self, active_object, root_collection):
from .utils import nested_object_generator
return [
collection
for collection in nested_object_generator(root_collection, "children")
if active_object.name in collection.objects
]
class Prop_Name_Group(PropertyGroup):  # custom property type
name: StringProperty()
class MoveObjectToCollection(Operator):
"""移动对象至容器"""
bl_idname = "test.move"
bl_label = "移动至容器"
bl_options = {"REGISTER", "UNDO"} # 允许撤销
# ### prop ### #
target_name: StringProperty()
obj_names: CollectionProperty(type=Prop_Name_Group) # type必须为已注册的property类
# ### 待传参数 ### #
def check_erro(self): # 类型检查
check = [
self.target_name == "",
len(self.obj_names) == 0,
]
        return True in check  # OR across the checks
def execute(self, context: Context):
from .utils import objects_move_collection
        if self.check_error():
            return {"CANCELLED"}  # cancel execution
        # use the passed-in names to look up the corresponding data-blocks
data = bpy.data
objs = [data.objects[item.name] for item in self.obj_names]
target = (
context.scene.collection
if self.target_name == "Master Collection"
else data.collections[self.target_name]
)
objects_move_collection(context, objs, target)
return {"FINISHED"}
# Feature panel 02
# shared data management and a dynamic enum list
class TEST_PT_op02(sub_Panel):
bl_label = "operator02"
bl_options = {"DEFAULT_CLOSED"}
def draw(self, context: Context):
seleted_objs = context.selected_objects
target_name = context.scene.test_enum
layout = self.layout
layout.label(text="已选对象:%d" % len(seleted_objs))
layout.prop(context.scene, "test_enum") # 储存在scene实例中的自定义数据
op = layout.operator(MoveObjectToCollection.bl_idname)
op.target_name = target_name
for obj in seleted_objs:
op.obj_names.add().name = obj.name
def enum_items_collection(self, context: Context):  # callback that builds the enum items dynamically
from .utils import filter_current_collections
collections = filter_current_collections(context)
return [(x.name, x.name, "") for x in collections]
# ################################# #
# ### END for register function ### #
classes = [
# base
TEST_PT_main,
TEST_PT_selected,
TEST_PT_collections,
# op 01
Prop_Name_Group,
MoveObjectToCollection,
TEST_PT_op01,
# op 02
TEST_PT_op02,
]
def register():
[register_class(i) for i in classes]
    # op 02: storage for the shared variable
    Scene.test_enum = EnumProperty(items=enum_items_collection, name="Target collection")
    # every Scene instance gets a test_enum attribute injected, e.g. context.scene.test_enum
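    # Note (general Blender API caveat, not specific to this add-on): with a dynamic
    # items callback, Python should keep references to the returned strings, and the
    # callback is re-run whenever the UI needs the items.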
def unregister():
[unregister_class(i) for i in classes]
# ################ #
# ### __test__ ### #
if __name__ == "__main__":
register()
| 26.381356 | 86 | 0.644555 |
7945b5b601434b11bde8e46666a337148c16f001 | 1,078 | py | Python | c2_python-operating-system/2_managing-files-with-python/graded-assessment/scripts/generate_report.py | chaiyeow/google-it-automation | 3f02d739178abd5805114cfc027120b8d6ea81c8 | [
"MIT"
] | 220 | 2020-09-23T21:17:49.000Z | 2022-03-30T18:46:07.000Z | c2_python-operating-system/2_managing-files-with-python/graded-assessment/scripts/generate_report.py | chaiyeow/google-it-automation | 3f02d739178abd5805114cfc027120b8d6ea81c8 | [
"MIT"
] | 4 | 2020-10-18T11:08:49.000Z | 2021-05-31T10:32:00.000Z | c2_python-operating-system/2_managing-files-with-python/graded-assessment/scripts/generate_report.py | chaiyeow/google-it-automation | 3f02d739178abd5805114cfc027120b8d6ea81c8 | [
"MIT"
] | 366 | 2020-09-22T18:05:01.000Z | 2022-03-31T17:47:17.000Z | #!/usr/bin/env python3
import csv
def read_employees(csv_file_location):
csv.register_dialect('empDialect', skipinitialspace=True, strict=True)
employee_file = csv.DictReader(open(csv_file_location), dialect = 'empDialect')
employee_list = []
for data in employee_file:
employee_list.append(data)
return employee_list
def process_data(employee_list):
department_list = []
for employee_data in employee_list:
department_list.append(employee_data['Department'])
department_data = {}
for department_name in set(department_list):
department_data[department_name] = department_list.count(department_name)
return department_data
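# Illustration only: process_data returns counts per department,
# e.g. {'IT': 15, 'Sales': 20} (the department names here are made up).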
def write_report(dictionary, report_file):
with open(report_file, "w+") as f:
for k in sorted(dictionary):
f.write(str(k)+':'+str(dictionary[k])+'\n')
f.close()
employee_list = read_employees('/home/student-00-3ab4479a1342/data/employees.csv')
print(employee_list)
dictionary = process_data(employee_list)
print(dictionary)
write_report(dictionary, '/home/student-00-3ab4479a1342/data/report.txt') | 30.8 | 82 | 0.764378 |
7945b8aa30e20aae761b3d33d0c289e01ff9615a | 593 | py | Python | apps/fyle/migrations/0011_auto_20211203_1156.py | fylein/fyle-intacct-api | 16e45538ec3a2b7af396742a42302704c33a7bd7 | [
"MIT"
] | null | null | null | apps/fyle/migrations/0011_auto_20211203_1156.py | fylein/fyle-intacct-api | 16e45538ec3a2b7af396742a42302704c33a7bd7 | [
"MIT"
] | 3 | 2020-07-20T10:54:15.000Z | 2022-02-23T17:13:49.000Z | apps/fyle/migrations/0011_auto_20211203_1156.py | fylein/fyle-intacct-api | 16e45538ec3a2b7af396742a42302704c33a7bd7 | [
"MIT"
] | 2 | 2020-07-25T14:50:56.000Z | 2020-08-02T13:48:37.000Z | # Generated by Django 3.1.13 on 2021-12-03 11:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fyle', '0010_auto_20211001_0525'),
]
operations = [
migrations.AlterField(
model_name='expense',
name='custom_properties',
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name='expensegroup',
name='description',
field=models.JSONField(help_text='Description', max_length=255, null=True),
),
]
| 24.708333 | 87 | 0.598651 |
7945b91669618e82ec5b9854ca4f13f04072d3bf | 124 | py | Python | toolbelt/__init__.py | kadrlica/toolbelt | b616dcee76b38656e63f0b69bf88404970a5f6c6 | [
"MIT"
] | null | null | null | toolbelt/__init__.py | kadrlica/toolbelt | b616dcee76b38656e63f0b69bf88404970a5f6c6 | [
"MIT"
] | null | null | null | toolbelt/__init__.py | kadrlica/toolbelt | b616dcee76b38656e63f0b69bf88404970a5f6c6 | [
"MIT"
] | null | null | null | # Nothing much to see here....
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 20.666667 | 39 | 0.766129 |
7945b9dbf333716c773db4295def1bb72cb04108 | 10,339 | py | Python | model/training.py | n-fallahinia/realtime-force-estimation | f4718f4b7f011c1b7dba1e57bd4151c4de67a6dd | [
"Apache-2.0"
] | 2 | 2020-08-26T01:34:40.000Z | 2020-08-26T01:34:42.000Z | model/training.py | n-fallahinia/realtime-force-estimation | f4718f4b7f011c1b7dba1e57bd4151c4de67a6dd | [
"Apache-2.0"
] | null | null | null | model/training.py | n-fallahinia/realtime-force-estimation | f4718f4b7f011c1b7dba1e57bd4151c4de67a6dd | [
"Apache-2.0"
] | null | null | null | """Tensorflow utility functions for training"""
import os
import datetime
import time
from tqdm import tqdm
import tensorflow as tf
import numpy as np
from time import sleep
from model.utils.utils import save_dict_to_json
class Train_and_Evaluate():
def __init__(self, train_model_spec, train_ds, eval_ds, log_dir):
self.train_model_spec = train_model_spec
self.train_ds = train_ds
self.eval_ds = eval_ds
self.log_dir = log_dir
# Get relevant graph operations or nodes needed for training
self.model = self.train_model_spec['model']
self.loss_object = self.train_model_spec['loss']
self.opt = self.train_model_spec['opt']
self.metrics = train_model_spec['metrics']
self.train_loss = self.metrics['train_loss']
self.train_accuracy_rmse = self.metrics['train_RMSE']
self.train_accuracy_mse = self.metrics['train_MSE']
self.train_accuracy_mae = self.metrics['train_MAE']
self.test_loss = self.metrics['test_loss']
self.test_accuracy = self.metrics['test_accuracy']
# @tf.function
def train_step(self, x_train, y_train):
"""Train the model on batches
        Args:
            x_train: training images
            y_train: training measured forces
"""
# keep track of our gradients
with tf.GradientTape() as tape:
# make a prediction using the model and then calculate the loss
logits = self.model(x_train, training=True)
loss = self.loss_object(y_train, logits)
# calculate the gradients using our tape and then update the model weights
grads = tape.gradient(loss, self.model.trainable_variables)
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
# write metices to writer for summary use
self.train_accuracy_rmse.update_state(y_train, logits)
self.train_accuracy_mse.update_state(y_train, logits)
self.train_accuracy_mae.update_state(y_train, logits)
self.train_loss.update_state(loss)
return loss
@tf.function
def test_step(self, x_test, y_test):
"""Testing the model on batches
        Args:
            x_test: evaluation images
            y_test: measured forces for the evaluation batch
"""
y_test_pred = self.model(x_test, training=False)
loss = self.loss_object(y_test, y_test_pred)
# write metices to writer for summary use
self.test_accuracy.update_state(y_test, y_test_pred)
self.test_loss.update_state(loss)
return loss
def train_and_eval(self, params, restore_from=None):
"""Train the model and evaluate every epoch.
        Args:
            params: (Params) contains hyperparameters of the model.
                    Must define: num_epochs, train_size, batch_size, eval_size, save_summary_steps
            restore_from: (string) directory or file containing weights to restore the graph
"""
# set up the train summary writer
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = os.path.join(self.log_dir, current_time , 'train_summaries')
eval_log_dir = os.path.join(self.log_dir, current_time , 'eval_summaries')
checkpoint_dir = os.path.join(self.log_dir, current_time, "training_checkpoints", 'ckpt')
model_dir = os.path.join(self.log_dir, current_time)
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
eval_summary_writer = tf.summary.create_file_writer(eval_log_dir)
begin_at_epoch = 0
best_eval_acc = 100.0
# Reload weights from directory if specified
if restore_from is not None:
print("[INFO] Restoring parameters from {}".format(restore_from))
if os.path.isdir(restore_from):
reconstructed_model = os.path.join(restore_from, "model_{0:d}".format(params.restore_from_epoch))
                self.model = tf.keras.models.load_model(reconstructed_model)
# TRAINING MAIN LOOP
# ----------------------------------------------------------------------
print("[INFO] training started ...")
# loop over the number of epochs
epochStart = time.time()
for epoch in range(begin_at_epoch, begin_at_epoch + params.num_epochs):
# sys.stdout.flush()
# Compute number of batches in one epoch (one full pass over the training set)
num_steps_train = int(np.ceil(params.train_size / params.batch_size))
num_steps_eval = int(np.ceil(params.eval_size / params.batch_size))
# Use tqdm for progress bar
with tqdm(total=num_steps_train, desc="[INFO] Epoch {0:d}".format(epoch + 1)) as pbar:
# loop over the data in batch size increments
# ----------------------------------------------------------------------
# TRAIN SESSION
for x_train, y_train in self.train_ds.take(num_steps_train):
train_loss = self.train_step(x_train, y_train)
# Log the loss in the tqdm progress bar
sleep(0.1)
# Display metrics at the end of each epoch.
metrics = {
"Train_RMSE": '{:04.2f}'.format(self.train_accuracy_rmse.result().numpy()),
# "Train_MSE": '{:04.2f}'.format(self.train_accuracy_mse.result().numpy()),
# "Train_MAE": '{:04.2f}'.format(self.train_accuracy_mae.result().numpy()),
"Train_Loss": '{:04.2f}'.format(self.train_loss.result().numpy())
}
pbar.set_postfix(metrics)
pbar.update()
# record train summary for tensor board
with train_summary_writer.as_default():
tf.summary.scalar('loss', self.train_loss.result(), step=epoch + 1)
tf.summary.scalar('rmse', self.train_accuracy_rmse.result(), step=epoch + 1)
tf.summary.scalar('mse', self.train_accuracy_mse.result(), step=epoch + 1)
tf.summary.scalar('mae', self.train_accuracy_mae.result(), step=epoch + 1)
tf.summary.image('training images', x_train, step=epoch + 1, max_outputs=10)
# tf.summary.trace_export(name="test_step_trace", step=epoch, profiler_outdir=train_log_dir)
# ----------------------------------------------------------------------
# EVALUATION SESSION
# loop over the eval data in batch size increments
for x_eval, y_eval in self.eval_ds.take(num_steps_eval):
eval_loss = self.test_step(x_eval, y_eval)
# Display metrics at the end of each epoch.
metrics["Eval_Accuracy"] = '{:04.2f}'.format(self.test_accuracy.result().numpy())
metrics["Eval_Loss"] = '{:04.2f}'.format(self.test_loss.result().numpy())
pbar.set_postfix(metrics)
pbar.close()
# record train summary for tensor board
with eval_summary_writer.as_default():
tf.summary.scalar('loss', self.test_loss.result(), step=epoch + 1)
tf.summary.scalar('accuracy', self.test_accuracy.result(), step=epoch + 1)
# ----------------------------------------------------------------------
metrics["Epoch"] = '{0:d}'.format(epoch + 1)
# If best_eval, save the model at best_save_path
eval_acc = self.test_accuracy.result().numpy()
if params.save_model:
if eval_acc <= best_eval_acc:
# Store new best accuracy
best_eval_acc = eval_acc
# Save weights
best_save_path = os.path.join(model_dir, "model_{0:d}".format(epoch + 1))
tf.keras.models.save_model(self.model, best_save_path, save_format = "h5")
print("[INFO] Found new best accuracy, saving in {}".format(best_save_path))
# Save best eval metrics in a json file in the model directory
best_json_path = os.path.join(model_dir, "metrics_eval_best_weights.json")
save_dict_to_json(metrics, best_json_path)
# Save latest eval metrics in a json file in the model directory
last_json_path = os.path.join(model_dir, "metrics_eval_last_weights.json")
save_dict_to_json(metrics, last_json_path)
# ----------------------------------------------------------------------
# Reset training metrics at the end of each epoch
self.train_loss.reset_states()
self.train_accuracy_rmse.reset_states()
self.train_accuracy_mse.reset_states()
self.train_accuracy_mae.reset_states()
self.test_loss.reset_states()
self.test_accuracy.reset_states()
# end of train and eval
# show timing information for the epoch
epochEnd = time.time()
elapsed = (epochEnd - epochStart) / 60.0
print("[INFO] Took {:.4} minutes".format(elapsed))
# ----------------------------------------------------------------------
if params.save_model:
reconstructed_best_model = tf.keras.models.load_model(best_save_path)
reconstructed_best_model.compile(optimizer= self.opt, loss= self.loss_object)
best_final_path = os.path.join(model_dir, "best_full_model_path")
tf.saved_model.save(reconstructed_best_model, best_final_path)
print("[INFO] Final model save in {}".format(best_final_path))
print("[INFO] Training done and log saved in {} ".format(model_dir))
| 50.681373 | 113 | 0.595029 |
7945ba09a5b6713f91a2d3c20bc815a55e0213ed | 18,846 | py | Python | leo/plugins/xsltWithNodes.py | frakel/leo-editor | b574118ee3b7ffe8344fa0d00dac603096117ac7 | [
"MIT"
] | null | null | null | leo/plugins/xsltWithNodes.py | frakel/leo-editor | b574118ee3b7ffe8344fa0d00dac603096117ac7 | [
"MIT"
] | null | null | null | leo/plugins/xsltWithNodes.py | frakel/leo-editor | b574118ee3b7ffe8344fa0d00dac603096117ac7 | [
"MIT"
] | null | null | null | #@+leo-ver=5-thin
#@+node:mork.20041010095009: * @file xsltWithNodes.py
#@+<< docstring >>
#@+node:ekr.20050226120104: ** << docstring >>
""" Adds the Outline:XSLT menu containing XSLT-related commands.
This menu contains the following items:
- Set StyleSheet Node:
- Selects the current node as the xsl stylesheet the plugin will use.
- Process Node with Stylesheet Node:
- Processes the current node as an xml document,
resolving section references and Leo directives.
- Creates a sibling containing the results.
Requires 4Suite 1.0a3 or better, downloadable from http://4Suite.org.
"""
#@-<< docstring >>
#@@language python
#@@tabwidth -4
#@+<< imports >>
#@+node:mork.20041025113509: ** << imports >>
import leo.core.leoGlobals as g
from xml.dom import minidom
if g.isPython3:
import io
StringIO = io.StringIO
else:
import cStringIO
StringIO = cStringIO.StringIO
try:
import Ft
from Ft.Xml import InputSource
from Ft.Xml.Xslt.Processor import Processor
except ImportError:
g.cantImport("Ft",__name__)
Ft = None
import weakref
#@-<< imports >>
#@+<<parser problems>>
#@+node:mork.20041024091024: ** <<parser problems>>
#@@killcolor
#@+at
# 1. Having space before the start of the document caused it not to work. I fixed
# this by striping the whitespace from the start and end of the data at xslt
# time.
#
# 2. having a @ right before a tag causes it to not process.
# It appears to be safe to follow this pattern:
# @ </end>
# but not:
# @</end>
#
# I dont know at this point if its just illegal xml, or its a problem in the parser. ??
#@-<<parser problems>>
#@+<<future directions>>
#@+node:mork.20041025101943: ** <<future directions>>
#@+at
# 1. Add more XSLT boilerplate insertions.( done in .3 )
# 2. Maybe add a well-formedness check. (done in .3, test with minidom )
#@-<<future directions>>
__version__ = '0.6'
#@+<< version history >>
#@+node:mork.20041025113211: ** << version history >>
#@@killcolor
#@+at
#
# 0.1: Original code.
#
# 0.2 EKR: Converted to outline.
#
# 0.3: Added more XSLT boilerplate. Added Test with Minidom Discovered parser problem(?).
#
# 0.4 EKR:
# - Added init function.
# 0.5 EKR:
# - Remove 'start2' hook & haveseen dict.
# - Use keywords.get('c') instead of g.top().
# 0.6 EKR:
# - Removed g.top from example code.
#@-<< version history >>
#@+others
#@+node:ekr.20050226120104.1: ** init
def init():
'''Return True if the plugin has loaded successfully.'''
ok = Ft
if ok:
g.registerHandler(('menu2',"new"),addMenu)
g.plugin_signon(__name__)
return ok
#@+node:mork.20041025115037: ** xslt elements
#This dict contains elements that go into a stylesheet
xslt = {
'apply-imports': '<xsl:apply-imports/>',
'apply-templates': "<xsl:apply-templates select ='' mode='' />",
'attribute': "<xsl:attribute name=''> </xsl:attribute>",
'attribute-set': "<xsl:attribute-set name=''> </xsl:attribute-set>",
'call-template': "<xsl:call-template name=''> </xsl:call-template>",
'choose': "<xsl:choose> </xsl:choose>",
'comment': "<xsl:comment> </xsl:comment>",
'copy': "<xsl:copy> </xsl:copy>",
'copy-of': "<xsl:copy-of select='' />",
'decimal-format' : "<xsl:decimal-format />",
'element': "<xsl:element name='' > </xsl:element>",
'fallback': "<xsl:fallback> </xsl:fallback>",
'for-each': "<xsl:for-each select='' > </xsl:for-each>",
'if': "<xsl:if test=''> </xsl:if>",
'import': "<xsl:import href='' />",
'include': "<xsl:include href='' />",
'key': "<xsl:key name='' match='' use='' />",
'message': "<xsl:message> </xsl:message>",
'namespace-alias': "<xsl:namespace-alias stylesheet-prefix='' result-prefix='' />",
'number': "<xsl:number />",
'otherwise': "<xsl:otherwise> </xsl:otherwise>",
'output': "<xsl:output />",
'param': "<xsl:param name='' > </xsl:param>",
'preserve-space': "<xsl:preserve-space elements='' />",
'processing-instruction': "<xsl:processing-instruction name='' > </xsl:processing-instruction>",
'sort': "<xsl:sort />",
'strip-space': "<xsl:strip-space elements='' />",
'stylesheet': "<xsl:stylesheet xmlns:xsl='' version='' > </xsl:stylesheet>",
'template': "<xsl:template > </xsl:template>",
'text': "<xsl:text > </xsl:text>",
'transform': "<xsl:transform > </xsl:transform>",
'value-of': "<xsl:value-of select='' />",
'variable': "<xsl:variable name=''> </xsl:variable>",
'when': "<xsl:when text='' > </xsl:when>",
'with-param': "<xsl:with-param name=''> </xsl:with-param>",
}
#@+node:mork.20041010095202: ** setStyleNode
stylenodes = weakref.WeakKeyDictionary()
def setStyleNode( c ):
'''this command sets what the current style node is'''
position = c.p
stylenodes[ c ] = position
#@+node:mork.20041010095202.1: ** processDocumentNode
def processDocumentNode( c ):
'''this executes the stylesheet node against the current node'''
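    # The flow below: serialize the stylesheet node and the selected node with Leo's
    # sentinel-free write logic, round-trip each through minidom, feed them to the
    # 4Suite Processor, then insert the transform result as a new sibling node.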
try:
if not styleNodeSelected( c ): return
proc = Processor()
stylenode = stylenodes[ c ]
pos = c.p
c.selectPosition( stylenode )
sIO = getString( c )
mdom1 = minidom.parseString( sIO )
sIO = str( mdom1.toxml() )
hstring = str( stylenode.h )
if hstring == "": hstring = "no headline"
stylesource = InputSource.DefaultFactory.fromString( sIO, uri = hstring)
proc.appendStylesheet( stylesource )
c.selectPosition( pos )
xmlnode = pos.v
xIO = getString( c )
mdom2 = minidom.parseString( xIO )
xIO = str( mdom2.toxml())
xhead = str( xmlnode.headString )
if xhead == "": xhead = "no headline"
xmlsource = InputSource.DefaultFactory.fromString( xIO, uri = xhead )
result = proc.run( xmlsource )
nhline = "xsl:transform of " + str( xmlnode.headString )
p2 = pos.insertAfter() # tnode )
p2.setBodyString(result)
p2.setHeadString(nhline)
c.redraw()
except Exception as x:
g.es( 'exception ' + str( x ))
c.redraw()
#@+node:mork.20041025121608: ** addXSLTNode
def addXSLTNode (c):
'''creates a node and inserts some xslt boilerplate'''
pos = c.p
#body = '''<?xml version="1.0"?>'''
# body = '''<?xml version="1.0"?>
#<xsl:transform xmlns:xsl="http:///www.w3.org/1999/XSL/Transform" version="1.0">'''
body = '''<?xml version="1.0"?>
<xsl:transform xmlns:xsl="http:///www.w3.org/1999/XSL/Transform" version="1.0">
</xsl:transform>'''
p2 = pos.insertAfter() # tnode)
p2.setBodyString(body)
p2.setHeadString("xslt stylesheet")
c.redraw()
#@+node:mork.20041010110121: ** addXSLTElement
def addXSLTElement( c , element):
'''adds some xslt to the text node'''
w = c.frame.body.wrapper
w.insert( 'insert', element )
### w.event_generate( '<Key>' )
### w.update_idletasks()
#@+node:mork.20041025113021: ** getString (xsltWithNodes.py)
def getString (c):
'''
This def turns a node into a string using Leo's file-nosent write logic.
'''
at = c.atFileCommands
# EKR: 2017/04/10: needs testing.
at.writeOpenFile(c.p, nosentinels=True, toString=True)
return cleanString(at.stringOutput)
#@+node:mork.20041025120706: ** doMinidomTest
def doMinidomTest( c ):
'''
This def performs a simple test on a node.
Can the data be successfully parsed by minidom or not?
Results are output to the log.
'''
s = getString( c )
try:
minidom.parseString( s )
except Exception as x:
g.error("Minidom could not parse node because of:\n %s" % x)
return
g.blue("Minidom could parse the node")
#@+node:mork.20041025090303: ** cleanString
def cleanString( data ):
'''This method cleans a string up for the processor. It currently just removes
leading and trailing whitespace'''
val = data.strip()
return val
#@+node:mork.20041010125444: ** jumpToStyleNode
def jumpToStyleNode( c ):
'''Simple method that jumps us to the current XSLT node'''
if not styleNodeSelected( c ): return
pos = stylenodes[ c ]
c.selectPosition( pos )
c.redraw()
#@+node:mork.20041010125444.1: ** styleNodeSelected
def styleNodeSelected( c ):
'''Determines if a XSLT Style node has not been selected'''
if c not in stylenodes:
g.es( "No Style Node selected" )
return False
return True
#@+node:mork.20041010100633: ** addMenu
def addMenu( tag, keywords ):
# pylint: disable=undefined-variable
# c *is* defined.
c = keywords.get('c')
if not c: return
mc = c.frame.menu
# men = men.getMenu( 'Outline' )
# xmen = Tk.Menu(men,tearoff = False)
xmen = mc.createNewMenu ('XSLT',"Outline")
c.add_command(xmen,
label = "Set Stylesheet Node",
command = lambda c = c : setStyleNode(c))
c.add_command(xmen,
label = "Jump To Style Node",
command = lambda c = c: jumpToStyleNode(c))
c.add_command(xmen,
label = "Process Node with Stylesheet Node",
command = lambda c=c : processDocumentNode(c))
xmen.add_separator(xmen)
c.add_command(xmen,
label = "Create Stylesheet Node",
command = lambda c = c : addXSLTNode(c))
# elmen= Tk.Menu( xmen, tearoff = False )
# xmen.add_cascade( label = "Insert XSL Element", menu = elmen )
m2 = mc.createNewMenu ('Insert XSL Element','XSLT')
xsltkeys = list(xslt.keys())
xsltkeys.sort()
for z in xsltkeys:
# pylint: disable=cell-var-from-loop
c.add_command(m2,
label = z,
command = lambda c=c,element=xslt[ z ]: addXSLTElement(c,element))
# men.add_cascade(menu = xmen, label = "XSLT-Node Commands")
m3 = mc.createNewMenu('XSLT-Node Commands','XSLT')
c.add_command(m3,
label = 'Test Node with Minidom',
command = lambda c=c: doMinidomTest(c))
#@+node:mork.20041025100716: ** examples/tests
#@+at
# table.leo contains the xml. xslt is in the other node.
#
# To test this plugin, use the "Set Stylesheet Node" command on the xslt node.
#
# Process it against the table.leo node.
#@@c
# pylint: disable=pointless-string-statement
r'''
#@+others
#@+node:ekr.20140906065955.18786: *3* table.leo
#@@path /boboo/leo-4.2-final/plugins
#@+node:ekr.20140906065955.18787: *4* @@nosent table.py
if g.isPython3:
import io
StringIO = io.StringIO
else:
import cStringIO
StringIO = cStringIO.StringIO
import Tkinter as Tk
import tktable as tktab
import leo.core.leoGlobals as g
import csv
import weakref
import Pmw
class CSVVisualizer(object):
arrays = []
#@+others
#@+node:ekr.20140906065955.18788: *5* init
def __init__( self, c ):
self.c = c
self.arr = tktab.ArrayVar()
CSVVisualizer.arrays.append( self.arr )
self.rows = 0
self.columns = 0
self.type = 'excel'
#@+node:ekr.20140906065955.18789: *5* addData
def addData( self ):
arr = self.arr
reader = self.readData()
hc = False
for n, d in enumerate( reader ):
for n1, d2 in enumerate( d ):
arr.set( "%s,%s" %( n, n1 ), str(d2) )
self.columns = n1 + 1
self.rows = n + 1
return self.columns, self.rows
#@+node:ekr.20140906065955.18790: *5* readData
def readData( self ):
c = self.c
pos = c.p
data = pos.b
cS = StringIO()
cS.write( data )
cS.seek( 0 )
sniff = csv.Sniffer()
self.type = sniff.sniff( data )
reader = csv.reader( cS, self.type )
return reader
#@+node:ekr.20140906065955.18791: *5* writeData
def writeData( self, save ):
pos = self.c.p
n2 = self.rows
n = self.columns
data = []
for z in range( n2 ):
ndata = []
for z2 in range( n ):
ndata.append( self.arr.get( "%s,%s" % ( z, z2 ) ) )
data.append( ndata )
cS = StringIO()
csv_write = csv.writer( cS, self.type )
for z in data:
csv_write.writerow( z )
cS.seek( 0 )
if not save:
p2 = pos.insertAfter() # tnd )
p2.setBodyString(cS.getvalue())
p2.setHeadString("Save of Edited " + str( pos.h))
else:
# pos.setTnodeText( cS.getvalue() )
pos.setBodyString(cS.getvalue())
self.c.redraw()
#@+node:ekr.20140906065955.18792: *5* addColumn
def addColumn( self, tab ):
self.columns = self.columns + 1
tab.configure( cols = self.columns )
for z in range( self.rows ):
self.arr.set( '%s,%s' %( z , self.columns -1 ), "" )
#@+node:ekr.20140906065955.18793: *5* deleteColumn
def deleteColumn( self, tab ):
i = tab.index( 'active' )
if i:
tab.delete_cols( i[ 1 ], 1 )
self.columns = self.columns - 1
#@+node:ekr.20140906065955.18794: *5* addRow
def addRow( self , tab ):
self.rows = self.rows + 1
tab.configure( rows = self.rows )
rc = '%s,0' % (self.rows -1 )
for z in range( self.columns ):
self.arr.set( '%s,%s' %( self.rows - 1, z ), "" )
tab.activate( rc )
tab.focus_set()
#@+node:ekr.20140906065955.18795: *5* deleteRow
def deleteRow( self, tab ):
i = tab.index( 'active' )
if i:
tab.delete_rows( i[ 0 ], 1 )
self.rows = self.rows - 1
#@+node:ekr.20140906065955.18796: *5* createDefaultRecord
def createDefaultRecord( self, rows, columns ):
self.rows = rows
self.columns = columns
for z in range( rows ):
for z1 in range( columns ):
self.arr.set( '%s,%s' %( z, z1 ), "" )
#@+node:ekr.20140906065955.18797: *5* newTable
def newTable( c ):
pos = c.p
npos = pos.insertAfter() # tnd )
npos.setHeadString('New Table')
c.redraw()
c.selectPosition( npos )
viewTable( c , True )
#@+node:ekr.20140906065955.18798: *5* viewTable
def viewTable( c, new = False ):
pos = c.p
dialog = createDialog( pos )
csvv = CSVVisualizer( c )
sframe = Pmw.ScrolledFrame( dialog.interior() )
sframe.pack()
tab = createTable( sframe.interior(), csvv.arr )
createBBox( dialog.interior(), csvv, tab )
if not new:
n = csvv.addData()
else:
n = ( 4, 1 )
csvv.createDefaultRecord( n[ 1 ], n[ 0 ] )
tab.configure( cols = n[ 0 ], rows = n[ 1 ] )
dialog.configure( command = lambda name, d = dialog, csvv = csvv:
fireButton( name, d, csvv ) )
dialog.activate()
#@+node:ekr.20140906065955.18799: *5* fireButton
def fireButton( name, dialog, csvv ):
if name == "Close":
dialog.deactivate()
dialog.destroy()
elif name == "Write To New":
csvv.writeData( False )
elif name == "Save To Current":
csvv.writeData( True )
#@+node:ekr.20140906065955.18800: *5* createDialog
def createDialog( pos ):
dialog = Pmw.Dialog( title = "Table Editor for " + str( pos.h),
buttons = [ 'Save To Current', 'Write To New', 'Close' ] )
dbbox = dialog.component( 'buttonbox' )
for z in range( dbbox.numbuttons() ):
dbbox.button( z ).configure( background = 'white', foreground = 'blue' )
return dialog
#@+node:ekr.20140906065955.18801: *5* createTable
def createTable( parent , arr ):
tab = tktab.Table( parent , rows = 0, cols = 0, variable = arr, sparsearray=1,
background = 'white', foreground = 'blue', selecttype = 'row' )
tab.tag_configure( 'active', background = '#FFE7C6', foreground = 'blue' )
tab.tag_configure( 'sel', background = '#FFE7C6', foreground = 'blue', bd =2 )
tab.pack()
return tab
#@+node:ekr.20140906065955.18802: *5* createBBox
def createBBox( parent, csvv, tab ):
bbox = Pmw.ButtonBox( parent )
bconfig = ( ( "Add Row", lambda tab = tab : csvv.addRow( tab ) ),
( "Delete Row", lambda tab = tab: csvv.deleteRow( tab ) ),
( "Add Column", lambda tab = tab: csvv.addColumn( tab ) ),
( "Delete Column", lambda tab = tab: csvv.deleteColumn( tab ) ) )
for z in bconfig:
bbox.add( z[ 0 ], command = z[ 1 ], background = 'white', foreground = 'blue' )
bbox.pack()
#@+node:ekr.20140906065955.18803: *5* addMenu
haveseen = weakref.WeakKeyDictionary()
def addMenu( tag, keywords ):
c = keywords.get('c') or keywords.get('new_c')
if c in haveseen:
return
haveseen[ c ] = None
men = c.frame.menu
men = men.getMenu( 'Outline' )
tmen = Tk.Menu( men, tearoff = 0 )
men.add_cascade( menu = tmen, label = "Table Commands" )
c.add_command(tmen, label = "Edit Node With Table", command = lambda c = c: viewTable( c ) )
c.add_command(tmen, label = "Create New Table", command = lambda c = c: newTable( c ) )
#@+node:ekr.20140906065955.18804: *5* if 1:
if 1:
registerHandler( ('start2' , 'open2', "new") , addMenu )
__version__ = ".125"
g.plugin_signon( __name__ )
#@-others
#@+node:mork.20041025100851.1: *3* xslt to turn leo file into html
<?xml version="1.0"?>
<xsl:transform xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method = 'xml' />
<xsl:preserve-space elements='leo_file/tnodes/t'/>
<xsl:template match='v'>
<ul type='square'>
<xsl:variable name ='t' select ='@t' />
<h1><xsl:value-of select='vh'/></h1>
<xsl:for-each select='ancestor::leo_file/tnodes/t'>
<xsl:if test="./attribute::tx=$t">
<li>
<pre>
<xsl:value-of select='.' />
</pre>
</li>
</xsl:if>
</xsl:for-each>
<xsl:if test ='./v' >
<xsl:apply-templates select = 'v'/>
</xsl:if>
</ul>
</xsl:template>
<xsl:template match ='leo_file'>
<html><head>
<style>
ul{ position:relative;right=25;
border:thin ridge blue}
li{ position:relative;right=25}
pre{ background:#FFE7C6 }
</style>
</head>
<body>
<xsl:apply-templates select='vnodes'/>
</body>
</html>
</xsl:template>
<xsl:template match = 'vnodes'>
<xsl:for-each select = 'v'>
<frame>
<xsl:apply-templates select ='.'/>
</frame>
</xsl:for-each>
</xsl:template>
</xsl:transform>
#@-others
'''
#@-others
#@-leo
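# A minimal sketch (not part of the plugin) of running the XSLT shown above
# against a saved .leo file outside of Leo. It assumes lxml is installed;
# "table.leo" and "leo_to_html.xsl" are placeholder file names for the XML
# outline and the stylesheet from the example node:
def _example_apply_xslt(leo_path="table.leo", xsl_path="leo_to_html.xsl"):
    from lxml import etree  # third-party dependency, assumed available
    outline = etree.parse(leo_path)              # the Leo XML outline
    to_html = etree.XSLT(etree.parse(xsl_path))  # compiled stylesheet
    return str(to_html(outline))                 # rendered HTML as a string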
| 30.945813 | 100 | 0.586225 |
7945ba0f34e96419c08149d45d3fc4b28d32f317 | 9,542 | py | Python | gizer/oplog_sync_alligned_data.py | racker/gizer | 4600999c35e99bce54071ea4f952b09b3fd5dc9b | [
"Apache-2.0"
] | null | null | null | gizer/oplog_sync_alligned_data.py | racker/gizer | 4600999c35e99bce54071ea4f952b09b3fd5dc9b | [
"Apache-2.0"
] | null | null | null | gizer/oplog_sync_alligned_data.py | racker/gizer | 4600999c35e99bce54071ea4f952b09b3fd5dc9b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
""" Simplified version of synchronizer that is working with alligned data. """
__author__ = "Yaroslav Litvinov"
__copyright__ = "Copyright 2016, Rackspace Inc."
__email__ = "[email protected]"
from logging import getLogger
from gizer.oplog_parser import exec_insert
from gizer.batch_comparator import ComparatorMongoPsql
from gizer.oplog_sync_base import OplogSyncBase
from gizer.oplog_sync_base import DO_OPLOG_REREAD_MAXCOUNT
from gizer.oplog_sync_base import MAX_CONSEQUENT_FAILURES
from gizer.oplog_sync_base import MAX_CONSEQUENT_TRANSPORT_FAILURES
from gizer.psql_objects import remove_rec_from_psqldb
from gizer.psql_objects import insert_tables_data_into_dst_psql
from gizer.collection_reader import CollectionReader
from mongo_reader.prepare_mongo_request import prepare_oplog_request
# usefull for testing recovering (it helps simulate bad records)
#MAX_REQCOUNT_FOR_SHARD = 100
class OplogSyncAllignedData(OplogSyncBase):
""" Simplified version of synchronizer that is working with alligned data.
As init load produces unalligned data this syncronizer should not be
used just after init load finishes. Instead OplogSyncUnallignedData
must be used. """
def __init__(self, psql, mongo_readers, oplog_readers,
schemas_path, schema_engines, psql_schema,
attempt, recovery_allowed):
""" params:
psql -- Postgres cursor wrapper
mongo_readers -- dict of mongo readers, one per collection
        oplog_readers -- dict of Mongo oplog cursor wrappers, one per shard
schemas_path -- Path with js schemas representing mongo collections
psql_schema -- psql schema whose tables data to patch."""
super(OplogSyncAllignedData, self).\
__init__(psql, mongo_readers, oplog_readers,
schemas_path, schema_engines, psql_schema, attempt)
self.recovery_allowed = recovery_allowed
self.comparator = ComparatorMongoPsql(schema_engines,
mongo_readers,
psql,
psql_schema)
def __del__(self):
del self.comparator
def sync(self, start_ts_dict):
""" Read oplog operations starting just after timestamp start_ts_dict
by gathering timestamps from all configured shards.
Apply oplog operations to psql db. After all records are applied do
consistency check by comparing source (mongo) and dest(psql) records.
        Roll back and return None if the operations were applied but the
        consistency checks fail.
        Otherwise return the new sync points as a dict.
params:
start_ts_dict -- dict with Timestamp for every shard. """
new_ts_dict = start_ts_dict
do_again_counter = 0
do_again = True
while do_again:
do_again = False
new_ts_dict = self.read_oplog_apply_ops(new_ts_dict,
do_again_counter)
compare_res = self.comparator.compare_src_dest()
failed_trydata = self.comparator.get_failed_trydata()
getLogger(__name__).warning("Failed cmp rereads %s",
failed_trydata)
last_portion_failed = False
recover = False
if failed_trydata:
if len(failed_trydata) == 1 \
and do_again_counter in failed_trydata:
# if failed only latest data
last_portion_failed = True
elif self.attempt < MAX_CONSEQUENT_FAILURES:
# on high level will not return an error
self.set_failed()
return start_ts_dict
elif self.recovery_allowed:
# recover records whose cmp get negative result
self.recover_failed_items(failed_trydata)
recover = True
compare_res = True
if not compare_res or not new_ts_dict:
# if transport returned an error then keep the same ts_start
# and return True, as nothing applied
if (self.failed or self.comparator.is_failed()):
getLogger(__name__).warning("Attempt %d failed",
self.attempt)
if self.attempt < MAX_CONSEQUENT_TRANSPORT_FAILURES:
return start_ts_dict
else:
return None
if last_portion_failed:
if do_again_counter < DO_OPLOG_REREAD_MAXCOUNT:
do_again = True
do_again_counter += 1
else: # Rereads count exceeded
                    getLogger(__name__).warning(
                        'Rereads count exceeded. '
                        'Force assigning compare_res to True.')
compare_res = True
if compare_res:
getLogger(__name__).info('COMMIT')
self.psql.conn.commit()
if recover:
return 'resync' # must be handled specially
else:
return new_ts_dict
else:
getLogger(__name__).error('ROLLBACK')
self.psql.conn.rollback()
return None
def read_oplog_apply_ops(self, start_ts_dict, reread):
""" Apply ops going after specified timestamps.
params:
start_ts_dict -- dict with Timestamp for every shard.
        Return the updated sync points as a dict, or None if the oplog parser failed. """
# derive new sync point from starting points and update it on the go
for name in self.oplog_readers:
# get new timestamps greater than sync point
js_oplog_query = prepare_oplog_request(start_ts_dict[name])
self.oplog_readers[name].make_new_request(js_oplog_query)
# TODO: comment it
#if self.oplog_readers[name].real_transport():
# self.oplog_readers[name].cursor.limit(MAX_REQCOUNT_FOR_SHARD)
parser = self.new_oplog_parser(dry_run=False)
getLogger(__name__).info("Reread oplog ntry=%d", reread)
# go over oplog, and apply oplog ops for every timestamp
oplog_queries = parser.next()
while oplog_queries != None:
collection_name = parser.item_info.schema_name
rec_id = parser.item_info.rec_id
self.oplog_rec_counter += 1
if len(oplog_queries):
getLogger(__name__).info(\
"Reread %d exec rec_id [%s] %s related queries [%s] %s:",
reread, collection_name, rec_id,
parser.item_info.oplog_name, parser.item_info.ts)
for oplog_query in oplog_queries:
self.queries_counter += 1
exec_insert(self.psql, oplog_query)
self.comparator.add_to_compare(collection_name, rec_id, reread)
oplog_queries = parser.next()
getLogger(__name__).info(\
"%d reread. Handled oplog records/psql queries: %d/%d" %
(reread, self.oplog_rec_counter, self.queries_counter))
res = {}
for shard in start_ts_dict:
if shard in parser.last_oplog_ts:
# ts updated for this shard
res[shard] = parser.last_oplog_ts[shard]
else:
# nothing received from this shard
res[shard] = start_ts_dict[shard]
if parser.is_failed():
self.set_failed()
res = None
return res
def recover_failed_items(self, failed_items):
getLogger(__name__).warning('start recovery')
for _, collection_ids in failed_items.iteritems():
for collection, ids in collection_ids.iteritems():
self.recover_collection_items(collection, ids)
# print list of recovered items
for _, collection_ids in failed_items.iteritems():
for collection, ids in collection_ids.iteritems():
for rec_id in ids:
getLogger(__name__).info("recovered %s %s",
collection, rec_id)
getLogger(__name__).info("recover complete")
def recover_collection_items(self, collection, ids):
reader = CollectionReader(collection,
self.schema_engines[collection],
self.mongo_readers[collection])
maxs = 100
splitted = [ids[i:i + maxs] for i in xrange(0, len(ids), maxs)]
for chunk_ids in splitted:
recs = reader.get_mongo_table_objs_by_ids(chunk_ids)
for str_rec_id, rec in recs.iteritems():
# 1. remove from psql
matched_list = [i for i in ids if str(i) == str(str_rec_id)]
if not matched_list:
# filter out results from mock transport,
# that was not requested
continue
rec_id_obj = matched_list[0]
remove_rec_from_psqldb(self.psql, self.psql_schema,
reader.schema_engine,
collection, rec, rec_id_obj)
# 2. add new one to psql
insert_tables_data_into_dst_psql(self.psql, rec,
self.psql_schema, '')
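# A rough usage sketch (not part of gizer): how a caller is expected to drive
# sync() based on its return-value protocol. `sync_engine` stands for a fully
# configured OplogSyncAllignedData instance and `start_ts_dict` maps every
# shard name to its last known Timestamp.
def _example_sync_loop(sync_engine, start_ts_dict):
    ts_dict = start_ts_dict
    while True:
        res = sync_engine.sync(ts_dict)
        if res is None:
            # consistency check failed; the transaction was rolled back
            break
        if res == 'resync':
            # records were recovered; the caller must schedule a full resync
            break
        # otherwise res is the dict with the new sync points per shard
        ts_dict = res
    return ts_dict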
| 47.237624 | 79 | 0.59673 |
7945ba6e9e803ce105c6d57d71b7767f2877aa65 | 3,405 | py | Python | utils.py | zengru001usst/acne | ae652e814649e88034b3b506ccbe34432b1eb85a | [
"Apache-2.0"
] | 49 | 2020-03-27T21:00:57.000Z | 2022-03-25T06:54:00.000Z | utils.py | zengru001usst/acne | ae652e814649e88034b3b506ccbe34432b1eb85a | [
"Apache-2.0"
] | 5 | 2020-06-15T17:30:09.000Z | 2021-07-14T12:22:33.000Z | utils.py | zengru001usst/acne | ae652e814649e88034b3b506ccbe34432b1eb85a | [
"Apache-2.0"
] | 12 | 2020-05-06T14:50:10.000Z | 2022-03-25T06:53:55.000Z | # Filename: utils.py
# License: LICENSES/LICENSE_UVIC_EPFL
import gzip
import pickle
import numpy as np
import h5py
def np_skew_symmetric(v):
zero = np.zeros_like(v[:, 0])
M = np.stack([
zero, -v[:, 2], v[:, 1],
v[:, 2], zero, -v[:, 0],
-v[:, 1], v[:, 0], zero,
], axis=1)
return M
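# A quick sanity check of the layout above (added as an illustration only):
# every row of the (N, 9) output, reshaped to 3x3, is the usual cross-product
# (skew-symmetric) matrix of the corresponding vector.
def _check_skew_symmetric():
    v = np.array([[1.0, 2.0, 3.0]])
    w = np.array([0.5, -1.0, 2.0])
    M = np_skew_symmetric(v).reshape(3, 3)
    assert np.allclose(M.dot(w), np.cross(v[0], w))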
def denorm_points(x, T):
x = (x - np.array([T[0,2], T[1,2]])) / np.asarray([T[0,0], T[1,1]])
return x
def compute_T_with_imagesize(w, h, f=None, ratio=1.0):
cx = (w - 1.0) * 0.5
cy = (h - 1.0) * 0.5
mean = np.array([cx, cy])
if f is not None:
f = f
else:
f = max(w - 1.0, h - 1.0) * ratio
scale = 1.0 / f
T = np.zeros((3, 3,))
T[0, 0], T[1, 1], T[2, 2] = scale, scale, 1
T[0, 2], T[1, 2] = -scale * mean[0], -scale * mean[1]
return T.copy()
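# Illustration only: for a 640x480 image with the default ratio, the principal
# point maps to the origin and pixel coordinates are divided by max(w, h) - 1.
# Uses norm_points_with_T, which is defined further below in this module.
def _check_T_with_imagesize():
    T = compute_T_with_imagesize(640, 480)
    centre = norm_points_with_T(np.array([[319.5, 239.5]]), T)
    assert np.allclose(centre, 0.0)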
def norm_points(x):
x_mean = np.mean(x, axis=0)
dist = x - x_mean
meandist = np.sqrt((dist**2).sum(axis=1)).mean()
scale = np.sqrt(2) / meandist
T = np.zeros([3,3])
T[0,0], T[1,1], T[2,2] = scale, scale, 1
T[0,2], T[1,2] = -scale*x_mean[0], -scale*x_mean[1]
x = x * np.asarray([T[0,0], T[1,1]]) + np.array([T[0,2], T[1,2]])
return x, T
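# Sanity check for the Hartley-style normalisation above (illustration only):
# the returned points are zero-mean and their mean distance to the origin is
# sqrt(2).
def _check_norm_points():
    pts = np.random.rand(100, 2) * 50.0
    normed, _ = norm_points(pts)
    assert np.allclose(normed.mean(axis=0), 0.0)
    assert np.isclose(np.sqrt((normed ** 2).sum(axis=1)).mean(), np.sqrt(2))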
def norm_points_with_T(x, T):
x = x * np.asarray([T[0,0], T[1,1]]) + np.array([T[0,2], T[1,2]])
return x
def savepklz(data_to_dump, dump_file_full_name, force_run=False):
''' Saves a pickle object and gzip it '''
if not force_run:
raise RuntimeError("This function should no longer be used!")
with gzip.open(dump_file_full_name, 'wb') as out_file:
pickle.dump(data_to_dump, out_file)
def loadpklz(dump_file_full_name, force_run=False):
''' Loads a gziped pickle object '''
if not force_run:
raise RuntimeError("This function should no longer be used!")
with gzip.open(dump_file_full_name, 'rb') as in_file:
dump_data = pickle.load(in_file)
return dump_data
def saveh5(dict_to_dump, dump_file_full_name):
''' Saves a dictionary as h5 file '''
with h5py.File(dump_file_full_name, 'w') as h5file:
if isinstance(dict_to_dump, list):
for i, d in enumerate(dict_to_dump):
newdict = {'dict' + str(i): d}
writeh5(newdict, h5file)
else:
writeh5(dict_to_dump, h5file)
def writeh5(dict_to_dump, h5node):
''' Recursive function to write dictionary to h5 nodes '''
for _key in dict_to_dump.keys():
if isinstance(dict_to_dump[_key], dict):
h5node.create_group(_key)
cur_grp = h5node[_key]
writeh5(dict_to_dump[_key], cur_grp)
else:
h5node[_key] = dict_to_dump[_key]
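# Minimal usage sketch for saveh5/writeh5 (illustration only): nested dicts
# become HDF5 groups, everything else becomes a dataset. The file name is a
# placeholder.
def _example_saveh5(path="example.h5"):
    nested = {"geom": {"xs": np.arange(5)}, "label": np.array([1, 2, 3])}
    saveh5(nested, path)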
def loadh5(dump_file_full_name):
''' Loads a h5 file as dictionary '''
try:
with h5py.File(dump_file_full_name, 'r') as h5file:
dict_from_file = readh5(h5file)
except Exception as e:
print("Error while loading {}".format(dump_file_full_name))
raise e
return dict_from_file
def readh5(h5node):
''' Recursive function to read h5 nodes as dictionary '''
dict_from_file = {}
for _key in h5node.keys():
if isinstance(h5node[_key], h5py._hl.group.Group):
dict_from_file[_key] = readh5(h5node[_key])
else:
dict_from_file[_key] = h5node[_key].value
return dict_from_file
#
# utils.py ends here
| 25.222222 | 71 | 0.585903 |
7945bbc0ca413a590fad4d24c39deb95ea0ca79d | 1,608 | py | Python | PTA/PAT_A/Python3/A1082_AC.py | StrayDragon/OJ-Solutions | b31b11c01507544aded2302923da080b39cf2ba8 | [
"MIT"
] | 1 | 2019-05-13T10:09:55.000Z | 2019-05-13T10:09:55.000Z | PTA/PAT_A/Python3/A1082_AC.py | StrayDragon/OJ-Solutions | b31b11c01507544aded2302923da080b39cf2ba8 | [
"MIT"
] | null | null | null | PTA/PAT_A/Python3/A1082_AC.py | StrayDragon/OJ-Solutions | b31b11c01507544aded2302923da080b39cf2ba8 | [
"MIT"
] | null | null | null | # reference: https://gumble.pw/python-num2chinese.html
import itertools
def revuniq(l):
return ''.join(
k + ' ' for k, g in itertools.groupby(reversed(l))
)
def digits2pinyin(digits):
basic = ('ling', 'yi', 'er', 'san', 'si', 'wu', 'liu', 'qi', 'ba', 'jiu')
unit1 = ('Shi', 'Bai', 'Qian')
unit2 = ('Wan', 'Yi')
result = []
nd = str(digits)
if nd[0] == '-':
result.append('Fu')
integer = nd.lstrip('+-')
if int(integer):
splitted = [integer[max(i - 4, 0):i]
for i in range(len(integer), 0, -4)]
intresult = []
for nu, unit in enumerate(splitted):
if int(unit) == 0: # 0000
intresult.append(basic[0])
continue
ulist = []
unit = unit.zfill(4)
for nc, ch in enumerate(reversed(unit)):
if ch == '0':
                    if ulist:  # interior zero: emit 'ling' (duplicates are collapsed later)
ulist.append(basic[0])
elif nc == 0:
ulist.append(basic[int(ch)])
elif nc == 1 and ch == '1' and unit[1] == '0':
ulist.append(unit1[0])
else:
ulist.append(basic[int(ch)] + ' ' + unit1[nc - 1])
ustr = revuniq(ulist)
if nu == 0:
intresult.append(ustr)
else:
intresult.append(ustr + unit2[nu - 1])
result.append(revuniq(intresult).strip(basic[0]))
else:
result.append(basic[0])
return result
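# Self-check against the official PAT A1082 sample (added as an illustration
# only; the graded script below still reads a single line from stdin):
def _check_sample():
    out = ''.join(digits2pinyin('-123456789')).strip()
    assert out == ('Fu yi Yi er Qian san Bai si Shi wu Wan '
                   'liu Qian qi Bai ba Shi jiu')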
res = digits2pinyin(input())
print(''.join(res).strip())
| 29.236364 | 77 | 0.458955 |
7945bd2d424475c8d628d8591dc2abb7934def5f | 498 | py | Python | Physics250-ME32/AngularFrequencyOftheWaves.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | Physics250-ME32/AngularFrequencyOftheWaves.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | Physics250-ME32/AngularFrequencyOftheWaves.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | import numpy as np
import math
c = 2.9979 * pow(10,8)
e = 8.85 * pow(10,-12)
# Practice test question 3 type: compute the angular frequency of an EM wave from its wavelength
def waveIntensity():
unit = input("Is it cm or mm?: ")
if unit == "cm":
wavelength = float(input("Input wavelength (cm): ")) / 100
if unit == "mm":
wavelength = float(input("Input wavelength (mm): ")) / 1000
angfreq = c / wavelength * 2 * math.pi
print("Angular Frequency:", angfreq / pow(10,9) ,"Grad/s" )
waveIntensity()
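# A non-interactive helper for quick checks (added for illustration, not part
# of the original exercise): omega = 2 * pi * c / wavelength.
def angular_frequency(wavelength_m):
    """Return the angular frequency in rad/s for a wavelength given in metres."""
    return 2 * math.pi * c / wavelength_m
# e.g. angular_frequency(0.03) is about 6.28e10 rad/s (62.8 Grad/s) for a 3 cm wave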
| 26.210526 | 68 | 0.566265 |
7945bd9d5b4bf0ebd8eae9ef8901a7fe482c987a | 1,293 | py | Python | Problem_sets/spiral_matrix/test_script/test.py | zanderhinton/DSA_collaborative_prep | 8427255e0084c6d69031027492d847a90b970840 | [
"MIT"
] | 3 | 2020-02-02T14:52:16.000Z | 2020-09-28T12:32:35.000Z | Problem_sets/spiral_matrix/test_script/test.py | zanderhinton/DSA_collaborative_prep | 8427255e0084c6d69031027492d847a90b970840 | [
"MIT"
] | 14 | 2020-02-02T21:17:49.000Z | 2020-02-10T15:48:36.000Z | Problem_sets/spiral_matrix/test_script/test.py | zanderhinton/DSA_collaborative_prep | 8427255e0084c6d69031027492d847a90b970840 | [
"MIT"
] | 9 | 2020-02-02T20:00:05.000Z | 2020-02-17T19:02:32.000Z | import time
import numpy as np
from test_script.solution import spiral_matrix
test_cases = [1, 2, 3, 4, 5]  # insert test case objects into a list, e.g. [test_case_1, test_case_2, test_case_3]
loops = 10 # loops to be taken for calculating average time, for complex problems, lower this value.
def test_spiral_matrix(user_fun):
success_flags = []
for test_case in test_cases:
expected_output = spiral_matrix(test_case)
user_output = user_fun(test_case)
if user_output == expected_output:
success_flags.append(1)
else:
success_flags.append(0)
success_flag = min(success_flags)
if success_flag == 1:
loop_time = []
for i in range(loops):
t1 = time.time()
_ = user_fun(test_cases[0])
t2 = time.time()
loop_time.append(t2-t1)
print("Passed {}/{} test cases!".format(sum(success_flags), len(success_flags)))
print("Completed {} loops in average time of {:.7f} s.".format(loops, np.mean(loop_time)))
print("Your output (first 20 lines limit):")
for out in user_output[:20]:
print(out)
else:
print("Test failed! {}/{} passed.".format(sum(success_flags), len(success_flags))) | 36.942857 | 109 | 0.614076 |
7945bda786d8c73ba263921744783e6698e87811 | 60,787 | py | Python | typhon/collocations/collocator.py | jmollard/typhon | 68d5ae999c340b60aa69e095b336d438632ad55c | [
"MIT"
] | null | null | null | typhon/collocations/collocator.py | jmollard/typhon | 68d5ae999c340b60aa69e095b336d438632ad55c | [
"MIT"
] | null | null | null | typhon/collocations/collocator.py | jmollard/typhon | 68d5ae999c340b60aa69e095b336d438632ad55c | [
"MIT"
] | null | null | null | from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from datetime import datetime, timedelta
import gc
from multiprocessing import Process, Queue
import time
import traceback
import numpy as np
import pandas as pd
from typhon.geodesy import great_circle_distance
from typhon.geographical import GeoIndex
from typhon.utils import add_xarray_groups, get_xarray_groups
from typhon.utils.timeutils import to_datetime, to_timedelta, Timer
import xarray as xr
__all__ = [
"Collocator",
"check_collocation_data"
]
# The names for the processes. This started as an easter egg, but it actually
# helps to identify different processes during debugging.
PROCESS_NAMES = [
'Newton', 'Einstein', 'Bohr', 'Darwin', 'Pasteur', 'Freud', 'Galilei',
'Lavoisier', 'Kepler', 'Copernicus', 'Faraday', 'Maxwell', 'Bernard',
'Boas', 'Heisenberg', 'Pauling', 'Virchow', 'Schrodinger', 'Rutherford',
'Dirac', 'Vesalius', 'Brahe', 'Buffon', 'Boltzmann', 'Planck', 'Curie',
'Herschel', 'Lyell', 'Laplace', 'Hubble', 'Thomson', 'Born', 'Crick',
'Fermi', 'Euler', 'Liebig', 'Eddington', 'Harvey', 'Malpighi', 'Huygens',
'Gauss', 'Haller', 'Kekule', 'Koch', 'Gell-Mann', 'Fischer', 'Mendeleev',
'Glashow', 'Watson', 'Bardeen', 'Neumann', 'Feynman', 'Wegener', 'Hawking',
'Leeuwenhoek', 'Laue', 'Kirchhoff', 'Bethe', 'Euclid', 'Mendel', 'Onnes',
'Morgan', 'Helmholtz', 'Ehrlich', 'Mayr', 'Sherrington', 'Dobzhansky',
'Delbruck', 'Lamarck', 'Bayliss', 'Chomsky', 'Sanger', 'Lucretius',
'Dalton', 'Broglie', 'Linnaeus', 'Piaget', 'Simpson', 'Levi-Strauss',
'Margulis', 'Landsteiner', 'Lorenz', 'Wilson', 'Hopkins', 'Elion', 'Selye',
'Oppenheimer', 'Teller', 'Libby', 'Haeckel', 'Salk', 'Kraepelin',
'Lysenko', 'Galton', 'Binet', 'Kinsey', 'Fleming', 'Skinner', 'Wundt',
'Archimedes'
]
class ProcessCrashed(Exception):
"""Helper exception for crashed processes"""
pass
class Collocator:
def __init__(
self, threads=None, verbose=1, name=None, #log_dir=None
):
"""Initialize a collocator object that can find collocations
Args:
threads: Finding collocations can be parallelized in threads. Give
                here the maximum number of threads that you want to use. The
                optimal number of threads may be machine-dependent, so this is
                a parameter that you can use to fine-tune the performance.
                Note: not yet implemented due to the GIL usage of the
                scikit-learn BallTree.
verbose: The higher this integer value the more debug messages
will be printed.
name: The name of this collocator, will be used in log statements.
"""
self.empty = None # xr.Dataset()
self.index = None
self.index_with_primary = False
self.threads = threads
# These optimization parameters will be overwritten in collocate
self.bin_factor = None
self.magnitude_factor = None
self.tunnel_limit = None
self.leaf_size = None
self.verbose = verbose
self.name = name if name is not None else "Collocator"
# If no collocations are found, this will be returned. We need empty
# arrays to concatenate the results without problems:
@property
def no_pairs(self):
return np.array([[], []])
@property
def no_intervals(self):
return np.array([], dtype='timedelta64[ns]')
@property
def no_distances(self):
return np.array([])
def __call__(self, *args, **kwargs):
return self.collocate(*args, **kwargs)
def _debug(self, msg):
if self.verbose > 1:
print(f"[{self.name}] {msg}")
def _info(self, msg):
if self.verbose > 0:
print(f"[{self.name}] {msg}")
def _error(self, msg):
print(f"[{self.name}] {msg}")
def collocate_filesets(
self, filesets, start=None, end=None, processes=None, output=None,
bundle=None, skip_file_errors=False, post_processor=None,
post_processor_kwargs=None, **kwargs
):
"""Find collocation between the data of two filesets
If you want to save the collocations directly to disk, it may be easier
to use :meth:`~typhon.collocations.Collocations.search` directly.
Args:
filesets: A list of two :class:`FileSet` objects, the primary and
the secondary fileset. Can be also
:class:`~typhon.collocations.common.Collocations` objects with
`read_mode=collapse`. The order of the filesets is irrelevant
for the results of the collocation search but files from the
secondary fileset might be read multiple times if using
parallel processing (`processes` is greater than one). The
number of output files could be different (see also the option
`bundle`).
start: Start date either as datetime object or as string
("YYYY-MM-DD hh:mm:ss"). Year, month and day are required.
Hours, minutes and seconds are optional. If not given, it is
datetime.min per default.
end: End date. Same format as "start". If not given, it is
datetime.max per default.
processes: Collocating can be parallelized which improves the
performance significantly. Pass here the number of processes to
use.
output: Fileset object where the collocated data should be stored.
bundle: Set this to *primary* if you want to bundle the output
files by their collocated primaries, i.e. there will be only
one output file per primary. *daily* is also possible, then all
files from one day are bundled together. Per default, all
collocations for each file match will be saved separately.
This might lead to a high number of output files.
Note: *daily* means one process bundles all collocations from
one day into one output file. If using multiple processes, this
could still produce several daily output files per day.
skip_file_errors: If this is *True* and a file could not be read,
the file and its match will be skipped and a warning will be
                printed. Otherwise the program will stop (default).
post_processor: A function for post-processing the collocated data
before saving it to `output`. Must accept two parameters: a
xarray.Dataset with the collocated data and a dictionary with
the path attributes from the collocated files.
post_processor_kwargs: A dictionary with keyword arguments that
should be passed to `post_processor`.
**kwargs: Further keyword arguments that are allowed for
:meth:`collocate`.
Yields:
A xarray.Dataset with the collocated data if `output` is not set.
If `output` is set to a FileSet-like object, only the filename of
the stored collocations is yielded. The results are not ordered if
you use more than one process. For more information about the
yielded xarray.Dataset have a look at :meth:`collocate`.
Examples:
.. code-block:: python
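                # A rough sketch, assuming `primaries`, `secondaries` and
                # `collocated` are FileSet objects defined elsewhere:
                collocator = Collocator()
                results = collocator.collocate_filesets(
                    [primaries, secondaries],
                    start="2018-01-01", end="2018-01-02",
                    processes=4, output=collocated,
                    max_interval="1h", max_distance="300km",
                )
                # with `output` set, only the stored filenames are yielded
                for filename in results:
                    print(filename)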
"""
timer = Timer().start()
if len(filesets) != 2:
raise ValueError("Only collocating two filesets at once is allowed"
"at the moment!")
# Check the max_interval argument because we need it later
max_interval = kwargs.get("max_interval", None)
if max_interval is None:
raise ValueError("Collocating filesets without max_interval is"
" not yet implemented!")
if start is None:
start = datetime.min
else:
start = to_datetime(start)
if end is None:
end = datetime.max
else:
end = to_datetime(end)
self._info(f"Collocate from {start} to {end}")
# Find the files from both filesets which overlap tempoerally.
matches = list(filesets[0].match(
filesets[1], start=start, end=end, max_interval=max_interval,
))
if processes is None:
processes = 1
# Make sure that there are never more processes than matches
processes = min(processes, len(matches))
total_matches = sum(len(match[1]) for match in matches)
self._info(f"using {processes} process(es) on {total_matches} matches")
# MAGIC with processes
# Each process gets a list with matches. Important: the matches should
# be continuous to guarantee a good performance. After finishing one
# match, the process pushes its results to the result queue. If errors
# are raised during collocating, the raised errors are pushed to the
# error queue,
matches_chunks = np.array_split(matches, processes)
# This queue collects all results:
results = Queue(maxsize=processes)
# This queue collects all error exceptions
errors = Queue()
# Extend the keyword arguments that we are going to pass to
# _collocate_files:
kwargs.update({
"start": start,
"end": end,
"filesets": filesets,
"output": output,
"bundle": bundle,
"skip_file_errors": skip_file_errors,
"post_processor": post_processor,
"post_processor_kwargs": post_processor_kwargs,
})
# This list contains all running processes
process_list = [
Process(
target=Collocator._process_caller,
args=(
self, results, errors, PROCESS_NAMES[i],
),
kwargs={**kwargs, "matches": matches_chunk},
daemon=True,
)
for i, matches_chunk in enumerate(matches_chunks)
]
# We want to keep track of the progress of the collocation search since
# it may take a while.
process_progress = {
name: 0. # Each process is at 0 percent at the beginning
for name in PROCESS_NAMES[:processes]
}
# Start all processes:
for process in process_list:
process.start()
# As long as some processes are still running, wait for their results:
running = process_list.copy()
processed_matches = 0
# The main process has two tasks during its child processes are
# collocating.
# 1) Collect their results and yield them to the user
# 2) Display the progress and estimate the remaining processing time
while running:
# Filter out all processes that are dead: they either crashed or
# complete their task
running = [
process for process in running if process.is_alive()
]
# Get all results from the result queue
while not results.empty():
process, progress, result = results.get()
# The process might be crashed. To keep the remaining time
# estimation useful, we exclude the crashed process from the
# calculation.
if result is ProcessCrashed:
del process_progress[process]
else:
process_progress[process] = progress
try:
nerrors = errors.qsize()
except NotImplementedError:
nerrors = 'unknown'
self._print_progress(
timer.elapsed, process_progress, len(running), nerrors)
if result is not None:
yield result
# Explicit free up memory:
gc.collect()
for process in process_list:
process.join()
if not errors.empty():
self._error("Some processes terminated due to errors:")
while not errors.empty():
error = errors.get()
print("-"*79)
print(error[2])
print("".join(traceback.format_tb(error[1])))
print("-" * 79 + "\n")
@staticmethod
def _print_progress(elapsed_time, process_progress, processes, errors):
elapsed_time -= timedelta(microseconds=elapsed_time.microseconds)
if len(process_progress) == 0:
msg = "-"*79 + "\n"
msg += f"100% | {elapsed_time} hours elapsed | " \
f"{errors} processes failed\n"
msg += "-"*79 + "\n"
print(msg)
return
progress = sum(process_progress.values()) / len(process_progress)
try:
expected_time = elapsed_time * (100 / progress - 1)
expected_time -= timedelta(
microseconds=expected_time.microseconds)
except ZeroDivisionError:
expected_time = "unknown"
msg = "-"*79 + "\n"
msg += f"{progress:.0f}% | {elapsed_time} hours elapsed, " \
f"{expected_time} hours left | {processes} proc running, " \
f"{errors} failed\n"
msg += "-"*79 + "\n"
print(msg)
@staticmethod
def _process_caller(
self, results, errors, name, output, bundle, post_processor,
post_processor_kwargs, **kwargs):
"""Wrapper around _collocate_matches
This function is called for each process. It communicates with the main
process via the result and error queue.
Result Queue:
Adds for each collocated file match the process name, its progress
and the actual results.
Error Queue:
            If an error is raised, the name of this process and the error
            message are put into this queue.
"""
self.name = name
# We keep track of how many file pairs we have already processed to
# make the error debugging easier. We need the match in flat form:
matches = [
[match[0], secondary]
for match in kwargs['matches']
for secondary in match[1]
]
# If we want to bundle the output, we need to collect some contents.
# The current_bundle_tag stores a certain information for the current
# bundle (e.g. filename of primary or day of the year). If it changes,
# the bundle is stored to disk and a new bundle is created.
cached_data = []
cached_attributes = {}
current_bundle_tag = None
try:
processed = 0
collocated_matches = self._collocate_matches(**kwargs)
for collocations, attributes in collocated_matches:
match = matches[processed]
processed += 1
progress = 100 * processed / len(matches)
if collocations is None:
results.put([name, progress, None])
continue
# The user does not want to bundle anything therefore just save
# the current collocations
if bundle is None:
result = self._save_and_return(
collocations, attributes, output,
post_processor, post_processor_kwargs
)
results.put([name, progress, result])
continue
# The user may want to bundle the collocations before writing
# them to disk, e.g. by their primaries.
save_cache = self._should_save_cache(
bundle, current_bundle_tag, match,
to_datetime(collocations.attrs["start_time"])
)
if save_cache:
result = self._save_and_return(
cached_data,
cached_attributes, output,
post_processor, post_processor_kwargs
)
results.put([name, progress, result])
cached_data = []
cached_attributes = {}
# So far, we have not cached any collocations or we still need
# to wait before saving them to disk.
cached_data.append(collocations)
cached_attributes.update(**attributes)
if bundle == "primary":
current_bundle_tag = match[0].path
elif bundle == "daily":
current_bundle_tag = \
to_datetime(collocations.attrs["start_time"]).date()
# After all iterations, save last cached data to disk:
if cached_data:
result = self._save_and_return(
cached_data,
cached_attributes, output,
post_processor, post_processor_kwargs
)
results.put([name, progress, result])
except Exception as exception:
# Tell the main process to stop considering this process for the
# remaining processing time:
results.put(
[name, 100., ProcessCrashed]
)
self._error("ERROR: I got a problem and terminate!")
# Build a message that contains all important information for
# debugging:
msg = f"Process {name} ({matches[0][0].times[0]} -" \
f"{matches[-1][0].times[1]}) failed\n" \
f"Failed to collocate {matches[processed]} with"\
f"{matches[processed]}\n"
# The main process needs to know about this exception!
error = [
name, exception.__traceback__,
msg + "ERROR: " + str(exception)
]
errors.put(error)
self._error(exception)
# Finally, raise the exception to terminate this process:
raise exception
self._info(f"Finished all {len(matches)} matches")
def _save_and_return(self, collocations, attributes, output,
post_processor, post_processor_kwargs):
"""Save collocations to disk or return them"""
if isinstance(collocations, list):
collocations = concat_collocations(
collocations
)
if output is None:
return collocations, attributes
else:
filename = output.get_filename(
[to_datetime(collocations.attrs["start_time"]),
to_datetime(collocations.attrs["end_time"])],
fill=attributes
)
# Apply a post processor function from the user
if post_processor is not None:
if post_processor_kwargs is None:
post_processor_kwargs = {}
collocations = post_processor(
collocations, attributes, **post_processor_kwargs
)
if collocations is None:
return None
self._info(f"Store collocations to\n{filename}")
# Write the data to the file.
output.write(collocations, filename)
return filename
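    # A post_processor sketch (user-supplied, illustration only): it receives
    # the collocated xarray.Dataset plus the dict of file attributes and
    # returns either a (possibly modified) dataset or None to skip saving, e.g.
    #
    #   def my_post_processor(collocations, attributes, min_pairs=1):
    #       if collocations["Collocations/pairs"].shape[1] < min_pairs:
    #           return None
    #       return collocations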
@staticmethod
def _should_save_cache(bundle, current_bundle_tag, match, start_time):
"""Return true if the cache should be saved otherwise false
"""
if current_bundle_tag is None:
return False
elif bundle == "primary":
# Check whether the primary has changed since the last time:
return current_bundle_tag != match[0].path
elif bundle == "daily":
# Has the day changed since last time?
return current_bundle_tag != start_time.date()
# In all other cases, the bundle should not be saved yet:
return False
def _collocate_matches(
self, filesets, matches, skip_file_errors, **kwargs
):
"""Load file matches and collocate their content
Yields:
A tuple of two items: the first is always the current percentage
of progress. If output is True, the second is only the filename of
the saved collocations. Otherwise, it is a tuple of collocations
and their collected :class:`~typhon.files.handlers.common.FileInfo`
attributes as a dictionary.
"""
# Load all matches in a parallized queue:
loaded_matches = filesets[0].align(
filesets[1], matches=matches, return_info=True, compact=False,
skip_errors=skip_file_errors,
)
for loaded_match in loaded_matches:
# The FileInfo objects of the matched files:
files = loaded_match[0][0], loaded_match[1][0]
# We copy the data from the matches since they might be used for
# other matches as well:
primary, secondary = \
loaded_match[0][1].copy(), loaded_match[1][1].copy()
self._debug(f"Collocate {files[0].path}\nwith {files[1].path}")
collocations = self.collocate(
(filesets[0].name, primary),
(filesets[1].name, secondary), **kwargs,
)
if collocations is None:
self._debug("Found no collocations!")
# At least, give the process caller a progress update:
yield None, None
continue
# Check whether the collocation data is compatible and was build
# correctly
check_collocation_data(collocations)
found = [
collocations[f"{filesets[0].name}/time"].size,
collocations[f"{filesets[1].name}/time"].size
]
self._debug(
f"Found {found[0]} ({filesets[0].name}) and "
f"{found[1]} ({filesets[1].name}) collocations"
)
# Add the names of the processed files:
for f in range(2):
if f"{filesets[f].name}/__file" in collocations.variables:
continue
collocations[f"{filesets[f].name}/__file"] = files[f].path
# Collect the attributes of the input files. The attributes get a
# prefix, primary or secondary, to allow not-unique names.
attributes = {
f"primary.{p}" if f == 0 else f"secondary.{p}": v
for f, file in enumerate(files)
for p, v in file.attr.items()
}
yield collocations, attributes
def collocate(
self, primary, secondary, max_interval=None, max_distance=None,
bin_factor=1, magnitude_factor=10, tunnel_limit=None, start=None,
end=None, leaf_size=40
):
"""Find collocations between two xarray.Dataset objects
Collocations are two or more data points that are located close to each
other in space and/or time.
Each xarray.Dataset contain the variables *time*, *lat*, *lon*. They
must be - if they are coordinates - unique. Otherwise, their
coordinates must be unique, i.e. they cannot contain duplicated values.
*time* must be a 1-dimensional array with a *numpy.datetime64*-like
data type. *lat* and *lon* can be gridded, i.e. they can be multi-
dimensional. However, they must always share the first dimension with
*time*. *lat* must be latitudes between *-90* (south) and *90* (north)
and *lon* must be longitudes between *-180* (west) and *180* (east)
degrees. See below for examples.
The collocation searched is performed with a fast ball tree
implementation by scikit-learn. The ball tree is cached and reused
whenever the data points from `primary` or `secondary` have not
changed.
If you want to find collocations between FileSet objects, use
:class:`collocate_filesets` instead.
Args:
primary: A tuple of a string with the dataset name and a
xarray.Dataset that fulfill the specifications from above. Can
be also a xarray.Dataset only, the name is then automatically
set to *primary*.
secondary: A tuple of a string with the dataset name and a
xarray.Dataset that fulfill the specifications from above. Can
be also a xarray.Dataset only, the name is then automatically
set to *secondary*.
max_interval: Either a number as a time interval in seconds, a
string containing a time with a unit (e.g. *100 minutes*) or a
timedelta object. This is the maximum time interval between two
data points. If this is None, the data will be searched for
spatial collocations only.
max_distance: Either a number as a length in kilometers or a string
containing a length with a unit (e.g. *100 meters*). This is
the maximum distance between two data points to meet the
collocation criteria. If this is None, the data will be
searched for temporal collocations only. Either `max_interval`
or *max_distance* must be given.
tunnel_limit: Maximum distance in kilometers at which to switch
from tunnel to haversine distance metric. Per default this
algorithm uses the tunnel metric, which simply transform all
latitudes and longitudes to 3D-cartesian space and calculate
their euclidean distance. This is faster than the haversine
metric but produces an error that grows with larger distances.
When searching for distances exceeding this limit
(`max_distance` is greater than this parameter), the haversine
metric is used, which is more accurate but takes more time.
Default is 1000 kilometers.
magnitude_factor: Since building new trees is expensive, this
algorithm tries to use the last tree when possible (e.g. for
                data with a fixed grid). However, building the tree with the
                larger dataset and querying it with the smaller one is faster
                than vice versa, so the choice of premise can make a
                difference in performance. This parameter is the factor by
                which one dataset must be larger than the other before an
                already-built ball tree is thrown away and rebuilt with the
                larger dataset.
leaf_size: The size of one leaf in the Ball Tree. The higher the
leaf size the faster is the tree building but the slower is the
tree query. The optimal leaf size is dataset-dependent. Default
is 40.
bin_factor: When using a temporal criterion via `max_interval`, the
data will be temporally binned to speed-up the search. The bin
                size is `bin_factor` * `max_interval`. The best bin factor may
                be dataset-dependent, so this is a parameter that you can use
                to fine-tune the performance.
start: Limit the collocated data from this start date. Can be
either as datetime object or as string ("YYYY-MM-DD hh:mm:ss").
Year, month and day are required. Hours, minutes and seconds
are optional. If not given, it is datetime.min per default.
end: End date. Same format as "start". If not given, it is
datetime.max per default.
Returns:
None if no collocations were found. Otherwise,
a xarray.Dataset with the collocated data in *compact* form. It
consists of three groups (groups of variables containing */* in
their name): the *primary*, *secondary* and the *Collocations*
group. If you passed `primary` or `secondary` with own names,
they will be used in the output. The *Collocations* group contains
information about the found collocations. *Collocations/pairs* is
a 2xN array where N is the number of found collocations. It
contains the indices of the *primary* and *secondary* data points
which are collocations. The indices refer to the data points stored
in the *primary* or *secondary* group. *Collocations/interval* and
*Collocations/distance* are the intervals and distances between the
collocations in seconds and kilometers, respectively. Collocations
in *compact* form are efficient when saving them to disk but it
might be complicated to use them directly. Consider applying
:func:`~typhon.collocations.common.collapse` or
:func:`~typhon.collocations.common.expand` on them.
Examples:
.. code-block: python
# TODO: Update this example!
import numpy as np
from typhon.collocations import Collocator
# Create the data. primary and secondary can also be
# xarray.Dataset objects:
primary = {
"time": np.arange(
"2018-01-01", "2018-01-02", dtype="datetime64[h]"
),
"lat": 30.*np.sin(np.linspace(-3.14, 3.14, 24))+20,
"lon": np.linspace(0, 90, 24),
}
secondary = {
"time": np.arange(
"2018-01-01", "2018-01-02", dtype="datetime64[h]"
),
"lat": 30.*np.sin(np.linspace(-3.14, 3.14, 24)+1.)+20,
"lon": np.linspace(0, 90, 24),
}
# Find collocations with a maximum distance of 300 kilometers
# and a maximum interval of 1 hour
collocator = Collocator()
collocated = collocator.collocate(
primary, secondary,
max_distance="300km", max_interval="1h"
)
print(collocated)
"""
if max_distance is None and max_interval is None:
raise ValueError(
"Either max_distance or max_interval must be given!"
)
if max_interval is not None:
max_interval = to_timedelta(max_interval, numbers_as="seconds")
# The user can give strings instead of datetime objects:
start = datetime.min if start is None else to_datetime(start)
end = datetime.max if end is None else to_datetime(end)
# Did the user give the datasets specific names?
primary_name, primary, secondary_name, secondary = self._get_names(
primary, secondary
)
# Select the common time period of both datasets and flat them.
primary, secondary = self._prepare_data(
primary, secondary, max_interval, start, end
)
# Maybe there is no data left after selection?
if primary is None:
return self.empty
self.bin_factor = bin_factor
self.magnitude_factor = magnitude_factor
self.tunnel_limit = tunnel_limit
self.leaf_size = leaf_size
timer = Timer().start()
# We cannot allow NaNs in the time, lat or lon fields
not_nans1 = self._get_not_nans(primary)
not_nans2 = self._get_not_nans(secondary)
# Retrieve the important fields from the data. To avoid any overhead by
# xarray, we use the plain numpy.arrays and do not use the isel method
# (see https://github.com/pydata/xarray/issues/2227). We rather use
# index arrays that we use later to select the rest of the data
lat1 = primary.lat.values[not_nans1]
lon1 = primary.lon.values[not_nans1]
time1 = primary.time.values[not_nans1]
lat2 = secondary.lat.values[not_nans2]
lon2 = secondary.lon.values[not_nans2]
time2 = secondary.time.values[not_nans2]
original_indices = [
np.arange(primary.time.size)[not_nans1],
np.arange(secondary.time.size)[not_nans2]
]
self._debug(f"{timer} for filtering NaNs")
# We can search for spatial collocations (max_interval=None), temporal
# collocations (max_distance=None) or both.
if max_interval is None:
# Search for spatial collocations only:
pairs, distances = self.spatial_search(
lat1, lon1, lat2, lon2, max_distance,
)
intervals = self._get_intervals(
time1[pairs[0]], time2[pairs[1]]
)
return self._create_return(
primary, secondary, primary_name, secondary_name,
self._to_original(pairs, original_indices),
intervals, distances,
max_interval, max_distance
)
elif max_distance is None:
# Search for temporal collocations only:
pairs, intervals = self.temporal_search(
time1, time2, max_interval
)
distances = self._get_distances(
lat1[pairs[0]], lon1[pairs[0]],
lat2[pairs[1]], lon2[pairs[1]],
)
return self._create_return(
primary, secondary, primary_name, secondary_name,
self._to_original(pairs, original_indices),
intervals, distances,
max_interval, max_distance
)
# The user wants to use both criteria and search for spatial and
# temporal collocations. At first, we do a coarse temporal pre-binning
# so that we only search for collocations between points that might
# also be temporally collocated. Unfortunately, this also produces an
# overhead that is only negligible if we have a lot of data:
data_magnitude = time1.size * time2.size
if data_magnitude > 100_0000:
# We have enough data, do temporal pre-binning!
pairs, distances = self.spatial_search_with_temporal_binning(
{"lat": lat1, "lon": lon1, "time": time1},
{"lat": lat2, "lon": lon2, "time": time2},
max_distance, max_interval
)
else:
# We do not have enough data to justify that whole pre-binning.
# Simply do it directly!
pairs, distances = self.spatial_search(
lat1, lon1, lat2, lon2, max_distance,
)
# Did we find any spatial collocations?
if not pairs.any():
return self.empty
# Check now whether the spatial collocations really pass the temporal
# condition:
passed_temporal_check, intervals = self._temporal_check(
time1[pairs[0]], time2[pairs[1]], max_interval
)
# Return only the values that passed the time check
return self._create_return(
primary, secondary, primary_name, secondary_name,
self._to_original(
pairs[:, passed_temporal_check], original_indices),
intervals, distances[passed_temporal_check],
max_interval, max_distance
)
@staticmethod
def _to_original(pairs, original_indices):
return np.array([
original_indices[i][pair_array]
for i, pair_array in enumerate(pairs)
])
@staticmethod
def _get_names(primary, secondary):
# Check out whether the user gave the primary and secondary any name:
if isinstance(primary, (tuple, list)):
primary_name, primary = primary
else:
primary_name = "primary"
if isinstance(secondary, (tuple, list)):
secondary_name, secondary = secondary
else:
secondary_name = "secondary"
return primary_name, primary, secondary_name, secondary
def _prepare_data(self, primary, secondary, max_interval, start, end):
"""Prepare the data for the collocation search
This method selects the time period which should be searched for
collocations and flats the input datasets if they have gridded
variables.
Returns:
The datasets constraint to the common time period, sorted by time
and flattened. If no common time period could be found, two None
objects are returned.
"""
if max_interval is not None:
timer = Timer().start()
# We do not have to collocate everything, just the common time
# period expanded by max_interval and limited by the global start
# and end parameter:
primary_period, secondary_period = self._get_common_time_period(
primary, secondary, max_interval, start, end
)
# Check whether something is left:
if not primary_period.size or not secondary_period.size:
return None, None
# We need everything sorted by the time, otherwise xarray's stack
# method makes problems:
primary_period = primary_period.sortby(primary_period)
primary_dim = primary_period.dims[0]
secondary_period = secondary_period.sortby(secondary_period)
secondary_dim = secondary_period.dims[0]
# Select the common time period and while using sorted indices:
primary = primary.sel(**{primary_dim: primary_period[primary_dim]})
secondary = secondary.sel(
**{secondary_dim: secondary_period[secondary_dim]}
)
# Check whether something is left:
if not primary_period.size or not secondary_period.size:
return None, None
self._debug(f"{timer} for selecting common time period")
# Flat the data: For collocating, we need a flat data structure.
# Fortunately, xarray provides the very convenient stack method
# where we can flat multiple dimensions to one. Which dimensions do
# we have to stack together? We need the fields *time*, *lat* and
# *lon* to be flat. So we choose their dimensions to be stacked.
timer = Timer().start()
primary = self._flat_to_main_coord(primary)
secondary = self._flat_to_main_coord(secondary)
self._debug(f"{timer} for flatting data")
return primary, secondary
@staticmethod
def _get_common_time_period(
primary, secondary, max_interval, start, end):
max_interval = pd.Timedelta(max_interval)
# We want to select a common time window from both datasets,
# aligned to the primary's time coverage. Because xarray has a
# very annoying bug in time retrieving
# (https://github.com/pydata/xarray/issues/1240), this is a
# little bit cumbersome:
common_start = max(
start,
pd.Timestamp(primary.time.min().item(0)) - max_interval,
pd.Timestamp(secondary.time.min().item(0)) - max_interval
)
common_end = min(
end,
pd.Timestamp(primary.time.max().item(0)) + max_interval,
pd.Timestamp(secondary.time.max().item(0)) + max_interval
)
primary_period = primary.time.where(
(primary.time.values >= np.datetime64(common_start))
& (primary.time.values <= np.datetime64(common_end))
).dropna(primary.time.dims[0])
secondary_period = secondary.time.where(
(secondary.time.values >= np.datetime64(common_start))
& (secondary.time.values <= np.datetime64(common_end))
).dropna(secondary.time.dims[0])
return primary_period, secondary_period
@staticmethod
def _get_not_nans(dataset):
return dataset.lat.notnull().values & dataset.lon.notnull().values
@staticmethod
def _flat_to_main_coord(data):
"""Make the dataset flat despite of its original structure
We need a flat dataset structure for the collocation algorithms, i.e.
time, lat and lon are not allowed to be gridded, they must be
1-dimensional and share the same dimension (namely *collocation*).
        There are two groups of original data structures that this method
can handle:
* linear (e.g. ship track measurements): time, lat and lon have the
same dimension and are all 1-dimensional. Fulfills all criteria
from above. No action has to be taken.
* gridded_coords (e.g. instruments on satellites with gridded swaths):
lat or lon are gridded (they have multiple dimensions). Stack
the coordinates of them together to a new shared dimension.
Args:
data: xr.Dataset object
Returns:
A xr.Dataset where time, lat and lon are aligned on one shared
dimension.
"""
# Flat:
shared_dims = list(
set(data.time.dims) | set(data.lat.dims) | set(data.lon.dims)
)
# Check whether the dataset is flat (time, lat and lon share the same
# dimension size and are 1-dimensional)
if len(shared_dims) == 1:
if shared_dims[0] in ("time", "lat", "lon"):
# One of the key variables is the main dimension! Change this:
data["collocation"] = shared_dims[0], np.arange(
data[shared_dims[0]].size)
data = data.swap_dims({shared_dims[0]: "collocation"})
data = data.reset_coords(shared_dims[0])
# So far, collocation is a coordinate. We want to make it to a
# dimension, so drop its values:
return data.drop("collocation")
return data.rename({
shared_dims[0]: "collocation"
})
# The coordinates are gridded:
# Some field might be more deeply stacked than another. Choose the
# dimensions of the most deeply stacked variable:
dims = max(
data["time"].dims, data["lat"].dims, data["lon"].dims,
key=lambda x: len(x)
)
# We want to be able to retrieve additional fields after collocating.
# Therefore, we give each dimension that is no coordinate yet a value
# to use them as indices later.
for dim in dims:
if dim not in data.coords:
data[dim] = dim, np.arange(data.dims[dim])
# We assume that coordinates must be unique! Otherwise, we would have
# to use this ugly work-around:
# Replace the former coordinates with new coordinates that have unique
# values.
# new_dims = []
# for dim in dims:
# new_dim = f"__replacement_{dim}"
# data[new_dim] = dim, np.arange(data.dims[dim])
# data = data.swap_dims({dim: new_dim})
# new_dims.append(new_dim)
return data.stack(collocation=dims)
def _create_return(
self, primary, secondary, primary_name, secondary_name,
original_pairs, intervals, distances,
max_interval, max_distance
):
if not original_pairs.any():
return self.empty
pairs = []
output = {}
names = [primary_name, secondary_name]
for i, dataset in enumerate([primary, secondary]):
# name of the current dataset (primary or secondary)
name = names[i]
# These are the indices of the points in the original data that
# have collocations. We remove the duplicates since we want to copy
# the required data only once. They are called original_indices
# because they are the indices in the original data array:
original_indices = pd.unique(original_pairs[i])
# After selecting the collocated data, the original indices cannot
# be applied any longer. We need new indices that indicate the
# pairs in the collocated data.
new_indices = np.empty(original_indices.max() + 1, dtype=int)
new_indices[original_indices] = np.arange(
original_indices.size
)
collocation_indices = new_indices[original_pairs[i]]
# Save the collocation indices in the metadata group:
pairs.append(collocation_indices)
output[names[i]] = dataset.isel(collocation=original_indices)
# We have to convert the MultiIndex to a normal index because we
# cannot store it to a file otherwise. We can convert it by simply
# setting it to new values, but we are losing the sub-level
# coordinates (the dimenisons that we stacked to create the
# multi-index in the first place) with that step. Hence, we store
# the sub-level coordinates in additional dataset to preserve them.
main_coord_is_multiindex = isinstance(
output[name].get_index("collocation"),
pd.core.indexes.multi.MultiIndex
)
if main_coord_is_multiindex:
stacked_dims_data = xr.merge([
xr.DataArray(
output[name][dim].values,
name=dim, dims=["collocation"]
)
for dim in output[name].get_index("collocation").names
])
# Okay, actually we want to get rid of the main coordinate. It
# should stay as a dimension name but without own labels. I.e. we
# want to drop it. Because it still may a MultiIndex, we cannot
# drop it directly but we have to set it to something different.
output[name]["collocation"] = \
np.arange(output[name]["collocation"].size)
if main_coord_is_multiindex:
# Now, since we unstacked the multi-index, we can add the
# stacked dimensions back to the dataset:
output[name] = xr.merge(
[output[name], stacked_dims_data],
)
# For the flattening we might have created temporal variables,
# also collect them to drop:
vars_to_drop = [
var for var in output[name].variables.keys()
if var.startswith("__replacement_")
]
output[name] = output[name].drop([
f"collocation", *vars_to_drop
])
# Merge all datasets into one:
output = add_xarray_groups(
xr.Dataset(), **output
)
# This holds the collocation information (pairs, intervals and
# distances):
metadata = xr.Dataset()
metadata["pairs"] = xr.DataArray(
np.array(pairs, dtype=int), dims=("group", "collocation"),
attrs={
"max_interval": f"Max. interval in secs: {max_interval}",
"max_distance": f"Max. distance in kilometers: {max_distance}",
"primary": primary_name,
"secondary": secondary_name,
}
)
metadata["interval"] = xr.DataArray(
intervals, dims=("collocation", ),
attrs={
"max_interval": f"Max. interval in secs: {max_interval}",
"max_distance": f"Max. distance in kilometers: {max_distance}",
"primary": primary_name,
"secondary": secondary_name,
}
)
metadata["distance"] = xr.DataArray(
distances, dims=("collocation",),
attrs={
"max_interval": f"Max. interval in secs: {max_interval}",
"max_distance": f"Max. distance in kilometers: {max_distance}",
"primary": primary_name,
"secondary": secondary_name,
"units": "kilometers",
}
)
metadata["group"] = xr.DataArray(
[primary_name, secondary_name], dims=("group",),
attrs={
"max_interval": f"Max. interval in secs: {max_interval}",
"max_distance": f"Max. distance in kilometers: {max_distance}",
}
)
output = add_xarray_groups(
output, Collocations=metadata
)
start = pd.Timestamp(
output[primary_name+"/time"].min().item(0)
)
end = pd.Timestamp(
output[primary_name+"/time"].max().item(0)
)
output.attrs = {
"start_time": str(start),
"end_time": str(end),
}
return output
@staticmethod
def get_meta_group():
return f"Collocations"
def spatial_search_with_temporal_binning(
self, primary, secondary, max_distance, max_interval
):
# For time-binning purposes, pandas DataFrame objects are a good choice
primary = pd.DataFrame(primary).set_index("time")
secondary = pd.DataFrame(secondary).set_index("time")
# Now let's split the two datasets along their time coordinate so
# we avoid searching for spatial collocations that do not fulfill
# the temporal condition in the first place. However, the overhead
# of the finding algorithm must be considered too (for example the
# BallTree creation time). This can be adjusted by the parameter
# bin_factor:
bin_duration = self.bin_factor * max_interval
# The binning is more efficient if we use the largest dataset as
# primary:
swapped_datasets = secondary.size > primary.size
if swapped_datasets:
primary, secondary = secondary, primary
# Let's bin the primaries along their time axis and search for the
# corresponding secondary bins:
bin_pairs = (
self._bin_pairs(start, chunk, primary, secondary, max_interval)
for start, chunk in primary.groupby(pd.Grouper(freq=bin_duration))
)
# Add arguments to the bins (we need them for the spatial search
# function):
bins_with_args = (
[self, max_distance, *bin_pair]
for bin_pair in bin_pairs
)
# Unfortunately, a first attempt at parallelizing this using threads
# worsened the performance. Update: The BallTree code from scikit-learn
# does not release the GIL. But apparently there will be a new version
# coming that solves this problem, see this scikit-learn issue:
# https://github.com/scikit-learn/scikit-learn/pull/10887. So stay
# tuned!
# threads = 1 if self.threads is None else self.threads
t = Timer(verbose=False).start()
# with ThreadPoolExecutor(max_workers=2) as pool:
# results = list(pool.map(
# Collocator._spatial_search_bin, bins_with_args
# ))
results = list(map(
Collocator._spatial_search_bin, bins_with_args
))
self._debug(f"Collocated {len(results)} bins in {t.stop()}")
pairs_list, distances_list = zip(*results)
pairs = np.hstack(pairs_list)
# No collocations were found.
if not pairs.any():
return self.no_pairs, self.no_distances
# Stack the rest of the results together:
distances = np.hstack(distances_list)
if swapped_datasets:
# Swap the rows of the results
pairs[[0, 1]] = pairs[[1, 0]]
return pairs.astype("int64"), distances
@staticmethod
def _bin_pairs(chunk1_start, chunk1, primary, secondary, max_interval):
""""""
chunk2_start = chunk1_start - max_interval
chunk2_end = chunk1.index.max() + max_interval
offset1 = primary.index.searchsorted(chunk1_start)
offset2 = secondary.index.searchsorted(chunk2_start)
chunk2 = secondary.loc[chunk2_start:chunk2_end]
return offset1, chunk1, offset2, chunk2
@staticmethod
def _spatial_search_bin(args):
self, max_distance, offset1, data1, offset2, data2 = args
if data1.empty or data2.empty:
return self.no_pairs, self.no_distances
pairs, distances = self.spatial_search(
data1["lat"].values, data1["lon"].values,
data2["lat"].values, data2["lon"].values, max_distance
)
pairs[0] += offset1
pairs[1] += offset2
return pairs, distances
def spatial_search(self, lat1, lon1, lat2, lon2, max_distance):
# Finding collocations is expensive, therefore we want to optimize it
# and have to decide which points to use for the index building.
index_with_primary = self._choose_points_to_build_index(
[lat1, lon1], [lat2, lon2],
)
self.index_with_primary = index_with_primary
if index_with_primary:
build_points = lat1, lon1
query_points = lat2, lon2
else:
build_points = lat2, lon2
query_points = lat1, lon1
self.index = self._build_spatial_index(*build_points)
pairs, distances = self.index.query(*query_points, r=max_distance)
# No collocations were found.
if not pairs.any():
# We return empty arrays to have consistent return values:
return self.no_pairs, self.no_distances
if not index_with_primary:
# The primary indices should be in the first row, the secondary
# indices in the second:
pairs[[0, 1]] = pairs[[1, 0]]
return pairs, distances
def _build_spatial_index(self, lat, lon):
# Find out whether the cached index still works with the new points:
if self._spatial_is_cached(lat, lon):
self._debug("Spatial index is cached and can be reused")
return self.index
return GeoIndex(lat, lon, leaf_size=self.leaf_size)
def _spatial_is_cached(self, lat, lon):
"""Return True if the cached ball tree is still applicable to the new
data"""
if self.index is None:
return False
try:
return np.allclose(lat, self.index.lat) \
& np.allclose(lon, self.index.lon)
except ValueError:
# The shapes are different
return False
def _choose_points_to_build_index(self, primary, secondary):
"""Choose which points should be used for tree building
This method helps to optimize the performance.
Args:
primary: Converted primary points
secondary: Converted secondary points
Returns:
True if primary points should be used for tree building. False
otherwise.
"""
# There are two options to optimize the performance:
# A) Cache the index and reuse it if either the primary or the
# secondary points have not changed (that is the case for data with a
# fixed grid). Building the tree is normally very expensive, so it
# should never be done without a reason.
# B) Build the tree with the larger set of points and query it with the
# smaller set.
# Which option should be used if A and B cannot be applied at the same
# time? If one point set is much larger than the other (by a factor of
# at least `magnitude_factor`), we strictly follow B. Otherwise, we
# prioritize A.
if primary[0].size > secondary[0].size * self.magnitude_factor:
# Use primary points
return True
elif secondary[0].size > primary[0].size * self.magnitude_factor:
# Use secondary points
return False
# Apparently, neither dataset is much larger than the other. So
# just check whether we still have a cached tree. If we used the
# primary points last time and they still fit, use them again:
if self.index_with_primary and self._spatial_is_cached(*primary):
return True
# Check the same for the secondary data:
if not self.index_with_primary and self._spatial_is_cached(*secondary):
return False
# Otherwise, just use the larger dataset:
return primary[0].size > secondary[0].size
def temporal_search(self, primary, secondary, max_interval):
raise NotImplementedError("Not yet implemented!")
#return self.no_pairs, self.no_intervals
def _temporal_check(
self, primary_time, secondary_time, max_interval
):
"""Checks whether the current collocations fulfill temporal conditions
Returns:
"""
intervals = self._get_intervals(primary_time, secondary_time)
# Check whether the time differences are less than the temporal
# boundary:
passed_time_check = intervals < max_interval
return passed_time_check, intervals[passed_time_check]
@staticmethod
def _get_intervals(time1, time2):
return np.abs((time1 - time2)).astype("timedelta64[s]")
@staticmethod
def _get_distances(lat1, lon1, lat2, lon2):
return great_circle_distance(lat1, lon1, lat2, lon2)
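# The following is a minimal, illustrative sketch (not part of the original
# module) of how the spatial search above could be driven on its own. It
# assumes a `Collocator` instance named `collocator` has already been
# constructed elsewhere with its usual defaults; the latitude/longitude
# arrays below are made up for the example.
#
# import numpy as np
#
# lat1 = np.array([10.0, 10.5, 11.0]) # hypothetical primary points
# lon1 = np.array([30.0, 30.5, 31.0])
# lat2 = np.array([10.1, 12.0]) # hypothetical secondary points
# lon2 = np.array([30.1, 33.0])
#
# # pairs is a 2xN index array (primary indices in the first row,
# # secondary indices in the second), distances are in kilometers:
# pairs, distances = collocator.spatial_search(
# lat1, lon1, lat2, lon2, max_distance=20
# )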
def concat_collocations(collocations):
"""Concat compact collocations
Compact collocations cannot be concatenated directly because indices in
*Collocations/pairs* won't be correct any longer afterwards. This
concatenate function fixes this problem.
Args:
collocations: A list of xarray.Dataset objects
with compact collocations.
Returns:
One xarray.Dataset object
"""
# We need to increment the pair indices when concatenating the datasets
primary = collocations[0]["Collocations/group"].item(0)
secondary = collocations[0]["Collocations/group"].item(1)
primary_size = 0
secondary_size = 0
collocation_coord = {
"Collocations": "Collocations/collocation",
primary: f"{primary}/collocation",
secondary: f"{secondary}/collocation",
}
# Collect all collocations for each single group:
groups = defaultdict(list)
for obj in collocations:
for group, data in get_xarray_groups(obj).items():
if group == "Collocations":
# Correct the indices:
data["Collocations/pairs"][0, :] += primary_size
data["Collocations/pairs"][1, :] += secondary_size
data = data.drop("Collocations/group")
groups[group].append(data)
primary_size += obj.dims[f"{primary}/collocation"]
secondary_size += obj.dims[f"{secondary}/collocation"]
starts = []
ends = []
for group, data_list in groups.items():
groups[group] = xr.concat(
data_list,
dim=collocation_coord[group]
)
start = pd.Timestamp(groups[primary][primary+"/time"].min().item(0))
end = pd.Timestamp(groups[primary][primary+"/time"].max().item(0))
merged = xr.merge(groups.values())
merged.attrs = {
"start_time": str(start),
"end_time": str(end),
}
merged["Collocations/group"] = collocations[0]["Collocations/group"]
return merged
class InvalidCollocationData(Exception):
"""Error when trying to collapse / expand invalid collocation data
"""
def __init__(self, message, *args):
Exception.__init__(self, message, *args)
def check_collocation_data(dataset):
"""Check whether the dataset fulfills the standard of collocated data
Args:
dataset: A xarray.Dataset object
Raises:
An InvalidCollocationData error if the dataset does not pass the test.
"""
mandatory_fields = ["Collocations/pairs", "Collocations/group"]
for mandatory_field in mandatory_fields:
if mandatory_field not in dataset.variables:
raise InvalidCollocationData(
f"Could not find the field '{mandatory_field}'!"
)
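# A short usage sketch (added for illustration, not part of the original
# module), assuming `part1` and `part2` are xarray.Dataset objects that
# already contain compact collocations as produced above:
#
# check_collocation_data(part1)
# check_collocation_data(part2)
# combined = concat_collocations([part1, part2])
# print(combined.attrs["start_time"], combined.attrs["end_time"])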
| 40.390033 | 79 | 0.596312 |
7945be1e8856b4e17791df5d0d8bbab02d943b45 | 112 | py | Python | lavalink/exceptions.py | TrackRunny/Lavalink.py | b6b856bb017b740469bc5359024b224dd7a7b665 | [
"MIT"
] | null | null | null | lavalink/exceptions.py | TrackRunny/Lavalink.py | b6b856bb017b740469bc5359024b224dd7a7b665 | [
"MIT"
] | null | null | null | lavalink/exceptions.py | TrackRunny/Lavalink.py | b6b856bb017b740469bc5359024b224dd7a7b665 | [
"MIT"
] | 1 | 2019-07-10T19:51:01.000Z | 2019-07-10T19:51:01.000Z | class NodeException(Exception):
""" The exception will be raised when something went wrong with a node. """
| 37.333333 | 79 | 0.732143 |
7945c07306a7f8ff6edc3077ac02ebce73d5b067 | 1,354 | py | Python | src/dialogs/helpDialog.py | Alopex4/spruce | 2bf0bae18fe9b0d13691f2ee926071635cbe7c6f | [
"MIT"
] | 1 | 2019-07-04T10:32:07.000Z | 2019-07-04T10:32:07.000Z | src/dialogs/helpDialog.py | Alopex4/spruce | 2bf0bae18fe9b0d13691f2ee926071635cbe7c6f | [
"MIT"
] | null | null | null | src/dialogs/helpDialog.py | Alopex4/spruce | 2bf0bae18fe9b0d13691f2ee926071635cbe7c6f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'help.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(400, 300)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.helpTextBro = QtWidgets.QTextBrowser(Dialog)
self.helpTextBro.setObjectName("helpTextBro")
self.verticalLayout.addWidget(self.helpTextBro)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(
QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
| 37.611111 | 78 | 0.716396 |
7945c2f84baf8bc4734c9a708a4883171a7af728 | 465 | py | Python | data/scripts/templates/object/draft_schematic/furniture/shared_furniture_lamp_tatt_s01_on.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/furniture/shared_furniture_lamp_tatt_s01_on.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/furniture/shared_furniture_lamp_tatt_s01_on.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/furniture/shared_furniture_lamp_tatt_s01_on.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 27.352941 | 91 | 0.737634 |
7945c50c0f1b2d2eea4d2e6e448663d3babba6e2 | 6,739 | py | Python | PDK_Generator/design_automation/polarization_splitter_rotator/psr_bitaper/neff_taper_width_sweep_setup.py | seanlam97/PDK_Generator | 15c1f4f56575f8e21ea874443d06ef740ccb5aa5 | [
"MIT"
] | null | null | null | PDK_Generator/design_automation/polarization_splitter_rotator/psr_bitaper/neff_taper_width_sweep_setup.py | seanlam97/PDK_Generator | 15c1f4f56575f8e21ea874443d06ef740ccb5aa5 | [
"MIT"
] | 3 | 2021-08-24T23:31:42.000Z | 2021-08-25T16:45:54.000Z | PDK_Generator/design_automation/polarization_splitter_rotator/psr_bitaper/neff_taper_width_sweep_setup.py | seanlam97/PDK_Generator | 15c1f4f56575f8e21ea874443d06ef740ccb5aa5 | [
"MIT"
] | null | null | null | #PSR Bitaper Width Sweep Simulation Recipe + Photonic Components + Adding Layers
#General Purpose Libraries
import numpy as np
from scipy.constants import c
lam_c = 1.550e-6
#Wafer and Waveguide Structure Variables
thick_Clad = 2.0e-6
thick_Si = 0.22e-6
thick_BOX = 2.0e-6
thick_Slab = 0.09e-6
width_ridge = 0.45e-6
width_slab = 0.5e-6
#Materials Used
material_Clad = 'SiO2 (Glass) - Palik'
material_BOX = "SiO2 (Glass) - Palik"
material_Si = "Si (Silicon) - Palik"
#Simulation Parameters
wavelength = 1.55e-6
meshsize = 10e-9
modes = 4
width_margin = 2.0e-6
height_margin = 1.0e-6
#Dimensions
Xmin = -2e-6; Xmax = 2e-6;
Zmin = -height_margin; Zmax = thick_Si + height_margin;
Y_span = 2*width_margin + width_ridge; Ymin = -Y_span/2; Ymax = -Ymin;
#Class that adds all materials
class material_setup:
def add_material(mode):
matname = "Air (1)";
if 1:
newmaterial = mode.addmaterial("Dielectric");
mode.setmaterial(newmaterial,"name",matname);
mode.setmaterial(matname,"Refractive Index",1);
#mode.setmaterial(matname,"color",[0.85, 0.85, 0, 1]);
matname = "Si (Silicon) - Dispersive & Lossless";
newmaterial = mode.addmaterial("Lorentz");
mode.setmaterial(newmaterial,"name",matname);
mode.setmaterial(matname,"Permittivity",7.98737492);
mode.setmaterial(matname,"Lorentz Linewidth",1e8);
mode.setmaterial(matname,"Lorentz Permittivity",3.68799143);
#mode.setmaterial(matname,"color",[0.85, 0, 0, 1]); # red
matname = "SiO2 (Glass) - Dispersive & Lossless";
newmaterial = mode.addmaterial("Lorentz");
mode.setmaterial(newmaterial,"name",matname);
mode.setmaterial(matname,"Permittivity",2.119881);
mode.setmaterial(matname,"Lorentz Linewidth",1e10);
mode.setmaterial(matname,"Lorentz Resonance",3.309238e+13);
mode.setmaterial(matname,"Lorentz Permittivity", 49.43721);
#mode.setmaterial(matname,"color",[0.5, 0.5, 0.5, 1]); # grey
matname = "SiO2 (Glass) - Const";
newmaterial = mode.addmaterial("Dielectric");
mode.setmaterial(newmaterial,"name",matname);
mode.setmaterial(matname,"Permittivity",1.444*1.444);
#mode.setmaterial(matname,"color",[0.5, 0.5, 0.5, 1]); # grey
matname = "SWG_strip";
if 1:
newmaterial = mode.addmaterial("Dielectric");
mode.setmaterial(newmaterial,"name",matname);
mode.setmaterial(matname,"Refractive Index",2.73);
#mode.setmaterial(matname,"color",[0.85, 0.5, 0, 1]);
mode.switchtolayout()
#Class that draws the photonic components and sets up FDE sweep
class width_sweep_setup:
#Adding mesh and FDE regions
def wg_2D_func(mode):
#Adding mesh regions
mode.addmesh();
mode.set("name", 'mesh1');
#set("solver type", "2D X normal");
mode.set("x max", Xmax);
mode.set("x min", Xmin);
mode.set("y", 0);
mode.set("y span", width_ridge);
mode.set("z min", -0.02e-6);
mode.set("z max", thick_Si+0.02e-6);
mode.set("dx", meshsize);
mode.set("dy", meshsize*0.5);
mode.set("dz", meshsize*0.5);
mode.addmesh();
mode.set("name", 'mesh2');
#set("solver type", "2D X normal");
mode.set("x max", Xmax);
mode.set("x min", Xmin);
mode.set("y", 0);
mode.set("y span", width_slab);
mode.set("z min", -0.02e-6);
mode.set("z max", thick_Slab+0.02e-6);
mode.set("dx", meshsize);
mode.set("dy", meshsize*0.5);
mode.set("dz", meshsize*0.5);
# add 2D mode solver (waveguide cross-section)
#Adding FDE solver
mode.addfde();
mode.set("solver type", "2D X normal");
mode.set("x", 0);
mode.set("y", 0);
mode.set("y span", Y_span);
mode.set("z max", Zmax);
mode.set("z min", Zmin);
mode.set("wavelength", wavelength);
mode.set("solver type","2D X normal");
mode.set("define y mesh by","maximum mesh step");
mode.set("dy", meshsize);
mode.set("define z mesh by","maximum mesh step");
mode.set("dz", meshsize);
mode.set("number of trial modes",modes);
#Adding Photonic Components
def wg_2D_draw(mode):
#Adding cladding
mode.addrect();
mode.set("name","Clad");
mode.set("y", 0);
mode.set("y span", Y_span+1e-6);
mode.set("z min", 0);
mode.set("z max", thick_Clad);
mode.set("x min", Xmin);
mode.set("x max", Xmax);
mode.set("override mesh order from material database",1);
mode.set("mesh order",3);
mode.set("alpha", 0.2);
mode.set('material', material_Clad);
#Adding Buried Oxide
mode.addrect();
mode.set("name", "BOX");
mode.set("material", material_BOX);
mode.set("x min", Xmin);
mode.set("x max", Xmax);
mode.set("z min", -thick_BOX);
mode.set("z max", 0);
mode.set("y", 0);
mode.set("y span", Y_span+1e-6);
mode.set("alpha", 0.1);
#Adding Silicon Wafer
mode.addrect();
mode.set("name", "Wafer");
mode.set("material", material_Si);
mode.set("x min", Xmin);
mode.set("x max", Xmax);
mode.set("z max", -thick_BOX);
mode.set("z min", -thick_BOX-2e-6);
mode.set("y", 0);
mode.set("y span", Y_span+1e-6);
mode.set("alpha", 0.1);
#Adding Waveguide
mode.addrect();
mode.set("name", "waveguide");
mode.set("material",material_Si);
#set("index",3.2);
mode.set("y", 0);
mode.set("y span", width_ridge);
mode.set("z min", 0);
mode.set("z max", thick_Si);
mode.set("x min", Xmin);
mode.set("x max", Xmax);
#Adding Slab Waveguide
mode.addrect();
mode.set("name", "slab");
mode.set("material",material_Si);
if thick_Slab==0:
mode.set("y min", 0);
mode.set("y max", 0);
else:
mode.set("y", 0);
mode.set("y span", width_slab);
mode.set("z min", 0);
mode.set("z max", thick_Slab);
mode.set("x min", Xmin);
mode.set("x max", Xmax);
mode.set("alpha", 0.8);
| 33.695 | 82 | 0.541178 |
7945c5c200cdbc4a5896ee8670bd2e7b2f3c0137 | 1,328 | py | Python | examples/docs_snippets/docs_snippets/overview/configuration/configured_named_solid_example.py | chasleslr/dagster | 88907f9473fb8e7a9b1af9a0a8b349d42f4b8153 | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets/overview/configuration/configured_named_solid_example.py | chasleslr/dagster | 88907f9473fb8e7a9b1af9a0a8b349d42f4b8153 | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets/overview/configuration/configured_named_solid_example.py | chasleslr/dagster | 88907f9473fb8e7a9b1af9a0a8b349d42f4b8153 | [
"Apache-2.0"
] | null | null | null | from dagster import Field, InputDefinition, Int, List, configured, execute_pipeline, pipeline, solid
# start_configured_named
@solid(
config_schema={
"is_sample": Field(bool, is_required=False, default_value=False),
},
input_defs=[InputDefinition("xs", List[Int])],
)
def variance(context, xs):
n = len(xs)
mean = sum(xs) / n
summed = sum((mean - x) ** 2 for x in xs)
result = summed / (n - 1) if context.solid_config["is_sample"] else summed / n
return result ** (1 / 2)
# If we want to use the same solid configured in multiple ways in the same pipeline,
# we have to specify unique names when configuring them:
sample_variance = configured(variance, name="sample_variance")({"is_sample": True})
population_variance = configured(variance, name="population_variance")({"is_sample": False})
@pipeline
def stats_pipeline():
sample_variance()
population_variance()
# end_configured_named
def run_pipeline():
result = execute_pipeline(
stats_pipeline,
{
"solids": {
"sample_variance": {"inputs": {"xs": [4, 8, 15, 16, 23, 42]}},
"population_variance": {
"inputs": {"xs": [33, 30, 27, 29, 32, 30, 27, 28, 30, 30, 30, 31]}
},
}
},
)
return result
| 28.255319 | 100 | 0.616717 |
7945c6724cbfef5842212ed1ee6d13b535bea1d1 | 13,648 | py | Python | Environment/build_environment.py | apayne19/DoubleAuctionMarket | 9e0e6ab033c2bac978dc94d4a94e88805df8e3b6 | [
"MIT"
] | 30 | 2018-06-27T18:09:30.000Z | 2022-02-28T16:22:29.000Z | Environment/build_environment.py | michaeljyt/DoubleAuctionMarket | ab3e6116f52a2fccc1f64028f8cd1a727d2bda14 | [
"MIT"
] | 20 | 2017-10-05T23:47:57.000Z | 2018-10-02T20:28:05.000Z | Environment/build_environment.py | michaeljyt/DoubleAuctionMarket | ab3e6116f52a2fccc1f64028f8cd1a727d2bda14 | [
"MIT"
] | 11 | 2017-09-21T22:13:43.000Z | 2021-10-30T18:17:24.000Z | import operator
import matplotlib.pyplot as plt # import matplotlib
import numpy as np # import numpy
import time
import random
import csv
class BuildMarketEnv(object):
""" A class that makes a market"""
env = {"demand": [], "dem": [], "supply": [], "sup": [], "buyers": {}, "sellers": {}, "eq": {}}
def __init__(self, name, num_buyers, num_sellers, debug=False):
self.name = name
self.num_buyers = num_buyers
self.num_sellers = num_sellers
self.debug = debug
if debug:
print(self.num_buyers, self.num_sellers)
for buyer in range(self.num_buyers):
buyer_id = "buyer" + str(buyer)
self.env["buyers"][buyer_id] = [] # Add a list of values to buyers[buyer_id] as key
for seller in range(self.num_sellers):
seller_id = "seller" + str(seller)
self.env["sellers"][seller_id] = [] # Add a list of costs to sellers[seller_id] as key
def show(self):
print("I am market {} with {} buyers and {} sellers.".format(self.name, self.num_buyers, self.num_sellers))
print("")
def show_participants(self):
print("Market Participants")
print("-------------------")
print("BUYERS")
print("------")
for buyer in range(self.num_buyers):
buyer_id = "buyer" + str(buyer)
print("buyer {} has values {}".format(buyer_id, self.env["buyers"][buyer_id]))
print("SELLERS")
print("-------")
for seller in range(self.num_sellers):
seller_id = "seller" + str(seller)
print("seller {} has costs {}".format(seller_id, self.env["sellers"][seller_id]))
print("")
def add_buyer(self, buyer_number, values):
buyer_id = "buyer" + str(buyer_number)
self.env["buyers"][buyer_id] = values
def get_buyer_values(self, buyer_number):
if buyer_number > self.num_buyers - 1:
return [-1]
else:
return self.env["buyers"]["buyer" + str(buyer_number)]
def add_seller(self, seller_number, costs):
seller_id = "seller" + str(seller_number)
self.env["sellers"][seller_id] = costs
def get_seller_costs(self, seller_number):
if seller_number > self.num_sellers - 1:
return [-1]
else:
return self.env["sellers"]["seller" + str(seller_number)]
def make_demand(self):
dem = []
for buyer in range(self.num_buyers):
buyer_id = "buyer" + str(buyer)
for value in self.env["buyers"][buyer_id]:
dem.append((buyer_id, value))
sdem = sorted(dem, key=operator.itemgetter(1), reverse=True)
self.env["demand"] = sdem
def make_supply(self):
sup = []
for seller in range(self.num_sellers):
seller_id = "seller" + str(seller)
for cost in self.env["sellers"][seller_id]:
sup.append((seller_id, cost))
ssup = sorted(sup, key=operator.itemgetter(1))
self.env["supply"] = ssup
def list_supply_demand(self):
dem = self.env["demand"]
sup = self.env["supply"]
sd = sup + dem
s_and_d = sorted(sd, key=operator.itemgetter(1), reverse=True)
print("Unit ID Cost | Value ID")
print("----------------------------------")
for unit in s_and_d:
if unit[0][0] == "b":
print(" " * 20 + "| {:^3} {:^3}".format(unit[1], unit[0]))
if unit[0][0] == "s":
print(" " * 5 + "{:^3} {:^3} |".format(unit[0], unit[1]))
print("")
def plot_supply_demand(self):
"""
First define supply and demand curves
"""
# make units
dunits = [units for units in range(len(self.env["demand"]) + 2)] # demand units list of numbers
sunits = [units for units in range(len(self.env["supply"]) + 1)] # supply units list of numbers
munits = [units for units in range(max(len(dunits), len(sunits)))] # maximum units list of numbers
self.calc_equilibrium()
"""
Then plot the curves
"""
demand_values = self.env["dem"]
supply_costs = self.env["sup"]
plt.step(dunits, demand_values, label='Demand') # generate the demand plot
plt.step(sunits, supply_costs, label='Supply') # generate the supply plot
eq_price_high = self.env["eq"]["price_high"]
eq_price_low = self.env["eq"]["price_low"]
if eq_price_high != eq_price_low:
plt.plot(munits, [eq_price_high for x in munits], label='Price High') # High Price Line
plt.plot(munits, [eq_price_low for x in munits], label='Price Low') # Low Price Line
else:
plt.plot(munits, [eq_price_high for x in munits], label='Price') # Just one price
plt.legend(bbox_to_anchor=(0.65, 0.98)) # places a legend on the plot
plt.title('Supply and Demand') # add the title
plt.xlabel('Units') # add the x axis label
plt.ylabel('$') # add the y axis label
#plt.show(block=False)
pass
def calc_equilibrium(self):
# make demand values
max_value = 0
for index in self.env["demand"]:
if index[1] > max_value: # find the maximum value
max_value = index[1]
demand_values = [max_value + 1] # note first element is just used to create upper range in graph
for index in self.env["demand"]: # get demand tuples
demand_values.append(index[1]) # and pull out second element to get value
demand_values.append(0) # put a zero value at the end to pull graph down to x axes
# make supply values the same way
supply_costs = [0] # note first element is used to create lower range of supply values
for index in self.env["supply"]: # get supply tuples
supply_costs.append(index[1]) # and pull out second element to get cost
self.env["dem"] = demand_values
self.env["sup"] = supply_costs
# calculate equilibrium and maximum surplus
# note supply and demand schedules can be different lengths
min_length = min(len(self.env["demand"]), len(self.env["supply"])) + 1
max_length = max(len(self.env["demand"]), len(self.env["supply"])) + 1
# now make equilibrium calculations
# TODO need to test for supply and demand not crossing
# this can happen at beginning or at end
#
max_surplus = 0 # max_surplus is the area under the supply and demand up to equilibrium
eq_units = 0 # this is the maximum number of units that can sell
for unit in range(1, min_length): # only go as far as shortest schedule
if demand_values[unit] >= supply_costs[unit]: # As long as value is above or equal to cost
eq_units = eq_units + 1 # unit should sell in equilibrium
max_surplus = max_surplus + demand_values[unit] - supply_costs[unit] # add surplus
last_accepted_value = demand_values[unit] # update last accepted value
last_accepted_cost = supply_costs[unit] # update last accepted cost
else: # now value is below cost
first_rejected_value = demand_values[unit] # calculate first rejected value
first_rejected_cost = supply_costs[unit] # calculate first rejected cost
break # exit loop we are done here
# Now calculate equilibrium price range
eq_price_high = min(last_accepted_value, first_rejected_cost)
eq_price_low = max(last_accepted_cost, first_rejected_value)
self.env["eq"]["price_high"] = eq_price_high
self.env["eq"]["price_low"] = eq_price_low
self.env["eq"]["quantity"] = eq_units
self.env["eq"]["surplus"] = max_surplus
def show_equilibrium(self):
# Print out market equilibrium numbers
print()
print("----- Equilibrium -----")
print("When market {} is in equilibrium we have:".format(self.name))
print("equilibrium price = {} - {}".format(self.env["eq"]["price_low"], self.env["eq"]["price_high"]))
print("equilibrium quantity = {}".format(self.env["eq"]["quantity"]))
print("maximum surplus = {}".format(self.env["eq"]["surplus"]))
print(" ")
def get_equilibrium(self):
pl = self.env["eq"]["price_low"]
ph = self.env["eq"]["price_high"]
qt = self.env["eq"]["quantity"]
ms = self.env["eq"]["surplus"]
return (pl, ph, qt, ms)
def save_file(self, path):
# write out "env" as .csv file
output_file = open(path + '.csv', 'w', newline='')
output_writer = csv.writer(output_file)
# First write out number of buyers and number of sellers
output_writer.writerow([self.num_buyers, self.num_sellers])
# Second write out buyer information
for buyer in range(self.num_buyers):
buyer_id = "buyer" + str(buyer)
output_writer.writerow(self.env["buyers"][buyer_id])
# Third write out seller information
for seller in range(self.num_sellers):
seller_id = "seller" + str(seller)
output_writer.writerow(self.env["sellers"][seller_id])
# Fourth write out supply and demand curves with id's
# Write as two lists
self.make_supply()
s = []
for element in self.env["supply"]:
s.append(element[0])
s.append(element[1])
print(s)
output_writer.writerow(s)
self.make_demand()
s = []
for element in self.env["demand"]:
s.append(element[0])
s.append(element[1])
print(s)
output_writer.writerow(s)
# Make equilibrium calculations
self.calc_equilibrium()
# Fifth write out supply and demand without id's
output_writer.writerow(self.env["sup"])
output_writer.writerow(self.env["dem"])
# Sixth write out equilibrium values
output_writer.writerow([self.env["eq"]["price_high"],
self.env["eq"]["price_low"],
self.env["eq"]["quantity"],
self.env["eq"]["surplus"]])
# That's it for now
output_file.close()
def load_file(self, path):
# load a .csv file
try:
input_file = open(path + '.csv')
input_reader = csv.reader(input_file)
env_data = list(input_reader)
# Process num_buyers and num_sellers (First)
line = 0
self.num_buyers = int(env_data[line][0])
self.num_sellers = int(env_data[line][1])
# Process buyer values (Second)
for buyer in range(self.num_buyers):
line = 1 + buyer
values = [int(x) for x in env_data[line]] # have to convert back to integers
buyer_id = "buyer" + str(buyer)
self.env["buyers"][buyer_id] = values
# Process seller costs (Third)
for seller in range(self.num_sellers):
line = 1 + self.num_buyers + seller
costs = [int(x) for x in env_data[line]] # have to convert back to integers
seller_id = "seller" + str(seller)
self.env["sellers"][seller_id] = costs
# Process supply and demand curves with id's (Fourth)
line = 1 + self.num_buyers + self.num_sellers
remake = []
for i in range(0, len(env_data[line]), 2):
e1 = env_data[line][i]
e2 = int(env_data[line][i + 1])
remake.append((e1, e2))
self.env["supply"] = remake
#
remake = []
for i in range(0, len(env_data[line + 1]), 2):
e1 = env_data[line + 1][i]
e2 = int(env_data[line + 1][i + 1])
remake.append((e1, e2))
self.env["demand"] = remake
# Process supply and demand curves without id's (Fifth)
self.env["sup"] = [int(x) for x in env_data[line + 2]]
self.env["dem"] = [int(x) for x in env_data[line + 3]]
# Process equilibrium values
self.env["eq"]["price_high"] = int(env_data[line + 4][0])
self.env["eq"]["price_low"] = int(env_data[line + 4][1])
self.env["eq"]["quantity"] = int(env_data[line + 4][2])
self.env["eq"]["surplus"] = int(env_data[line + 4][3])
except OSError as err:
print("File {} does not exist".format(path))
def prepare_market(self, input_path, input_file):
self.load_file(input_path + input_file)
#self.plot_supply_demand()
self.show_participants()
self.show_equilibrium()
if __name__ == "__main__":
# This code shows some basic usage of BuildMarkeEnv
mkt = BuildMarketEnv("test", 2, 3)
mkt.show()
mkt.add_buyer(0, [200, 100, 50])
mkt.add_buyer(1, [150, 125, 75])
mkt.add_seller(0, [50, 75, 125])
mkt.add_seller(1, [25, 65, 100])
mkt.add_seller(2, [60, 70, 150])
mkt.show_participants()
mkt.make_demand()
mkt.make_supply()
mkt.list_supply_demand()
mkt.calc_equilibrium()
mkt.show_equilibrium()
print(mkt.get_buyer_values(0))
print(mkt.get_seller_costs(0))
#mkt.plot_supply_demand()
# methods not shown
# load_file(path)
# save_file(path)
# prepare market(input_path, input_file)
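# For illustration only (the file paths here are hypothetical), those
# methods could be exercised like this:
# mkt.save_file("my_market") # writes my_market.csv
# mkt2 = BuildMarketEnv("copy", 2, 3)
# mkt2.load_file("my_market") # reads my_market.csv back in
# mkt2.show_equilibrium()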
| 40.619048 | 115 | 0.572978 |
7945c69a908f8be0428401aac3511ee89a5857ca | 1,135 | py | Python | wagtail/wagtailimages/migrations/0006_add_verbose_names.py | seddonym/wagtail-tableblock | aea3ce67a0800285b20b93018b7c0a8679e479b7 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailimages/migrations/0006_add_verbose_names.py | seddonym/wagtail-tableblock | aea3ce67a0800285b20b93018b7c0a8679e479b7 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailimages/migrations/0006_add_verbose_names.py | seddonym/wagtail-tableblock | aea3ce67a0800285b20b93018b7c0a8679e479b7 | [
"BSD-3-Clause"
] | 1 | 2019-03-05T15:37:22.000Z | 2019-03-05T15:37:22.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0005_make_filter_spec_unique'),
]
operations = [
migrations.AlterField(
model_name='image',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Created at'),
),
migrations.AlterField(
model_name='image',
name='height',
field=models.IntegerField(verbose_name='Height', editable=False),
),
migrations.AlterField(
model_name='image',
name='uploaded_by_user',
field=models.ForeignKey(
blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True,
verbose_name='Uploaded by user'
),
),
migrations.AlterField(
model_name='image',
name='width',
field=models.IntegerField(verbose_name='Width', editable=False),
),
]
| 29.102564 | 85 | 0.587665 |
7945c7c421e2b8762fb580b5a41c5af1f7808cf5 | 1,518 | py | Python | logging_utils.py | yawana1/GrapeGS | 2878b9c3619128dbbc3730d712798200127a03cb | [
"MIT"
] | null | null | null | logging_utils.py | yawana1/GrapeGS | 2878b9c3619128dbbc3730d712798200127a03cb | [
"MIT"
] | null | null | null | logging_utils.py | yawana1/GrapeGS | 2878b9c3619128dbbc3730d712798200127a03cb | [
"MIT"
] | null | null | null | '''
Created on Jun 29, 2017
@author: Alex Martelli
'''
import sys
import traceback
import logging
_verbose = False
def setup_logging_to_file(filename, verbose = False):
if verbose:
logging.basicConfig( filename=filename,
filemode='w',
level=logging.DEBUG,
format= '%(asctime)s - %(levelname)s - %(message)s',
)
else:
logging.basicConfig( filename=filename,
filemode='w',
level=logging.ERROR,
format= '%(asctime)s - %(levelname)s - %(message)s',
)
_verbose = verbose
def extract_function_name():
"""
Extracts failing function name from Traceback
by Alex Martelli
http://stackoverflow.com/questions/2380073/how-to-identify-what-function-call-raise-an-exception-in-python
"""
tb = sys.exc_info()[-1]
stk = traceback.extract_tb(tb, 1)
fname = stk[0][3]
return fname
def log_exception(e):
logging.error(
"Function {function_name} raised {exception_class} ({exception_docstring}): {exception_message}".format(
function_name = extract_function_name(), #this is optional
exception_class = e.__class__,
exception_docstring = e.__doc__,
exception_message = str(e)))
def log_info(msg):
logging.info(msg)
print (msg)
def log_warn(msg):
logging.warn(msg)
print (msg)
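# Example usage (illustrative sketch, not part of the original module);
# the log file name is hypothetical:
#
# setup_logging_to_file("app.log", verbose=True)
# log_info("starting up")
# try:
# 1 / 0
# except ZeroDivisionError as e:
# log_exception(e)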
| 28.111111 | 115 | 0.579051 |
7945c8a9ceac7f6c0b5e35ab8034c54543903e03 | 2,054 | py | Python | examples/example2.py | jorgehatccrma/pygrfnn | c67cb30c5cde579796ccbacc6338eb0631e81f6e | [
"BSD-3-Clause"
] | 7 | 2015-10-01T12:54:11.000Z | 2018-09-27T04:10:49.000Z | examples/example2.py | jorgehatccrma/pygrfnn | c67cb30c5cde579796ccbacc6338eb0631e81f6e | [
"BSD-3-Clause"
] | null | null | null | examples/example2.py | jorgehatccrma/pygrfnn | c67cb30c5cde579796ccbacc6338eb0631e81f6e | [
"BSD-3-Clause"
] | 3 | 2015-10-01T12:54:14.000Z | 2018-11-15T13:35:21.000Z | # 0. Preliminares
import sys
sys.path.append('../') # needed to run the examples from within the package folder
import numpy as np
from pygrfnn import Zparam, GrFNN, Model, make_connections
from pygrfnn.vis import plot_connections
from pygrfnn.vis import tf_detail
from pygrfnn.vis import GrFNN_RT_plot
# 1. Create Stimulus: Complex sinusoid
sr = 4000.0 # sample rate
dt = 1.0/sr
t = np.arange(0, 1, dt)
fc = 100.0 # frequency
A = 0.025 # amplitude
s = A * np.exp(1j * 2 * np.pi * fc * t)
# ramp signal linearly up/down
ramp_dur = 0.01 # in secs
ramp = np.arange(0, 1, dt / ramp_dur)
env = np.ones(s.shape, dtype=float)
env[0:len(ramp)] = ramp
env[-len(ramp):] = ramp[::-1]
# apply envelope
s = s * env
# plot stimulus
import matplotlib.pyplot as plt
plt.ion()
plt.plot(t, np.real(s))
plt.plot(t, np.imag(s))
plt.title('Stimulus')
# 2. Make the GrFNN model
# Explore different parameter sets
params1 = Zparam(0.01,-1.,-10., 0., 0., 1.) # Linear
params2 = Zparam( -1., 4., -3., 0., 0., 1.) # Critical
# Create the GrFNNs
layer1 = GrFNN(params1,
frequency_range=(50,200),
num_oscs=200,
stimulus_conn_type='active')
layer2 = GrFNN(params2,
frequency_range=(50,200),
num_oscs=200)
# create a connection matrix
# C = make_connections(layer1, layer2, 1, 1.005, self_connect=True)
C = np.eye(len(layer2.f), len(layer1.f))
# Make the model
model = Model()
model.add_layer(layer1, input_channel=0) # layer one will receive the external stimulus
model.add_layer(layer2) # layer 2 is a hidden layer (no external input)
# connect the layers
conn = model.connect_layers(layer1, layer2, C, '1freq', self_connect=True)
plot_connections(conn, title='Connection matrix (abs)')
# prepare real-time plots
GrFNN_RT_plot(layer1, update_interval=0.005, title='First Layer')
GrFNN_RT_plot(layer2, update_interval=0.005, title='Second Layer')
# 3. Run the model
model.run(s, t, dt)
## Profile
# cmd = "model.run(s, t, dt)"
# import cProfile
# results = cProfile.run(cmd) | 24.452381 | 88 | 0.680623 |
7945c8ff697bdb164efa0e447776fb04b5b8a6c5 | 8,081 | py | Python | src/encoded/types/quality_metric.py | KCL-ORG/encoded | 5a1904e948bfd652e8a8d52c6717d7fc0b56b681 | [
"MIT"
] | null | null | null | src/encoded/types/quality_metric.py | KCL-ORG/encoded | 5a1904e948bfd652e8a8d52c6717d7fc0b56b681 | [
"MIT"
] | null | null | null | src/encoded/types/quality_metric.py | KCL-ORG/encoded | 5a1904e948bfd652e8a8d52c6717d7fc0b56b681 | [
"MIT"
] | null | null | null | from snovault import (
abstract_collection,
collection,
calculated_property,
load_schema,
)
from snovault.attachment import ItemWithAttachment
from .base import (
Item,
)
from .shared_calculated_properties import CalculatedAssayTermID
@abstract_collection(
name='quality-metrics',
properties={
'title': "Quality metrics",
'description': 'Listing of all types of quality metric.',
})
class QualityMetric(ItemWithAttachment, CalculatedAssayTermID, Item):
base_types = ['QualityMetric'] + Item.base_types
set_status_up = [
'step_run',
]
set_status_down = []
@collection(
name='star-quality-metrics',
properties={
'title': "STAR mapping Quality Metrics",
'description': 'A set of QC metrics from STAR RNA-seq mapping',
})
class StarQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'star_quality_metric'
schema = load_schema('encoded:schemas/star_quality_metric.json')
@collection(
name='bismark-quality-metrics',
properties={
'title': "Bismark (WGBS) mapping quality metrics",
'description': 'A set of QC metrics from Bismark mapping for WGBS',
})
class BismarkQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'bismark_quality_metric'
schema = load_schema('encoded:schemas/bismark_quality_metric.json')
@collection(
name='cpg-correlation-quality-metrics',
properties={
'title': "WGBS replicate correlation CpG quality metrics",
'description': 'A set of QC metrics from WGBS replicate CpG correlations',
})
class CpgCorrelationQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'cpg_correlation_quality_metric'
schema = load_schema('encoded:schemas/cpg_correlation_quality_metric.json')
@collection(
name='chipseq-filter-quality-metrics',
properties={
'title': "Quality metrics for ChIP-seq (filtering step)",
'description': 'A set of QC metrics used ChIP-seq experiments (filtering step)',
})
class ChipSeqFilterQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'chipseq_filter_quality_metric'
schema = load_schema('encoded:schemas/chipseq_filter_quality_metric.json')
@collection(
name='correlation-quality-metrics',
properties={
'title': "Correlation of two replicate datasets",
'description': 'Correlation QC metrics for two replicate sets of items',
})
class CorrelationQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'correlation_quality_metric'
schema = load_schema('encoded:schemas/correlation_quality_metric.json')
@collection(
name='edwbamstats-quality-metrics',
properties={
'title': "Mapping Quality Metrics from 'edwBamStats'",
'description': "A set of mapping QC metrics from 'edwBamStats'",
})
class EdwbamstatsQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'edwbamstats_quality_metric'
schema = load_schema('encoded:schemas/edwbamstats_quality_metric.json')
@collection(
name='hotspot-quality-metrics',
properties={
'title': "Peak Quality Metrics from the 'HotSpot' package",
'description': "A set of peak QC metrics from the 'HotSpot' package",
})
class HotspotQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'hotspot_quality_metric'
schema = load_schema('encoded:schemas/hotspot_quality_metric.json')
@collection(
name='idr-summary-quality-metrics',
properties={
'title': "Irreproducible Discovery Rate (IDR) Summary Quality Metrics",
'description': "A set of Peak Replicate QC metrics from 'idr'",
})
class IdrSummaryQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'idr_summary_quality_metric'
schema = load_schema('encoded:schemas/idr_summary_quality_metric.json')
@collection(
name='mad-quality-metrics',
properties={
'title': "Replicate Concordance Metrics using Mean Absolute Deviation (MAD)",
'description': 'A set of QC metrics comparing two quantificiations '
'from replicates',
})
class MadQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'mad_quality_metric'
schema = load_schema('encoded:schemas/mad_quality_metric.json')
@collection(
name='complexity-xcorr-quality-metrics',
properties={
'title': "Quality Metrics for library complexity and cross-correlation of Mapping Sample",
'description': 'A set of sampled mapping QC metrics',
})
class ComplexityXcorrQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'complexity_xcorr_quality_metric'
schema = load_schema('encoded:schemas/complexity_xcorr_quality_metric.json')
@collection(
name='duplicates-quality-metrics',
properties={
'title': "Quality Metrics for duplicates as counted by Picard (non-UMI) or stampipes (UMI).",
'description': "A set of duplicate read QC metrics as detected by 'picard mark_duplicates' or 'stampipes mark_umi_dups'",
})
class DuplicatesQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'duplicates_quality_metric'
schema = load_schema('encoded:schemas/duplicates_quality_metric.json')
@collection(
name='filtering-quality-metrics',
properties={
'title': "Read Filtering Quality Metrics",
'description': 'QC metrics documenting bam file read filtering',
})
class FilteringQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'filtering_quality_metric'
schema = load_schema('encoded:schemas/filtering_quality_metric.json')
@collection(
name='trimming-quality-metrics',
properties={
'title': "Read Trimming Quality Metrics",
'description': 'QC metrics for documenting fastq file read trimming',
})
class TrimmingQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'trimming_quality_metric'
schema = load_schema('encoded:schemas/trimming_quality_metric.json')
@collection(
name='samtools-flagstats-quality-metrics',
properties={
'title': "Mapping Quality Metrics from 'samtools --flagstats'",
'description': "A set of mapping QC metrics from 'samtools --flagstats'",
})
class SamtoolsFlagstatsQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'samtools_flagstats_quality_metric'
schema = load_schema('encoded:schemas/samtools_flagstats_quality_metric.json')
@collection(
name='samtools-stats-quality-metrics',
properties={
'title': "Mapping Quality Metrics from the Summary of 'samtools --stats'",
'description': "A set of mapping QC metrics from 'samtools --stats'",
})
class SamtoolsStatsQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'samtools_stats_quality_metric'
schema = load_schema('encoded:schemas/samtools_stats_quality_metric.json')
@collection(
name='idr-quality-metrics',
properties={
'title': "IDR Metrics",
'description': "Quality metrics from Irreproducible Discovery Rate (IDR) analysis",
})
class IDRQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'idr_quality_metric'
schema = load_schema('encoded:schemas/idr_quality_metric.json')
@collection(
name='histone-chipseq-quality-metrics',
properties={
'title': "Histone ChIP-seq Quality Metrics",
'description': "Quality metrics from histone ChIP-seq peak overlap analysis",
})
class HistoneChipSeqQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'histone_chipseq_quality_metric'
schema = load_schema('encoded:schemas/histone_chipseq_quality_metric.json')
@collection(
name='generic-quality-metrics',
properties={
'title': "Generic Quality Metric",
'description': "Generic quality metric",
})
class GenericQualityMetric(QualityMetric, CalculatedAssayTermID):
item_type = 'generic_quality_metric'
schema = load_schema('encoded:schemas/generic_quality_metric.json')
| 35.756637 | 129 | 0.727756 |
7945c9ffad7cbe0025e8f6e28818662a599046fe | 7,864 | py | Python | src/m6_mutation.py | luffmama/16-SequencesAndMutation | 6669f0d99cf0159dd0dfbd1529921511f80ea4ec | [
"MIT"
] | null | null | null | src/m6_mutation.py | luffmama/16-SequencesAndMutation | 6669f0d99cf0159dd0dfbd1529921511f80ea4ec | [
"MIT"
] | null | null | null | src/m6_mutation.py | luffmama/16-SequencesAndMutation | 6669f0d99cf0159dd0dfbd1529921511f80ea4ec | [
"MIT"
] | null | null | null | """
This module lets you practice MUTATION of lists.
In this module, you mutate by CHANGING elements of a list.
Authors: David Mutchler, Amanda Stouder, Chandan Rupakheti, Katie Dion,
Claude Anderson, Delvin Defoe, Curt Clifton, their colleagues,
and Margaret Luffman.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
# ----------------------------------------------------------------------
# STUDENTS:
# Be sure to READ and RUN the examples in the preceding files.
# Be sure to understand those examples BEFORE doing these exercises!
# ----------------------------------------------------------------------
def main():
run_test_RETURN_replace_negatives_by_zeros()
run_test_MUTATE_replace_negatives_by_zeros()
def run_test_RETURN_replace_negatives_by_zeros():
""" Tests the RETURN_replace_negatives_by_zeros function. """
print()
print('------------------------------------------')
print('Testing RETURN_replace_negatives_by_zeros:')
print('------------------------------------------')
# ------------------------------------------------------------------
# Test 1:
# ------------------------------------------------------------------
run_test_number = 1
original_argument = [-30.2, 50, 12.5, -1, -5, 8, 0]
correct_argument_value_after_function_call = original_argument.copy()
correct_returned_value = [0, 50, 12.5, 0, 0, 8, 0]
run_test(RETURN_replace_negatives_by_zeros,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
# ------------------------------------------------------------------
# Test 2:
# ------------------------------------------------------------------
run_test_number = 2
original_argument = [2, 0, -9, 1, -30]
correct_argument_value_after_function_call = original_argument.copy()
correct_returned_value = [2, 0, 0, 1, 0]
run_test(RETURN_replace_negatives_by_zeros,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
def run_test(function_to_test, argument, run_test_number,
correct_returned_value,
correct_argument_value_after_function_call):
"""
Runs a test, by sending the given function the given argument.
The function should return the given correct_returned_value.
After the function call, the argument should equal the given
correct_argument_value_after_function_call.
Prints messages to indicate whether the test passed or failed.
"""
print()
print('Running TEST {}:'.format(run_test_number, run_test_number))
actual_returned_value = function_to_test(argument)
passed_check1 = check_returned_value(actual_returned_value,
correct_returned_value)
passed_check2 = check_argument(argument,
correct_argument_value_after_function_call)
if passed_check1 and passed_check2:
print(' Your code PASSES Test {}.'.format(run_test_number))
def check_returned_value(actual_returned_value, correct_returned_value):
"""
Checks whether the two given returned-values are equal.
If so, returns True.
If not, prints an appropriate message and returns False.
"""
if actual_returned_value == correct_returned_value:
return True
else:
print(' Your code FAILS this test')
print(' because it returns the wrong value:')
print(' -- The correct returned value is:',
correct_returned_value)
print(' -- Your code returned this value:',
actual_returned_value)
return False
def check_argument(actual_argument_value, correct_argument_value):
"""
Checks whether the two given argument-values are equal.
If so, returns True.
If not, prints an appropriate message and returns False.
"""
if actual_argument_value == correct_argument_value:
return True
else:
print(' Your code FAILS this test because the argument')
print(' has the wrong value after the function call:')
print(' -- The correct value after the function call is:',
correct_argument_value)
print(' -- Your actual value after the function call is:',
actual_argument_value)
return False
def RETURN_replace_negatives_by_zeros(numbers):
"""
RETURNs a NEW list that is the same as the given list of numbers,
but with each negative number in the list replaced by zero.
For example, if the given list is [-30.2, 50, 12.5, -1, -5, 8, 0].
then the returned list is the NEW list [0, 50, 12.5, 0, 0, 8, 0].
This function must NOT mutate the given list.
Precondition: The argument is a list of numbers.
"""
# DONE: 2. First, READ THE ABOVE TEST CODE.
# Make sure that you understand it.
# Then, IMPLEMENT and test THIS FUNCTION
# (using the above code for testing).
newnum = []
for k in range(len(numbers)):
if numbers[k] > 0:
newnum = newnum + [numbers[k]]
else:
newnum = newnum + [0]
return newnum
def run_test_MUTATE_replace_negatives_by_zeros():
""" Tests the MUTATE_replace_negatives_by_zeros function. """
print()
print('------------------------------------------')
print('Testing MUTATE_replace_negatives_by_zeros:')
print('------------------------------------------')
# ------------------------------------------------------------------
# Test 1:
# ------------------------------------------------------------------
run_test_number = 1
original_argument = [-30.2, 50, 12.5, -1, -5, 8, 0]
correct_argument_value_after_function_call = [0, 50, 12.5, 0, 0, 8, 0]
correct_returned_value = None
run_test(MUTATE_replace_negatives_by_zeros,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
# ------------------------------------------------------------------
# Test 2:
# ------------------------------------------------------------------
run_test_number = 2
original_argument = [2, 0, -9, 1, -30]
correct_argument_value_after_function_call = [2, 0, 0, 1, 0]
correct_returned_value = None
run_test(MUTATE_replace_negatives_by_zeros,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
def MUTATE_replace_negatives_by_zeros(numbers):
"""
MUTATES the given list of numbers so that
each negative number in the list is replaced by zero
(and non-negative numbers are left unchanged).
For example, if the given list is [-30.2, 50, 12.5, -1, -5, 8, 0].
then that list is MUTATED to become [0, 50, 12.5, 0, 0, 8, 0].
This function must NOT use any additional lists beyond the given
list and must NOT return anything (other than the default None).
Precondition: The argument is a list of numbers.
"""
# DONE: 3. First, READ THE ABOVE TEST CODE.
# Make sure that you understand it.
# Then, IMPLEMENT and test THIS FUNCTION
# (using the above code for testing).
for k in range(len(numbers)):
if numbers[k] < 0:
numbers[k] = 0
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
| 37.990338 | 78 | 0.575407 |
7945ca1d7e3e163652a85f24c15137e1082df56e | 3,674 | py | Python | dace/codegen/prettycode.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | [
"BSD-3-Clause"
] | 1 | 2022-03-11T13:36:34.000Z | 2022-03-11T13:36:34.000Z | dace/codegen/prettycode.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | [
"BSD-3-Clause"
] | null | null | null | dace/codegen/prettycode.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Code I/O stream that automates indentation and mapping of code to SDFG
nodes. """
import inspect
from six import StringIO
from dace.config import Config
class CodeIOStream(StringIO):
""" Code I/O stream that automates indentation and mapping of code to SDFG
nodes. """
def __init__(self, base_indentation=0):
super(CodeIOStream, self).__init__()
self._indent = 0
self._spaces = int(Config.get('compiler', 'indentation_spaces'))
self._lineinfo = Config.get_bool('compiler', 'codegen_lineinfo')
def write(self, contents, sdfg=None, state_id=None, node_id=None):
# Delete single trailing newline, as this will be implicitly inserted
# anyway
if contents:
if contents[-1] == "\n":
lines = contents[:-1].split("\n")
else:
lines = contents.split('\n')
else:
lines = contents
# If SDFG/state/node location is given, annotate this line
if sdfg is not None:
location_identifier = ' ////__DACE:%d' % sdfg.sdfg_id
if state_id is not None:
location_identifier += ':' + str(state_id)
if node_id is not None:
if not isinstance(node_id, list):
node_id = [node_id]
for i, nid in enumerate(node_id):
if not isinstance(nid, int):
node_id[i] = sdfg.nodes()[state_id].node_id(nid)
location_identifier += ':' + ','.join([str(nid) for nid in node_id])
else:
location_identifier = ''
# Annotate code generator line
if self._lineinfo:
caller = inspect.getframeinfo(inspect.stack()[1][0])
location_identifier += f' ////__CODEGEN;{caller.filename};{caller.lineno}'
# Write each line separately
for line in lines:
opening_braces = line.count('{')
closing_braces = line.count('}')
# Count closing braces before opening ones (e.g., for "} else {")
first_opening_brace = line.find('{')
initial_closing_braces = 0
if first_opening_brace > 0:
initial_closing_braces = line[:first_opening_brace].count('}')
closing_braces -= initial_closing_braces
brace_balance = opening_braces - closing_braces
# Write line and then change indentation
if initial_closing_braces > 0:
self._indent -= initial_closing_braces
if brace_balance < 0:
self._indent += brace_balance
codeline = self._indent * self._spaces * ' ' + line.strip()
# Location identifier is written at character 81 and on, find out
# how many spaces we need to add for that
loc_spaces = max(80 - len(codeline), 2)
if location_identifier != '':
super(CodeIOStream, self).write(codeline + loc_spaces * ' ' + location_identifier + '\n')
else: # avoid ending spaces (useful for OpenCL and multiline macros)
super(CodeIOStream, self).write(codeline + '\n')
if brace_balance > 0:
self._indent += brace_balance
# If indentation failed, warn user
if self._indent < -1:
super(CodeIOStream, self).write('///WARNING: Indentation failure! This probably ' +
'indicates an error in the SDFG.\n')
self._indent = 0
| 41.75 | 105 | 0.56859 |
7945cba57dd076965b718874a6cfc33d41b5cb60 | 1,589 | py | Python | sample/basic/basic_file_report_example.py | ndcolter-mcafee/opendxl-virustotal-service-python | c65515d9b9c91bca08d5024c27b593ee07a22f92 | [
"Apache-2.0"
] | 6 | 2017-05-04T18:42:44.000Z | 2021-03-23T13:46:03.000Z | sample/basic/basic_file_report_example.py | ndcolter-mcafee/opendxl-virustotal-service-python | c65515d9b9c91bca08d5024c27b593ee07a22f92 | [
"Apache-2.0"
] | 1 | 2018-07-30T17:01:25.000Z | 2018-07-30T20:15:15.000Z | sample/basic/basic_file_report_example.py | ndcolter-mcafee/opendxl-virustotal-service-python | c65515d9b9c91bca08d5024c27b593ee07a22f92 | [
"Apache-2.0"
] | 10 | 2017-08-01T00:01:35.000Z | 2019-02-27T21:40:51.000Z | # This sample invokes and displays the results of a VirusTotal "file report" via DXL.
#
# See: https://www.virustotal.com/en/documentation/public-api/#getting-file-scans
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from dxlclient.client_config import DxlClientConfig
from dxlclient.client import DxlClient
from dxlclient.message import Message, Request
from dxlbootstrap.util import MessageUtils
# Import common logging and configuration
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
from common import *
# Configure local logger
logging.getLogger().setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
# Create DXL configuration from file
config = DxlClientConfig.create_dxl_config_from_file(CONFIG_FILE)
# Create the client
with DxlClient(config) as client:
# Connect to the fabric
client.connect()
logger.info("Connected to DXL fabric.")
# Invoke 'file report' method on service
request_topic = "/opendxl-virustotal/service/vtapi/file/report"
req = Request(request_topic)
MessageUtils.dict_to_json_payload(req, {"resource": "7657fcb7d772448a6d8504e4b20168b8"})
res = client.sync_request(req, timeout=30)
if res.message_type != Message.MESSAGE_TYPE_ERROR:
# Display results
res_dict = MessageUtils.json_payload_to_dict(res)
print(MessageUtils.dict_to_json(res_dict, pretty_print=True))
else:
print("Error invoking service with topic '{0}': {1} ({2})".format(
request_topic, res.error_message, res.error_code))
| 33.808511 | 92 | 0.759597 |
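As a possible follow-up to the file-report sample above, the sketch below summarizes a report dictionary. The field names ("response_code", "positives", "total", "scans") are assumptions based on the VirusTotal v2 report format; in the sample, res_dict would be passed instead of the stubbed report:

# Hypothetical post-processing of the report payload returned above; the
# field names assume the service relays the VirusTotal v2 file-report JSON.
def summarize_report(report):
    if report.get("response_code") != 1:
        return "No report available for this resource."
    detections = sorted(
        engine for engine, result in report.get("scans", {}).items()
        if result.get("detected"))
    return "{0}/{1} engines flagged the file: {2}".format(
        report.get("positives", 0), report.get("total", 0),
        ", ".join(detections) or "none")


# Stubbed report for illustration; in the sample above, pass res_dict instead.
print(summarize_report({
    "response_code": 1, "positives": 2, "total": 70,
    "scans": {"EngineA": {"detected": True},
              "EngineB": {"detected": False},
              "EngineC": {"detected": True}}}))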
7945cbdd8dca1e08689b86cff06faacf31a725c1 | 2,046 | py | Python | gewittergefahr/gg_utils/conus_boundary_test.py | dopplerchase/GewitterGefahr | 4415b08dd64f37eba5b1b9e8cc5aa9af24f96593 | [
"MIT"
] | 26 | 2018-10-04T01:07:35.000Z | 2022-01-29T08:49:32.000Z | gewittergefahr/gg_utils/conus_boundary_test.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | [
"MIT"
] | 4 | 2017-12-25T02:01:08.000Z | 2018-12-19T01:54:21.000Z | gewittergefahr/gg_utils/conus_boundary_test.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | [
"MIT"
] | 11 | 2017-12-10T23:05:29.000Z | 2022-01-29T08:49:33.000Z | """Unit tests for conus_boundary.py."""
import unittest
import numpy
from gewittergefahr.gg_utils import conus_boundary
QUERY_LATITUDES_DEG = numpy.array([
33.7, 42.6, 39.7, 34.9, 40.2, 33.6, 36.4, 35.1, 30.8, 47.4, 44.2, 45.1,
49.6, 38.9, 35.0, 38.1, 40.7, 47.1, 30.2, 39.2
])
QUERY_LONGITUDES_DEG = numpy.array([
276.3, 282.7, 286.6, 287.5, 271.0, 266.4, 258.3, 257.3, 286.8, 235.0, 273.5,
262.5, 277.2, 255.3, 271.8, 254.3, 262.1, 247.8, 262.9, 251.6
])
IN_CONUS_FLAGS = numpy.array(
[1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1], dtype=bool
)
class ConusBoundaryTests(unittest.TestCase):
"""Each method is a unit test for conus_boundary.py."""
def test_find_points_in_conus_no_shortcuts(self):
"""Ensures correct output from find_points_in_conus.
In this case, does not use shortcuts.
"""
conus_latitudes_deg, conus_longitudes_deg = (
conus_boundary.read_from_netcdf()
)
these_flags = conus_boundary.find_points_in_conus(
conus_latitudes_deg=conus_latitudes_deg,
conus_longitudes_deg=conus_longitudes_deg,
query_latitudes_deg=QUERY_LATITUDES_DEG,
query_longitudes_deg=QUERY_LONGITUDES_DEG, use_shortcuts=False)
self.assertTrue(numpy.array_equal(these_flags, IN_CONUS_FLAGS))
def test_find_points_in_conus_with_shortcuts(self):
"""Ensures correct output from find_points_in_conus.
In this case, uses shortcuts.
"""
conus_latitudes_deg, conus_longitudes_deg = (
conus_boundary.read_from_netcdf()
)
these_flags = conus_boundary.find_points_in_conus(
conus_latitudes_deg=conus_latitudes_deg,
conus_longitudes_deg=conus_longitudes_deg,
query_latitudes_deg=QUERY_LATITUDES_DEG,
query_longitudes_deg=QUERY_LONGITUDES_DEG, use_shortcuts=True)
self.assertTrue(numpy.array_equal(these_flags, IN_CONUS_FLAGS))
if __name__ == '__main__':
unittest.main()
| 32.47619 | 80 | 0.673021 |
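The test above exercises a point-in-polygon query (find_points_in_conus). As a generic illustration of that underlying technique, not the gewittergefahr implementation (which also supports grid-based shortcuts), here is a minimal ray-casting sketch; the function name and arguments are illustrative:

# Generic ray-casting point-in-polygon test; illustrative only.
import numpy


def points_in_polygon(poly_y, poly_x, query_y, query_x):
    """Return a boolean flag per query point: True if inside the polygon."""
    poly_y = numpy.asarray(poly_y, dtype=float)
    poly_x = numpy.asarray(poly_x, dtype=float)
    flags = numpy.zeros(len(query_y), dtype=bool)
    num_vertices = len(poly_y)

    for k in range(len(query_y)):
        y, x = query_y[k], query_x[k]
        inside = False
        j = num_vertices - 1
        for i in range(num_vertices):
            # Toggle the flag each time a horizontal ray from the query
            # point crosses a polygon edge.
            if ((poly_y[i] > y) != (poly_y[j] > y)) and (
                    x < (poly_x[j] - poly_x[i]) * (y - poly_y[i]) /
                    (poly_y[j] - poly_y[i]) + poly_x[i]):
                inside = not inside
            j = i
        flags[k] = inside
    return flags


# Unit-square sanity check: the first point is inside, the second is not.
print(points_in_polygon(
    poly_y=[0, 0, 1, 1], poly_x=[0, 1, 1, 0],
    query_y=numpy.array([0.5, 2.0]), query_x=numpy.array([0.5, 2.0])))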
7945cbdec870e2d52c62d4f64be13d19c9c2e223 | 3,397 | py | Python | processing/simulate_signals.py | SAKEverse/sake-plot | a08973222109981b36d204a754d0bf34d95be192 | [
"Apache-2.0"
] | null | null | null | processing/simulate_signals.py | SAKEverse/sake-plot | a08973222109981b36d204a754d0bf34d95be192 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:21:18.000Z | 2021-11-30T16:21:18.000Z | processing/simulate_signals.py | SAKEverse/sake-plot | a08973222109981b36d204a754d0bf34d95be192 | [
"Apache-2.0"
] | null | null | null | ####----------------------- IMPORTS ------------------- ######
import numpy as np
from processing.stft import get_freq_index, Stft, Properties
####--------------------------------------------------- ######
class SimSignal(Properties):
""" Simulate eeg/lgp signals
"""
def __init__(self, properties:dict, time_duration:np.ndarray):
# pass parameters to object
super().__init__(properties)
self.time_duration = time_duration
# create time vector
self.t = np.arange(0, self.time_duration, 1/self.sampling_rate)
def make_sine(self, freq:float, amp:float):
return np.sin(freq*self.t*np.pi*2) * amp
def make_sine_norm(self, freq:float, amp:float, rhythm):
"""
Create normally distributed sine wave
Parameters
----------
amp : float, amplitude
freq : float, frequency
rhythm : float, rhythmicity 0 to inf
Returns
-------
signal : np.ndarray, 1d signal
"""
# create template wave
template = np.sin(freq*self.t[0:int(np.ceil(self.sampling_rate/freq))+1]*np.pi*2)
# create normaly distributed events
mu = 1/freq
        sigma = mu/(rhythm + 1e-10)  # small epsilon avoids division by zero when rhythm == 0
n_events = int(np.ceil(self.t.shape[0]/template.shape[0]))
s = np.random.normal(mu, sigma, int(n_events *1.2))
# get inter event interval and find index
isi = np.cumsum(s)
index = get_freq_index(self.t, isi)
# create logic vector to be convolved
logic_vector = np.zeros(self.t.shape)
logic_vector[index] = 1
# return convolved signal
return np.convolve(logic_vector, template, mode = 'same') * amp
def add_sines(self, freq:list, amp:list):
"""
Add multiple sine waves
Parameters
----------
freq : list
amp : list
Returns
-------
signal : np.ndarray, 1d signal
"""
signal = np.zeros(self.t.shape)
for f,a in zip(freq, amp):
signal += self.make_sine(f, a)
return signal
def add_sines_norm(self, freq:list, amp:list, rhythm:list):
"""
Add multiple sine waves
Parameters
----------
freq : list
amp : list
Returns
-------
signal : np.ndarray, 1d signal
"""
signal = np.zeros(self.t.shape)
for i in range(len(freq)):
signal += self.make_sine_norm(freq[i], amp[i], rhythm[i])
return signal
if __name__ == '__main__':
from matplotlib.pyplot import plot
properties = {'sampling_rate':4000, 'fft_win':5, 'freq_range': [5, 121],
'fft_overlap':0.5, 'mains_noise': [59, 61]}
freq = [10, 60]
amp = [5, 10]
rhythm = [15, 15]
time_duration = 30 # in seconds
obj = SimSignal(properties, time_duration)
signal = obj.add_sines_norm(freq, amp, rhythm) + obj.add_sines([60], [3])
# plot(signal)
# get power
stft_obj = Stft(properties)
freq_vector, pmat = stft_obj.get_stft(signal)
# plot original
plot(freq_vector, np.mean(pmat, axis = 1))
# remove noise
pmat = stft_obj.remove_mains(freq_vector, pmat)
# plot noise removed PSD
plot(freq_vector, np.mean(pmat, axis = 1))
| 24.615942 | 89 | 0.543126 |
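make_sine_norm above builds a quasi-rhythmic oscillation by convolving an impulse train, whose inter-event intervals are normally distributed around the period, with a single sine cycle. A self-contained sketch of that idea in plain numpy (function and parameter names are illustrative and independent of the Stft/Properties classes):

# Standalone illustration of the jittered-impulse-train idea behind
# make_sine_norm; names and defaults here are illustrative.
import numpy as np


def jittered_oscillation(freq, amp, rhythm, duration, fs):
    """Convolve one sine cycle with an impulse train whose inter-event
    intervals are Normal(1/freq, (1/freq)/rhythm)."""
    t = np.arange(0, duration, 1.0 / fs)
    # Template: a single cycle at the target frequency.
    template = np.sin(2.0 * np.pi * freq * t[:int(np.ceil(fs / freq)) + 1])

    period = 1.0 / freq
    sigma = period / (rhythm + 1e-10)  # epsilon guards rhythm == 0
    n_events = int(np.ceil(duration / period) * 1.2)
    event_times = np.cumsum(np.random.normal(period, sigma, n_events))

    # Impulse train: a one at the first sample at or after each event time.
    idx = np.searchsorted(t, event_times)
    idx = idx[idx < t.size]
    impulses = np.zeros_like(t)
    impulses[idx] = 1.0

    return t, np.convolve(impulses, template, mode='same') * amp


t, signal = jittered_oscillation(freq=10, amp=5, rhythm=15, duration=2, fs=1000)
print(signal.shape)  # (2000,)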
7945cbf9ea279ffd72d320cb9709cf7516dec7ed | 3,475 | py | Python | MC-MPC/scripts/plots.py | Anna-Kuosmanen/DAGChainer | 2e095e30dd9b158563c05d088443d6b548eeb870 | [
"MIT"
] | 9 | 2018-04-18T12:48:49.000Z | 2022-03-23T20:53:30.000Z | MC-MPC/scripts/plots.py | Anna-Kuosmanen/DAGChainer | 2e095e30dd9b158563c05d088443d6b548eeb870 | [
"MIT"
] | null | null | null | MC-MPC/scripts/plots.py | Anna-Kuosmanen/DAGChainer | 2e095e30dd9b158563c05d088443d6b548eeb870 | [
"MIT"
] | 3 | 2019-04-10T13:02:47.000Z | 2022-03-23T20:54:02.000Z | import matplotlib.pyplot as plt
import math
import time
import solvers
import timeit
TEST_GRAPH_NAME = "test_graph"
def plot(k_values, n_values , m_values, title, filename):
plt.clf()
m = 0.5
results_decomp = [0]*len(values)
results_normal = [0]*len(values)
index = 0
for n in n_values:
solvers.generate_k_path_graph(k_values[index], n, m_values[index], TEST_GRAPH_NAME)
#solvers.generate_dag(n, 0.9, TEST_GRAPH_NAME)
print "GRAPH OF SIZE: " +str(n)
start = time.time()
decomposed_sum = solvers.solve_with_decomposition(TEST_GRAPH_NAME)
print decomposed_sum
decomp_end = time.time()
normal_sum = solvers.solve_without_decomposition(TEST_GRAPH_NAME)
print normal_sum
end = time.time()
results_decomp[index] = decomp_end - start
print "decomp: " + str(results_decomp[index])
results_normal[index] = end - decomp_end
print "normal: " + str(results_normal[index])
index += 1
plt.plot(n_values, results_decomp, 'r', label="with decomposition")
plt.plot(n_values, results_normal, 'b', label="without decomposition")
plt.ylabel("time in seconds")
plt.xlabel("n (path length)")
plt.title(title)
plt.legend()
plt.savefig(filename)
def plot_alt(k_values, n_values , m_values, title, filename):
plt.clf()
m = 0.5
results_decomp = [0]*len(values)
results_normal = [0]*len(values)
index = 0
for n in n_values:
solvers.generate_k_path_graph_alt(k_values[index], n, m_values[index], TEST_GRAPH_NAME)
#solvers.generate_dag(n, 0.9, TEST_GRAPH_NAME)
print "GRAPH OF SIZE: " +str(n)
start = time.time()
decomposed_sum = solvers.solve_with_decomposition(TEST_GRAPH_NAME)
print decomposed_sum
decomp_end = time.time()
normal_sum = solvers.solve_without_decomposition(TEST_GRAPH_NAME)
print normal_sum
end = time.time()
results_decomp[index] = decomp_end - start
print "decomp: " + str(results_decomp[index])
results_normal[index] = end - decomp_end
print "normal: " + str(results_normal[index])
index += 1
plt.plot(n_values, results_decomp, 'r', label="with decomposition")
plt.plot(n_values, results_normal, 'b', label="without decomposition")
plt.ylabel("time in seconds")
plt.xlabel("n (path length)")
plt.title(title)
plt.legend()
plt.savefig(filename)
values = [500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000, 7500, 8000, 8500, 9000, 9500, 10000]
#values = [10, 100]
def k_10():
k_values = len(values)*[10]
plot(k_values, values, values, "k = 10, m = n", "k10.png")
def k_logn():
k_values = map(lambda n: max(10, math.ceil(math.log(n))), values)
plot(k_values, values, values, "k = log(n), m = n", "klogn.png")
def k_sqrt():
k_values = map(lambda n: max(10, math.ceil(math.sqrt(n))), values)
plot(k_values, values, values, "k = sqrt(n), m = n", "ksqrtn.png")
def k_div10():
k_values = map(lambda n: n/10, values)
plot(k_values, values, values, "k = n/10, m = n", "kdiv10.png")
def k_10_alt():
k_values = len(values)*[10]
plot_alt(k_values, values, values, "k = 10, m = n", "k10_alt.png")
def k_sqrt_alt():
k_values = map(lambda n: max(10, math.ceil(math.sqrt(n))), values)
plot_alt(k_values, values, values, "k = sqrt(n), m = n", "ksqrtn_alt.png")
def k_div10_alt():
k_values = map(lambda n: n/10, values)
plot_alt(k_values, values, values, "k = n/10, m = n", "k_div10_alt.png")
k_10()
k_sqrt()
k_10_alt()
k_sqrt_alt()
k_div10()
k_div10_alt() | 32.476636 | 129 | 0.686043 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.