max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---
test/dialect/mssql/test_compiler.py | gujun4990/sqlalchemy | 1 | 1000 | # -*- encoding: utf-8
from sqlalchemy.testing import eq_, is_
from sqlalchemy import schema
from sqlalchemy.sql import table, column, quoted_name
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import mxodbc
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import sql
from sqlalchemy import Integer, String, Table, Column, select, MetaData,\
update, delete, insert, extract, union, func, PrimaryKeyConstraint, \
UniqueConstraint, Index, Sequence, literal
from sqlalchemy import testing
from sqlalchemy.dialects.mssql import base
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mssql.dialect()
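    # Each test below compiles a SQLAlchemy construct against the MSSQL dialect and
    # asserts the exact T-SQL string it produces, e.g. self.assert_compile(sql.false(), "0").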
def test_true_false(self):
self.assert_compile(
sql.false(), "0"
)
self.assert_compile(
sql.true(),
"1"
)
def test_select(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select(),
'SELECT sometable.somecolumn FROM sometable')
def test_select_with_nolock(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.select().with_hint(t, 'WITH (NOLOCK)'),
'SELECT sometable.somecolumn FROM sometable WITH (NOLOCK)')
def test_select_with_nolock_schema(self):
m = MetaData()
t = Table('sometable', m, Column('somecolumn', Integer),
schema='test_schema')
self.assert_compile(
t.select().with_hint(t, 'WITH (NOLOCK)'),
'SELECT test_schema.sometable.somecolumn '
'FROM test_schema.sometable WITH (NOLOCK)')
def test_select_w_order_by_collate(self):
m = MetaData()
t = Table('sometable', m, Column('somecolumn', String))
self.assert_compile(
select([t]).
order_by(
t.c.somecolumn.collate("Latin1_General_CS_AS_KS_WS_CI").asc()),
"SELECT sometable.somecolumn FROM sometable "
"ORDER BY sometable.somecolumn COLLATE "
"Latin1_General_CS_AS_KS_WS_CI ASC"
)
def test_join_with_hint(self):
t1 = table('t1',
column('a', Integer),
column('b', String),
column('c', String),
)
t2 = table('t2',
column("a", Integer),
column("b", Integer),
column("c", Integer),
)
join = t1.join(t2, t1.c.a == t2.c.a).\
select().with_hint(t1, 'WITH (NOLOCK)')
self.assert_compile(
join,
'SELECT t1.a, t1.b, t1.c, t2.a, t2.b, t2.c '
'FROM t1 WITH (NOLOCK) JOIN t2 ON t1.a = t2.a'
)
def test_insert(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.insert(),
'INSERT INTO sometable (somecolumn) VALUES '
'(:somecolumn)')
def test_update(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.update(t.c.somecolumn == 7),
'UPDATE sometable SET somecolumn=:somecolum'
'n WHERE sometable.somecolumn = '
':somecolumn_1', dict(somecolumn=10))
def test_insert_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.insert().
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"INSERT INTO sometable WITH (PAGLOCK) "
"(somecolumn) VALUES (:somecolumn)"
)
def test_update_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn == "q").
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"UPDATE sometable WITH (PAGLOCK) "
"SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_update_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.update().where(t.c.somecolumn == "q").
values(somecolumn="x").
with_hint("XYZ", "mysql"),
"UPDATE sometable SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.delete().where(t.c.somecolumn == "q").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"DELETE FROM sometable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.delete().
where(t.c.somecolumn == "q").
with_hint("XYZ", dialect_name="mysql"),
"DELETE FROM sometable WHERE "
"sometable.somecolumn = :somecolumn_1"
)
def test_delete_extra_froms(self):
t1 = table('t1', column('c1'))
t2 = table('t2', column('c1'))
q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM t1 FROM t1, t2 WHERE t1.c1 = t2.c1"
)
def test_delete_extra_froms_alias(self):
a1 = table('t1', column('c1')).alias('a1')
t2 = table('t2', column('c1'))
q = sql.delete(a1).where(a1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM a1 FROM t1 AS a1, t2 WHERE a1.c1 = t2.c1"
)
self.assert_compile(sql.delete(a1), "DELETE FROM t1 AS a1")
def test_update_from_hint(self):
t = table('sometable', column('somecolumn'))
t2 = table('othertable', column('somecolumn'))
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn == t2.c.somecolumn).
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=t2,
dialect_name=darg),
"UPDATE sometable SET somecolumn=:somecolumn "
"FROM sometable, othertable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = othertable.somecolumn"
)
def test_update_to_select_schema(self):
meta = MetaData()
table = Table(
"sometable", meta,
Column("sym", String),
Column("val", Integer),
schema="schema"
)
other = Table(
"#other", meta,
Column("sym", String),
Column("newval", Integer)
)
stmt = table.update().values(
val=select([other.c.newval]).
where(table.c.sym == other.c.sym).as_scalar())
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"(SELECT [#other].newval FROM [#other] "
"WHERE [schema].sometable.sym = [#other].sym)",
)
stmt = table.update().values(val=other.c.newval).\
where(table.c.sym == other.c.sym)
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"[#other].newval FROM [schema].sometable, "
"[#other] WHERE [schema].sometable.sym = [#other].sym",
)
# TODO: not supported yet.
# def test_delete_from_hint(self):
# t = table('sometable', column('somecolumn'))
# t2 = table('othertable', column('somecolumn'))
# for darg in ("*", "mssql"):
# self.assert_compile(
# t.delete().where(t.c.somecolumn==t2.c.somecolumn).
# with_hint("WITH (PAGLOCK)",
# selectable=t2,
# dialect_name=darg),
# ""
# )
def test_strict_binds(self):
"""test the 'strict' compiler binds."""
from sqlalchemy.dialects.mssql.base import MSSQLStrictCompiler
mxodbc_dialect = mxodbc.dialect()
mxodbc_dialect.statement_compiler = MSSQLStrictCompiler
t = table('sometable', column('foo'))
for expr, compile in [
(
select([literal("x"), literal("y")]),
"SELECT 'x' AS anon_1, 'y' AS anon_2",
),
(
select([t]).where(t.c.foo.in_(['x', 'y', 'z'])),
"SELECT sometable.foo FROM sometable WHERE sometable.foo "
"IN ('x', 'y', 'z')",
),
(
t.c.foo.in_([None]),
"sometable.foo IN (NULL)"
)
]:
self.assert_compile(expr, compile, dialect=mxodbc_dialect)
def test_in_with_subqueries(self):
"""Test removal of legacy behavior that converted "x==subquery"
to use IN.
"""
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select().where(t.c.somecolumn
== t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn = '
'(SELECT sometable.somecolumn FROM '
'sometable)')
self.assert_compile(t.select().where(t.c.somecolumn
!= t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn != '
'(SELECT sometable.somecolumn FROM '
'sometable)')
@testing.uses_deprecated
def test_count(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.count(),
'SELECT count(sometable.somecolumn) AS '
'tbl_row_count FROM sometable')
def test_noorderby_insubquery(self):
"""test that the ms-sql dialect removes ORDER BY clauses from
subqueries"""
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
q = select([table1.c.myid],
order_by=[table1.c.myid]).alias('foo')
crit = q.c.myid == table1.c.myid
self.assert_compile(select(['*'], crit),
"SELECT * FROM (SELECT mytable.myid AS "
"myid FROM mytable) AS foo, mytable WHERE "
"foo.myid = mytable.myid")
def test_force_schema_quoted_name_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema=quoted_name("foo.dbo", True)
)
self.assert_compile(
select([tbl]),
"SELECT [foo.dbo].test.id FROM [foo.dbo].test"
)
def test_force_schema_quoted_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema=quoted_name("foo.dbo", True)
)
self.assert_compile(
select([tbl]),
"SELECT [foo.dbo].test.id FROM [foo.dbo].test"
)
def test_force_schema_quoted_name_w_dot_case_sensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema=quoted_name("Foo.dbo", True)
)
self.assert_compile(
select([tbl]),
"SELECT [Foo.dbo].test.id FROM [Foo.dbo].test"
)
def test_force_schema_quoted_w_dot_case_sensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema="[Foo.dbo]"
)
self.assert_compile(
select([tbl]),
"SELECT [Foo.dbo].test.id FROM [Foo.dbo].test"
)
def test_schema_autosplit_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema="foo.dbo"
)
self.assert_compile(
select([tbl]),
"SELECT foo.dbo.test.id FROM foo.dbo.test"
)
def test_schema_autosplit_w_dot_case_sensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema="Foo.dbo"
)
self.assert_compile(
select([tbl]),
"SELECT [Foo].dbo.test.id FROM [Foo].dbo.test"
)
def test_owner_database_pairs(self):
dialect = mssql.dialect()
for identifier, expected_schema, expected_owner in [
("foo", None, "foo"),
("foo.bar", "foo", "bar"),
("Foo.Bar", "Foo", "Bar"),
("[Foo.Bar]", None, "Foo.Bar"),
("[Foo.Bar].[bat]", "Foo.Bar", "bat"),
]:
schema, owner = base._owner_plus_db(dialect, identifier)
eq_(owner, expected_owner)
eq_(schema, expected_schema)
def test_delete_schema(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True), schema='paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM paj.test WHERE paj.test.id = '
':id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM paj.test WHERE paj.test.id IN '
'(SELECT paj.test.id FROM paj.test '
'WHERE paj.test.id = :id_1)')
def test_delete_schema_multipart(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer,
primary_key=True),
schema='banana.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id IN (SELECT banana.paj.test.id '
'FROM banana.paj.test WHERE '
'banana.paj.test.id = :id_1)')
def test_delete_schema_multipart_needs_quoting(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema='banana split.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id IN ('
'SELECT [banana split].paj.test.id FROM '
'[banana split].paj.test WHERE '
'[banana split].paj.test.id = :id_1)')
def test_delete_schema_multipart_both_need_quoting(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True),
schema='banana split.paj with a space')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].[paj with a '
'space].test WHERE [banana split].[paj '
'with a space].test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(
tbl.delete().where(tbl.c.id.in_(s)),
"DELETE FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id IN "
"(SELECT [banana split].[paj with a space].test.id "
"FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id = :id_1)"
)
def test_union(self):
t1 = table(
't1', column('col1'), column('col2'),
column('col3'), column('col4'))
t2 = table(
't2', column('col1'), column('col2'),
column('col3'), column('col4'))
s1, s2 = select(
[t1.c.col3.label('col3'), t1.c.col4.label('col4')],
t1.c.col2.in_(['t1col2r1', 't1col2r2'])), \
select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
t2.c.col2.in_(['t2col2r2', 't2col2r3']))
u = union(s1, s2, order_by=['col3', 'col4'])
self.assert_compile(u,
'SELECT t1.col3 AS col3, t1.col4 AS col4 '
'FROM t1 WHERE t1.col2 IN (:col2_1, '
':col2_2) UNION SELECT t2.col3 AS col3, '
't2.col4 AS col4 FROM t2 WHERE t2.col2 IN '
'(:col2_3, :col2_4) ORDER BY col3, col4')
self.assert_compile(u.alias('bar').select(),
'SELECT bar.col3, bar.col4 FROM (SELECT '
't1.col3 AS col3, t1.col4 AS col4 FROM t1 '
'WHERE t1.col2 IN (:col2_1, :col2_2) UNION '
'SELECT t2.col3 AS col3, t2.col4 AS col4 '
'FROM t2 WHERE t2.col2 IN (:col2_3, '
':col2_4)) AS bar')
def test_function(self):
self.assert_compile(func.foo(1, 2), 'foo(:foo_1, :foo_2)')
self.assert_compile(func.current_time(), 'CURRENT_TIME')
self.assert_compile(func.foo(), 'foo()')
m = MetaData()
t = Table(
'sometable', m, Column('col1', Integer), Column('col2', Integer))
self.assert_compile(select([func.max(t.c.col1)]),
'SELECT max(sometable.col1) AS max_1 FROM '
'sometable')
def test_function_overrides(self):
self.assert_compile(func.current_date(), "GETDATE()")
self.assert_compile(func.length(3), "LEN(:length_1)")
def test_extract(self):
t = table('t', column('col1'))
for field in 'day', 'month', 'year':
self.assert_compile(
select([extract(field, t.c.col1)]),
'SELECT DATEPART(%s, t.col1) AS anon_1 FROM t' % field)
def test_update_returning(self):
table1 = table(
'mytable',
column('myid', Integer),
column('name', String(128)),
column('description', String(128)))
u = update(
table1,
values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name')
u = update(table1, values=dict(name='foo')).returning(table1)
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description')
u = update(
table1,
values=dict(
name='foo')).returning(table1).where(table1.c.name == 'bar')
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description WHERE mytable.name = '
':name_1')
u = update(table1, values=dict(name='foo'
)).returning(func.length(table1.c.name))
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'LEN(inserted.name) AS length_1')
def test_delete_returning(self):
table1 = table(
'mytable', column('myid', Integer),
column('name', String(128)), column('description', String(128)))
d = delete(table1).returning(table1.c.myid, table1.c.name)
self.assert_compile(d,
'DELETE FROM mytable OUTPUT deleted.myid, '
'deleted.name')
d = delete(table1).where(table1.c.name == 'bar'
).returning(table1.c.myid,
table1.c.name)
self.assert_compile(d,
'DELETE FROM mytable OUTPUT deleted.myid, '
'deleted.name WHERE mytable.name = :name_1')
def test_insert_returning(self):
table1 = table(
'mytable', column('myid', Integer),
column('name', String(128)), column('description', String(128)))
i = insert(
table1,
values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'inserted.myid, inserted.name VALUES '
'(:name)')
i = insert(table1, values=dict(name='foo')).returning(table1)
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description VALUES (:name)')
i = insert(table1, values=dict(name='foo'
)).returning(func.length(table1.c.name))
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'LEN(inserted.name) AS length_1 VALUES '
'(:name)')
def test_limit_using_top(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(10)
self.assert_compile(
s,
"SELECT TOP 10 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y",
checkparams={'x_1': 5}
)
def test_limit_zero_using_top(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(0)
self.assert_compile(
s,
"SELECT TOP 0 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y",
checkparams={'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()['x'][1])
def test_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).offset(20)
# test that the select is not altered with subsequent compile
# calls
for i in range(2):
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y FROM (SELECT t.x AS x, t.y "
"AS y, ROW_NUMBER() OVER (ORDER BY t.y) AS "
"mssql_rn FROM t WHERE t.x = :x_1) AS "
"anon_1 WHERE mssql_rn > :param_1",
checkparams={'param_1': 20, 'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()['x'][1])
def test_limit_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t.x AS x, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn "
"FROM t "
"WHERE t.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()['x'][1])
assert t.c.y in set(c._create_result_map()['y'][1])
def test_limit_offset_w_ambiguous_cols(self):
t = table('t', column('x', Integer), column('y', Integer))
cols = [t.c.x, t.c.x.label('q'), t.c.x.label('p'), t.c.y]
s = select(cols).where(t.c.x == 5).order_by(t.c.y).limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.q, anon_1.p, anon_1.y "
"FROM (SELECT t.x AS x, t.x AS q, t.x AS p, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn "
"FROM t "
"WHERE t.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 4)
result_map = c._create_result_map()
for col in cols:
is_(result_map[col.key][1][0], col)
def test_limit_offset_with_correlated_order_by(self):
t1 = table('t1', column('x', Integer), column('y', Integer))
t2 = table('t2', column('x', Integer), column('y', Integer))
order_by = select([t2.c.y]).where(t1.c.x == t2.c.x).as_scalar()
s = select([t1]).where(t1.c.x == 5).order_by(order_by) \
.limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t1.x AS x, t1.y AS y, "
"ROW_NUMBER() OVER (ORDER BY "
"(SELECT t2.y FROM t2 WHERE t1.x = t2.x)"
") AS mssql_rn "
"FROM t1 "
"WHERE t1.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t1.c.x in set(c._create_result_map()['x'][1])
assert t1.c.y in set(c._create_result_map()['y'][1])
def test_offset_dont_misapply_labelreference(self):
m = MetaData()
t = Table('t', m, Column('x', Integer))
expr1 = func.foo(t.c.x).label('x')
expr2 = func.foo(t.c.x).label('y')
stmt1 = select([expr1]).order_by(expr1.desc()).offset(1)
stmt2 = select([expr2]).order_by(expr2.desc()).offset(1)
self.assert_compile(
stmt1,
"SELECT anon_1.x FROM (SELECT foo(t.x) AS x, "
"ROW_NUMBER() OVER (ORDER BY foo(t.x) DESC) AS mssql_rn FROM t) "
"AS anon_1 WHERE mssql_rn > :param_1"
)
self.assert_compile(
stmt2,
"SELECT anon_1.y FROM (SELECT foo(t.x) AS y, "
"ROW_NUMBER() OVER (ORDER BY foo(t.x) DESC) AS mssql_rn FROM t) "
"AS anon_1 WHERE mssql_rn > :param_1"
)
def test_limit_zero_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(0).offset(0)
# render the LIMIT of zero, but not the OFFSET
# of zero, so produces TOP 0
self.assert_compile(
s,
"SELECT TOP 0 t.x, t.y FROM t "
"WHERE t.x = :x_1 ORDER BY t.y",
checkparams={'x_1': 5}
)
def test_primary_key_no_identity(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, autoincrement=False,
primary_key=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL, "
"PRIMARY KEY (id))"
)
def test_primary_key_defaults_to_identity(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, primary_key=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1), "
"PRIMARY KEY (id))"
)
def test_identity_no_primary_key(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, autoincrement=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1)"
")"
)
def test_identity_separate_from_primary_key(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, autoincrement=False,
primary_key=True),
Column('x', Integer, autoincrement=True)
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL, "
"x INTEGER NOT NULL IDENTITY(1,1), "
"PRIMARY KEY (id))"
)
def test_identity_illegal_two_autoincrements(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, autoincrement=True),
Column('id2', Integer, autoincrement=True),
)
# this will be rejected by the database, just asserting this is what
# the two autoincrements will do right now
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1), "
"id2 INTEGER NOT NULL IDENTITY(1,1))"
)
def test_identity_start_0(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, mssql_identity_start=0,
primary_key=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(0,1), "
"PRIMARY KEY (id))"
)
def test_identity_increment_5(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, mssql_identity_increment=5,
primary_key=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,5), "
"PRIMARY KEY (id))"
)
def test_sequence_start_0(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence('', 0), primary_key=True))
with testing.expect_deprecated(
"Use of Sequence with SQL Server in order to affect "):
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(0,1), "
"PRIMARY KEY (id))"
)
def test_sequence_non_primary_key(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence('', start=5),
primary_key=False))
with testing.expect_deprecated(
"Use of Sequence with SQL Server in order to affect "):
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(5,1))"
)
def test_sequence_ignore_nullability(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence('', start=5),
nullable=True))
with testing.expect_deprecated(
"Use of Sequence with SQL Server in order to affect "):
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(5,1))"
)
def test_table_pkc_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NOT NULL, "
"PRIMARY KEY CLUSTERED (x, y))"
)
def test_table_pkc_explicit_nonclustered(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
PrimaryKeyConstraint("x", "y", mssql_clustered=False))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NOT NULL, "
"PRIMARY KEY NONCLUSTERED (x, y))"
)
def test_table_idx_explicit_nonclustered(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False)
)
idx = Index("myidx", tbl.c.x, tbl.c.y, mssql_clustered=False)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE NONCLUSTERED INDEX myidx ON test (x, y)"
)
def test_table_uc_explicit_nonclustered(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
UniqueConstraint("x", "y", mssql_clustered=False))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NULL, y INTEGER NULL, "
"UNIQUE NONCLUSTERED (x, y))"
)
def test_table_uc_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NULL, "
"PRIMARY KEY (x), UNIQUE CLUSTERED (y))"
)
def test_index_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer))
idx = Index("foo", tbl.c.id, mssql_clustered=True)
self.assert_compile(schema.CreateIndex(idx),
"CREATE CLUSTERED INDEX foo ON test (id)"
)
def test_index_ordering(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x.desc(), "y")
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x DESC, y)"
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer)
)
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo (x > 5)"
)
def test_drop_index_w_schema(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer),
schema='bar'
)
self.assert_compile(
schema.DropIndex(Index("idx_foo", t1.c.x)),
"DROP INDEX idx_foo ON bar.foo"
)
def test_index_extra_include_1(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x, mssql_include=['y'])
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y)"
)
def test_index_extra_include_2(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x, mssql_include=[tbl.c.y])
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y)"
)
class SchemaTest(fixtures.TestBase):
def setup(self):
t = Table('sometable', MetaData(),
Column('pk_column', Integer),
Column('test_column', String)
)
self.column = t.c.test_column
dialect = mssql.dialect()
self.ddl_compiler = dialect.ddl_compiler(dialect,
schema.CreateTable(t))
def _column_spec(self):
return self.ddl_compiler.get_column_specification(self.column)
def test_that_mssql_default_nullability_emits_null(self):
eq_("test_column VARCHAR(max) NULL", self._column_spec())
def test_that_mssql_none_nullability_does_not_emit_nullability(self):
self.column.nullable = None
eq_("test_column VARCHAR(max)", self._column_spec())
def test_that_mssql_specified_nullable_emits_null(self):
self.column.nullable = True
eq_("test_column VARCHAR(max) NULL", self._column_spec())
def test_that_mssql_specified_not_nullable_emits_not_null(self):
self.column.nullable = False
eq_("test_column VARCHAR(max) NOT NULL", self._column_spec())
| 2.296875 | 2 |
sdk/python/pulumi_kubernetes/coordination/v1/_inputs.py | polivbr/pulumi-kubernetes | 277 | 1001 | # coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta
__all__ = [
'LeaseSpecArgs',
'LeaseArgs',
]
@pulumi.input_type
class LeaseSpecArgs:
def __init__(__self__, *,
acquire_time: Optional[pulumi.Input[str]] = None,
holder_identity: Optional[pulumi.Input[str]] = None,
lease_duration_seconds: Optional[pulumi.Input[int]] = None,
lease_transitions: Optional[pulumi.Input[int]] = None,
renew_time: Optional[pulumi.Input[str]] = None):
"""
LeaseSpec is a specification of a Lease.
:param pulumi.Input[str] acquire_time: acquireTime is a time when the current lease was acquired.
:param pulumi.Input[str] holder_identity: holderIdentity contains the identity of the holder of a current lease.
        :param pulumi.Input[int] lease_duration_seconds: leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of the last observed renewTime.
:param pulumi.Input[int] lease_transitions: leaseTransitions is the number of transitions of a lease between holders.
:param pulumi.Input[str] renew_time: renewTime is a time when the current holder of a lease has last updated the lease.
"""
if acquire_time is not None:
pulumi.set(__self__, "acquire_time", acquire_time)
if holder_identity is not None:
pulumi.set(__self__, "holder_identity", holder_identity)
if lease_duration_seconds is not None:
pulumi.set(__self__, "lease_duration_seconds", lease_duration_seconds)
if lease_transitions is not None:
pulumi.set(__self__, "lease_transitions", lease_transitions)
if renew_time is not None:
pulumi.set(__self__, "renew_time", renew_time)
@property
@pulumi.getter(name="acquireTime")
def acquire_time(self) -> Optional[pulumi.Input[str]]:
"""
acquireTime is a time when the current lease was acquired.
"""
return pulumi.get(self, "acquire_time")
@acquire_time.setter
def acquire_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acquire_time", value)
@property
@pulumi.getter(name="holderIdentity")
def holder_identity(self) -> Optional[pulumi.Input[str]]:
"""
holderIdentity contains the identity of the holder of a current lease.
"""
return pulumi.get(self, "holder_identity")
@holder_identity.setter
def holder_identity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "holder_identity", value)
@property
@pulumi.getter(name="leaseDurationSeconds")
def lease_duration_seconds(self) -> Optional[pulumi.Input[int]]:
"""
        leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of the last observed renewTime.
"""
return pulumi.get(self, "lease_duration_seconds")
@lease_duration_seconds.setter
def lease_duration_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "lease_duration_seconds", value)
@property
@pulumi.getter(name="leaseTransitions")
def lease_transitions(self) -> Optional[pulumi.Input[int]]:
"""
leaseTransitions is the number of transitions of a lease between holders.
"""
return pulumi.get(self, "lease_transitions")
@lease_transitions.setter
def lease_transitions(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "lease_transitions", value)
@property
@pulumi.getter(name="renewTime")
def renew_time(self) -> Optional[pulumi.Input[str]]:
"""
renewTime is a time when the current holder of a lease has last updated the lease.
"""
return pulumi.get(self, "renew_time")
@renew_time.setter
def renew_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "renew_time", value)
@pulumi.input_type
class LeaseArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
spec: Optional[pulumi.Input['LeaseSpecArgs']] = None):
"""
Lease defines a lease concept.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input['LeaseSpecArgs'] spec: Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
if api_version is not None:
pulumi.set(__self__, "api_version", 'coordination.k8s.io/v1')
if kind is not None:
pulumi.set(__self__, "kind", 'Lease')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
"""
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['LeaseSpecArgs']]:
"""
Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['LeaseSpecArgs']]):
pulumi.set(self, "spec", value)
| 2.125 | 2 |
Components/Align All Components.py | davidtahim/Glyphs-Scripts | 1 | 1002 | #MenuTitle: Align All Components
# -*- coding: utf-8 -*-
__doc__="""
Fakes auto-alignment in glyphs that cannot be auto-aligned.
"""
import GlyphsApp
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
thisFontMasterID = thisFont.selectedFontMaster.id # active master
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def process( thisLayer ):
advance = 0.0
for thisComponent in thisLayer.components:
thisComponent.position = NSPoint( advance, 0.0 )
advance += thisComponent.component.layers[thisFontMasterID].width
thisLayer.width = advance
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
for thisLayer in listOfSelectedLayers:
thisGlyph = thisLayer.parent
print "Aligning components in:", thisGlyph.name
thisGlyph.beginUndo() # begin undo grouping
process( thisLayer )
thisGlyph.endUndo() # end undo grouping
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
| 2.328125 | 2 |
SC101Lecture_code/SC101_week4/draw_basic.py | Jewel-Hong/SC-projects | 0 | 1003 | #!/usr/bin/env python3
"""
Stanford CS106AP
TK Drawing Lecture Exercises
Courtesy of <NAME>
"""
import tkinter as tk
# provided function, this code is complete
def make_canvas(width, height):
"""
Creates and returns a drawing canvas
of the given int size, ready for drawing.
"""
top = tk.Tk()
top.minsize(width=width + 10, height=height + 10)
canvas = tk.Canvas(top, width=width, height=height)
canvas.pack()
canvas.xview_scroll(6, "units") # hack so (0, 0) works correctly
canvas.yview_scroll(6, "units")
return canvas
def main():
w = make_canvas(1000, 500)
w.create_line(0, 0, 1000, 500, width=5, fill='red')
w.create_text(0, 0, text='SC101', anchor=tk.NW, font='times 80')
    tk.mainloop()  # keep the window open until the user closes it
if __name__ == '__main__':
main()
| 4.03125 | 4 |
audio/audio_server.py | artigianitecnologici/marrtino_apps | 0 | 1004 | # Only PCM 16 bit wav 44100 Hz - Use audacity or sox to convert audio files.
# WAV generation
# Synth
# sox -n --no-show-progress -G --channels 1 -r 44100 -b 16 -t wav bip.wav synth 0.25 sine 800
# sox -n --no-show-progress -G --channels 1 -r 44100 -b 16 -t wav bop.wav synth 0.25 sine 400
# Voices
# pico2wave -l "it-IT" -w start.wav "Bene! Si Parte!"
# Then convert wav files to to 44100 Hz
# Note: some initial sound may not be played.
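# The same 44100 Hz conversion can be done from Python with the 'sox' wrapper imported
# below, mirroring the resampling this script performs in say() (file names are placeholders):
#   import sox
#   tfm = sox.Transformer()
#   tfm.rate(samplerate=44100)
#   tfm.build('input.wav', 'output_44100.wav')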
# alsaaudio examples
# https://larsimmisch.github.io/pyalsaaudio/libalsaaudio.html
import threading
import time
import socket
import sys, os, platform
import re
import wave
import argparse
import rospy
use_sound_play = False
use_alsaaudio = True
try:
from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient
except:
print('ROS package sound_play required.')
print('Install with: sudo apt-get install ros-kinetic-audio-common libasound2')
use_sound_play = False
#sys.exit(0)
try:
import sox
except:
print('sox required. Install with: pip install --user sox')
sys.exit(0)
try:
import alsaaudio
except:
print('alsaaudio required. Install with: pip install --user pyalsaaudio')
use_alsaaudio = False
#sys.exit(0)
from asr_server import ASRServer
SOUNDS_DIR = "sounds/" # dir with sounds
soundfile = None # sound file
tts_server = None
asr_server = None
# Legacy PyAudio-style stream callback; unused in this script (pyaudio is never
# imported), playback goes through alsaaudio instead.
def TTS_callback(in_data, frame_count, time_info, status):
global soundfile
if (soundfile==None):
return (None, True)
else:
data = soundfile.readframes(frame_count)
return (data, pyaudio.paContinue)
class TTSServer(threading.Thread):
def __init__(self, port, output_device):
global use_alsaaudio, use_sound_play
threading.Thread.__init__(self)
# Initialize audio player
self.streaming = False
self.output_device = output_device
self.soundhandle = None
m = platform.machine()
print "Machine type:" , m
if (m[0:3]=='arm'):
use_sound_play = False
if (use_sound_play):
os.system('roslaunch sound_play.launch &')
time.sleep(5)
rospy.init_node('sound_client', disable_signals=True)
use_alsaaudio = False
elif (use_alsaaudio):
self.init_alsaaudio()
else:
            print('Cannot initialize audio interface')
# Create a TCP/IP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.settimeout(3)
# Bind the socket to the port
server_address = ('', port)
self.sock.bind(server_address)
self.sock.listen(1)
print "TTS Server running on port ", port, " ..."
self.dorun = True
self.connection = None
# Dictionary of sounds
self.Sounds = {}
self.Sounds['bip'] = wave.open(SOUNDS_DIR+'bip.wav', 'rb')
self.idcache = 0
def init_alsaaudio(self):
print("Audio devices available")
pp = alsaaudio.pcms()
if (self.output_device=='sysdefault'):
# select proper sysdefault name
for l in pp:
print(' %s' %l)
if (l[0:10]=='sysdefault'):
print "choose ",l
self.output_device = l # choose default device
break
print("Audio device used: %s" %self.output_device)
self.aa_stream = None
retry = 3
while retry>0:
try:
self.aa_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, self.output_device)
retry = 0
except Exception as e:
print(e)
retry -= 1
time.sleep(2)
if self.aa_stream == None:
retry = 3
while retry>0:
try:
self.output_device='default'
print("Audio device used: %s" %self.output_device)
self.aa_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, self.output_device)
retry = 0
except Exception as e:
print(e)
retry -= 1
time.sleep(2)
self.audio_rate = 44100
self.periodsize = self.audio_rate / 8
if self.aa_stream != None:
self.aa_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
self.aa_stream.setchannels(1)
self.aa_stream.setrate(self.audio_rate)
self.aa_stream.setperiodsize(self.periodsize)
def stop(self):
self.dorun = False
def connect(self):
connected = False
while (self.dorun and not connected):
try:
# print 'Waiting for a connection ...'
# Wait for a connection
self.connection, client_address = self.sock.accept()
self.connection.settimeout(3) # timeout when listening (exit with CTRL+C)
connected = True
print 'TTS Server Connection from ', client_address
except:
pass #print "Listen again ..."
def reply(self,mstr):
if (self.connection != None):
try:
mstr = mstr.encode('utf-8')
self.connection.send(mstr+'\n\r')
except:
print('Connection closed')
    def setVolume(self,volperc): # volume in percentage [0-100]
cmdstr = 'amixer set PCM %d%%' %volperc
os.system(cmdstr)
def run(self):
global asr_server
if (use_sound_play and self.soundhandle == None):
self.soundhandle = SoundClient()
time.sleep(3)
self.setVolume(99) # set volume (99% = +3 dB)
#print('bip')
#self.play('bip')
#time.sleep(3)
self.say('Hello!', 'en')
self.say('Audio server is running.', 'en')
time.sleep(3)
while (self.dorun):
self.connect()
try:
# Receive the data in small chunks
while (self.dorun):
try:
data = self.connection.recv(320)
data = data.strip()
except socket.timeout:
data = "***"
except:
data = None
if (data!=None and data !="" and data!="***"):
if data!="ASR":
print 'TTS Received [%s]' % data
if (data.startswith('TTS')):
lang = 'en-US' # default language
strsay = data[4:]
if (data[3]=='['):
vd = re.split('\[|\]',data)
lang = vd[1]
strsay = vd[2]
self.say(strsay,lang)
self.reply('OK')
elif (data=="ASR"):
#print('asr request')
bh = asr_server.get_asr()
self.reply(bh)
if bh!='':
print('ASR sent [%s]' %bh)
elif (data.startswith('SOUND')):
self.play(data[6:]) # play this sound
self.reply('OK')
#print 'sending data back to the client'
#self.connection.sendall("OK")
else:
print('Message not understood: %s' %data)
self.reply('ERR')
elif (data == None or data==""):
break
finally:
print 'TTS Server Connection closed.'
# Clean up the connection
if (self.connection != None):
self.connection.close()
self.connection = None
self.say('Audio server has been closed.', 'en')
time.sleep(2)
self.aa_stream = None
def say(self, data, lang):
print 'Say ',data
if (use_sound_play):
voice = 'voice_kal_diphone'
volume = 1.0
print 'Saying: %s' % data
print 'Voice: %s' % voice
print 'Volume: %s' % volume
self.soundhandle.say(data, voice, volume)
rospy.sleep(3)
elif (use_alsaaudio):
cachefile = 'cache'+str(self.idcache)
self.idcache = (self.idcache+1)%10
tmpfile = "/tmp/cache.wav"
ofile = "%s%s.wav" %(SOUNDS_DIR, cachefile)
cmd = 'rm %s %s' %(tmpfile, ofile)
os.system(cmd)
if (lang=='en'):
lang = 'en-US'
elif (len(lang)==2):
lang = lang+'-'+lang.upper()
time.sleep(0.2)
cmd = 'pico2wave -l "%s" -w %s " , %s"' %(lang,tmpfile, data)
print cmd
os.system(cmd)
time.sleep(0.2)
# convert samplerate
tfm = sox.Transformer()
tfm.rate(samplerate=self.audio_rate)
tfm.build(tmpfile, ofile)
time.sleep(0.2)
self.play(cachefile)
else:
print('Cannot play audio. No infrastructure available.')
def play(self, name):
if (use_alsaaudio):
print('Playing %s ...' %name)
soundfile = None
i = 0
while (i<3): #((not name in self.Sounds) and (i<3)):
try:
soundfile = wave.open(SOUNDS_DIR+name+".wav", 'rb')
#self.Sounds[name] = soundfile
except:
print "File %s%s.wav not found." %(SOUNDS_DIR,name)
time.sleep(1)
i += 1
if (soundfile != None and use_alsaaudio): #(name in self.Sounds):
self.playwav_aa(soundfile)
print('Play completed.')
def playwav_aa(self, soundfile):
soundfile.setpos(0)
data = soundfile.readframes(self.periodsize)
while (len(data)>0):
# print('stream data %d' %(len(data)))
if self.aa_stream != None:
self.aa_stream.write(data)
data = soundfile.readframes(self.periodsize)
# def playwav_pa(self, sfile):
# global soundfile
# self.streaming = True
# self.stream = self.pa.open(format = 8, #self.pa.get_format_from_width(f.getsampwidth#()),
# channels = 1, #f.getnchannels(),
# rate = 44100, #f.getframerate(),
# output = True,
# stream_callback = TTS_callback,
# output_device_index = self.output_device)
# soundfile = sfile
# soundfile.setpos(0)
# self.stream.start_stream()
# while self.stream.is_active():
# time.sleep(1.0)
# self.stream.stop_stream()
# self.stream.close()
# self.streaming = False
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='audio_server')
parser.add_argument('-ttsport', type=int, help='TTS server port [default: 9001]', default=9001)
parser.add_argument('-asrport', type=int, help='ASR server port [default: 9002]', default=9002)
parser.add_argument('-device', type=str, help='audio device [default: \'sysdefault\']', default='sysdefault')
args = parser.parse_args()
tts_server = TTSServer(args.ttsport,args.device)
asr_server = ASRServer(args.asrport)
tts_server.start()
time.sleep(1)
asr_server.start()
run = True
while (run):
try:
time.sleep(3)
#if (not tts_server.streaming):
# cmd = 'play -n --no-show-progress -r 44100 -c1 synth 0.1 sine 50 vol 0.01' # keep sound alive
# os.system(cmd)
except KeyboardInterrupt:
print "Exit"
run = False
tts_server.stop()
asr_server.stop()
sys.exit(0)
| 2.359375 | 2 |
torchvision/datasets/kinetics.py | sh1doy/vision | 0 | 1005 | from .video_utils import VideoClips
from .utils import list_dir
from .folder import make_dataset
from .vision import VisionDataset
class Kinetics400(VisionDataset):
"""
`Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_
dataset.
Kinetics-400 is an action recognition video dataset.
This dataset consider every video as a collection of video clips of fixed size, specified
by ``frames_per_clip``, where the step in frames between each clip is given by
``step_between_clips``.
To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
elements will come from video 1, and the next three elements from video 2.
Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
frames in a video might be present.
Internally, it uses a VideoClips object to handle clip creation.
Args:
root (string): Root directory of the Kinetics-400 Dataset.
frames_per_clip (int): number of frames in a clip
step_between_clips (int): number of frames between each clip
transform (callable, optional): A function/transform that takes in a TxHxWxC video
and returns a transformed version.
Returns:
video (Tensor[T, H, W, C]): the `T` video frames
audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
and `L` is the number of points
label (int): class of the video clip
"""
def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None,
extensions=('avi',), transform=None, _precomputed_metadata=None):
super(Kinetics400, self).__init__(root)
extensions = ('avi',)
classes = list(sorted(list_dir(root)))
class_to_idx = {classes[i]: i for i in range(len(classes))}
self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
self.classes = classes
video_list = [x[0] for x in self.samples]
self.video_clips = VideoClips(
video_list,
frames_per_clip,
step_between_clips,
frame_rate,
_precomputed_metadata,
)
self.transform = transform
def __len__(self):
return self.video_clips.num_clips()
def __getitem__(self, idx):
video, audio, info, video_idx = self.video_clips.get_clip(idx)
label = self.samples[video_idx][1]
if self.transform is not None:
video = self.transform(video)
return video, audio, label
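# Usage sketch (the dataset path below is a placeholder, not part of torchvision):
#   dataset = Kinetics400('/data/kinetics400/train', frames_per_clip=16)
#   video, audio, label = dataset[0]   # video is a Tensor[T, H, W, C]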
| 3.015625 | 3 |
venv/lib/python2.7/site-packages/sphinx/builders/qthelp.py | CharleyFarley/ovvio | 0 | 1006 | # -*- coding: utf-8 -*-
"""
sphinx.builders.qthelp
~~~~~~~~~~~~~~~~~~~~~~
Build input files for the Qt collection generator.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import codecs
import posixpath
from os import path
from six import text_type
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util import force_decode
from sphinx.util.pycompat import htmlescape
_idpattern = re.compile(
r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$')
# Qt Help Collection Project (.qhcp).
# Is the input file for the help collection generator.
# It contains references to compressed help files which should be
# included in the collection.
# It may contain various other information for customizing Qt Assistant.
collection_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QHelpCollectionProject version="1.0">
<assistant>
<title>%(title)s</title>
<homePage>%(homepage)s</homePage>
<startPage>%(startpage)s</startPage>
</assistant>
<docFiles>
<generate>
<file>
<input>%(outname)s.qhp</input>
<output>%(outname)s.qch</output>
</file>
</generate>
<register>
<file>%(outname)s.qch</file>
</register>
</docFiles>
</QHelpCollectionProject>
'''
# Qt Help Project (.qhp)
# This is the input file for the help generator.
# It contains the table of contents, indices and references to the
# actual documentation files (*.html).
# In addition it defines a unique namespace for the documentation.
project_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QtHelpProject version="1.0">
<namespace>%(namespace)s</namespace>
<virtualFolder>doc</virtualFolder>
<customFilter name="%(project)s %(version)s">
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
</customFilter>
<filterSection>
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
<toc>
<section title="%(title)s" ref="%(masterdoc)s.html">
%(sections)s
</section>
</toc>
<keywords>
%(keywords)s
</keywords>
<files>
%(files)s
</files>
</filterSection>
</QtHelpProject>
'''
section_template = '<section title="%(title)s" ref="%(ref)s"/>'
file_template = ' '*12 + '<file>%(filename)s</file>'
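# For example, section_template % {'title': 'Intro', 'ref': 'intro.html'} renders as
#   <section title="Intro" ref="intro.html"/>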
class QtHelpBuilder(StandaloneHTMLBuilder):
"""
Builder that also outputs Qt help project, contents and index files.
"""
name = 'qthelp'
# don't copy the reST source
copysource = False
supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
'image/jpeg']
# don't add links
add_permalinks = False
# don't add sidebar etc.
embedded = True
def init(self):
StandaloneHTMLBuilder.init(self)
# the output files for HTML help must be .html only
self.out_suffix = '.html'
# self.config.html_style = 'traditional.css'
def handle_finish(self):
self.build_qhp(self.outdir, self.config.qthelp_basename)
def build_qhp(self, outdir, outname):
self.info('writing project file...')
# sections
tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
prune_toctrees=False)
def istoctree(node):
return isinstance(node, addnodes.compact_paragraph) and \
'toctree' in node
sections = []
for node in tocdoc.traverse(istoctree):
sections.extend(self.write_toc(node))
for indexname, indexcls, content, collapse in self.domain_indices:
item = section_template % {'title': indexcls.localname,
'ref': '%s.html' % indexname}
sections.append(' ' * 4 * 4 + item)
# sections may be unicode strings or byte strings, we have to make sure
# they are all unicode strings before joining them
new_sections = []
for section in sections:
if not isinstance(section, text_type):
new_sections.append(force_decode(section, None))
else:
new_sections.append(section)
sections = u'\n'.join(new_sections)
# keywords
keywords = []
index = self.env.create_index(self, group_entries=False)
for (key, group) in index:
for title, (refs, subitems, key_) in group:
keywords.extend(self.build_keywords(title, refs, subitems))
keywords = u'\n'.join(keywords)
# files
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
projectfiles = []
staticdir = path.join(outdir, '_static')
imagesdir = path.join(outdir, self.imagedir)
for root, dirs, files in os.walk(outdir):
resourcedir = root.startswith(staticdir) or \
root.startswith(imagesdir)
for fn in files:
if (resourcedir and not fn.endswith('.js')) or \
fn.endswith('.html'):
filename = path.join(root, fn)[olen:]
projectfiles.append(file_template %
{'filename': htmlescape(filename)})
projectfiles = '\n'.join(projectfiles)
# it seems that the "namespace" may not contain non-alphanumeric
# characters, and more than one successive dot, or leading/trailing
# dots, are also forbidden
nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)
nspace = re.sub('[^a-zA-Z0-9.]', '', nspace)
nspace = re.sub(r'\.+', '.', nspace).strip('.')
nspace = nspace.lower()
# write the project file
f = codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8')
try:
f.write(project_template % {
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_title),
'version': htmlescape(self.config.version),
'project': htmlescape(self.config.project),
'namespace': htmlescape(nspace),
'masterdoc': htmlescape(self.config.master_doc),
'sections': sections,
'keywords': keywords,
'files': projectfiles})
finally:
f.close()
homepage = 'qthelp://' + posixpath.join(
nspace, 'doc', self.get_target_uri(self.config.master_doc))
startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')
self.info('writing collection project file...')
f = codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8')
try:
f.write(collection_template % {
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_short_title),
'homepage': htmlescape(homepage),
'startpage': htmlescape(startpage)})
finally:
f.close()
def isdocnode(self, node):
if not isinstance(node, nodes.list_item):
return False
if len(node.children) != 2:
return False
if not isinstance(node.children[0], addnodes.compact_paragraph):
return False
if not isinstance(node.children[0][0], nodes.reference):
return False
if not isinstance(node.children[1], nodes.bullet_list):
return False
return True
def write_toc(self, node, indentlevel=4):
# XXX this should return a Unicode string, not a bytestring
parts = []
if self.isdocnode(node):
refnode = node.children[0][0]
link = refnode['refuri']
            title = htmlescape(refnode.astext()).replace('"', '&quot;')
item = '<section title="%(title)s" ref="%(ref)s">' % \
{'title': title, 'ref': link}
parts.append(' '*4*indentlevel + item)
for subnode in node.children[1]:
parts.extend(self.write_toc(subnode, indentlevel+1))
parts.append(' '*4*indentlevel + '</section>')
elif isinstance(node, nodes.list_item):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
elif isinstance(node, nodes.reference):
link = node['refuri']
            title = htmlescape(node.astext()).replace('"', '&quot;')
item = section_template % {'title': title, 'ref': link}
item = u' ' * 4 * indentlevel + item
parts.append(item.encode('ascii', 'xmlcharrefreplace'))
elif isinstance(node, nodes.bullet_list):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
elif isinstance(node, addnodes.compact_paragraph):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
return parts
def keyword_item(self, name, ref):
matchobj = _idpattern.match(name)
if matchobj:
groupdict = matchobj.groupdict()
shortname = groupdict['title']
id = groupdict.get('id')
# descr = groupdict.get('descr')
if shortname.endswith('()'):
shortname = shortname[:-2]
id = '%s.%s' % (id, shortname)
else:
id = None
if id:
item = ' '*12 + '<keyword name="%s" id="%s" ref="%s"/>' % (
name, id, ref[1])
else:
item = ' '*12 + '<keyword name="%s" ref="%s"/>' % (name, ref[1])
        # note: str.encode returns a new object, so as written this call has no effect
        item.encode('ascii', 'xmlcharrefreplace')
return item
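    # Illustrative only (the exact behaviour depends on _idpattern): for a name whose
    # match yields title 'clean()' and id 'mymodule', with ref ('doc', 'page.html#clean'),
    # the emitted entry would look like
    #   <keyword name="..." id="mymodule.clean" ref="page.html#clean"/>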
def build_keywords(self, title, refs, subitems):
keywords = []
title = htmlescape(title)
# if len(refs) == 0: # XXX
# write_param('See Also', title)
if len(refs) == 1:
keywords.append(self.keyword_item(title, refs[0]))
elif len(refs) > 1:
for i, ref in enumerate(refs): # XXX
# item = (' '*12 +
# '<keyword name="%s [%d]" ref="%s"/>' % (
# title, i, ref))
# item.encode('ascii', 'xmlcharrefreplace')
# keywords.append(item)
keywords.append(self.keyword_item(title, ref))
if subitems:
for subitem in subitems:
keywords.extend(self.build_keywords(subitem[0], subitem[1], []))
return keywords
| 1.59375 | 2 |
scripts/scrape_sciencedirect_urls.py | UWPRG/BETO2020 | 4 | 1007 | """
This code is used to scrape ScienceDirect of publication urls and write them to
a text file in the current directory for later use.
"""
import selenium
from selenium import webdriver
import numpy as np
import pandas as pd
import bs4
from bs4 import BeautifulSoup
import time
from sklearn.utils import shuffle
def scrape_page(driver):
"""
This method finds all the publication result web elements on the webpage.
Parameters
----------
driver (Selenium webdriver object) : Instance of the webdriver class e.g.
webdriver.Chrome()
Returns
-------
elems (list) : A list of all scraped hrefs from the page
"""
elems = driver.find_elements_by_class_name('ResultItem')
return elems
def clean(elems):
"""
This method takes a list of scraped selenium web elements
and filters/ returns only the hrefs leading to publications.
Filtering includes removing all urls with keywords that are indicative of
non-html links.
Parameters
----------
elems (list) : The list of hrefs to be filtered
Returns
-------
urls (list) : The new list of hrefs, which should be the same as the list
displayed on gui ScienceDirect
"""
titles = []
urls = []
for elem in elems:
href_child = elem.find_element_by_css_selector('a[href]')
url = href_child.get_attribute('href')
title = href_child.text
titles.append(title)
urls.append(url)
return urls, titles
def build_url_list(gui_prefix,search_terms,journal_list):
"""
    This method takes the list of journals and creates a triple-nested dictionary
containing all accessible urls to each page, in each year, for each journal,
for a given search on sciencedirect.
"""
dict1 = {}
years = np.arange(1995,2020)
for journal in journal_list:
dict2 = {}
for year in years:
dict3 = {}
for i in range(60):
url = gui_prefix + search_terms + '&show=100'+ '&articleTypes=FLA%2CREV' + '&years='+ str(year)
if i != 0:
url = url + '&offset=' + str(i) +'00'
url = url + '&pub=' + journal
dict3[i] = url
dict2[year] = dict3
dict1[journal] = dict2
return dict1
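def _example_build_url_list_usage():
    """Hedged sketch (never called): the journal name and search term below are
    assumptions, used only to show how the returned nested dict is indexed."""
    example_dict = build_url_list('https://www.sciencedirect.com/search/advanced?qs=',
                                  'polymer', ['Polymer'])
    # journal -> year -> page offset, e.g. the first results page for 1995:
    return example_dict['Polymer'][1995][0]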
def proxify(scraped_urls,uw_prefix):
"""
This method takes a list of scraped urls and turns them into urls that
go through the UW Library proxy so that all of them are full access.
Parameters
----------
scraped_urls (list) : The list of URLs to be converted
uw_prefix (str) : The string that all URLs which go through the UW Library
Proxy start with.
Returns
-------
proxy_urls (list) : The list of converted URLs which go through UW Library
proxy
"""
proxy_urls = []
for url in scraped_urls:
sd_id = url[-17:]
newlink = uw_prefix + sd_id
if sd_id.startswith('S'):
proxy_urls.append(newlink)
return proxy_urls
def write_urls(urls,titles,file,journal,year):
"""
This method takes a list of urls and writes them to a desired text file.
Parameters
----------
urls (list) : The list of URLs to be saved.
file (file object) : The opened .txt file which will be written to.
year (str or int) : The year associated with the publication date.
Returns
-------
Does not return anything
"""
for link,title in zip(urls,titles):
line = link + ',' + title + ',' + journal + ',' + str(year)
file.write(line)
file.write('\n')
def find_pubTitle(driver,journal):
"""
This method finds the identifying number for a specific journal. This
    identifying number is added to the gui query URL to ensure only publications
from the desired journal are being found.
"""
pub_elems = driver.find_elements_by_css_selector('input[id*=publicationTitles]')
pub_names = []
for elem in pub_elems:
pub_name = elem.get_attribute("name")
if pub_name == journal:
return elem.get_attribute('id')[-6:] #returns the identifying number
#for that journal
df = pd.read_excel('elsevier_journals.xls')
df.Full_Category = df.Full_Category.str.lower() # lowercase topics for searching
df = df.drop_duplicates(subset = 'Journal_Title') # drop any duplicate journals
df = shuffle(df,random_state = 42)
# The set of default strings that will be used to sort which journals we want
journal_strings = ['chemistry','energy','molecular','atomic','chemical','biochem'
                   ,'organic','polymer','chemical engineering','biotech','colloid']
name = df.Full_Category.str.contains # making this an easier command to type
# new dataframe containing only journals whose topic description contains the
# desired keywords
df2 = df[name('polymer') | name('chemistry') | name('energy')
| name('molecular') | name('colloid') | name('biochem')
| name('organic') | name('biotech') | name('chemical')]
journal_list = df2.Journal_Title # Series of only the journals to be searched
gui_prefix = 'https://www.sciencedirect.com/search/advanced?qs='
search_terms = 'chemistry%20OR%20molecule%20OR%20polymer%20OR%20organic'
url_dict = build_url_list(gui_prefix,search_terms,journal_list)
driver = webdriver.Chrome()
uw_prefix = 'https://www-sciencedirect-com.offcampus.lib.washington.edu/science/article/pii/'
filename = input("Input filename with .txt extension for URL storage: ")
url_counter = 0
master_list = []
file = open(filename,'a+')
for journal in journal_list:
for year in np.arange(1995,2020):
for offset in np.arange(60):
page = url_dict[journal][year][offset]
print("journal, year, offset = ",journal,year,offset)
driver.get(page)
time.sleep(2) # need sleep to load the page properly
if offset == 0: # if on page 1, we need to grab the publisher number
try: # we may be at a page which won't have the item we are looking for
                    pubTitles = find_pubTitle(driver, journal) # 'journal' comes from the outer loop
                    for yr in url_dict[journal]: # persist the filter on every stored url
                        for off in url_dict[journal][yr]:
                            url_dict[journal][yr][off] += '&pubTitles=' + pubTitles
driver.get(url_dict[journal][year][0]) # reload the first page with the new url
except:
pass # if there is an exception, it means we are on the right page
scraped_elems = scrape_page(driver) # scrape the page
scraped_urls, titles = clean(scraped_elems)
proxy_urls = proxify(scraped_urls,uw_prefix) # not even sure this is needed
write_urls(proxy_urls,titles,file,journal,year)
url_counter += len(proxy_urls)
print('Total URLs saved is: ',url_counter)
if len(scraped_elems) < 100: # after content is saved, go to the next year
break # because we know this is the last page of urls for this year
file.close()
driver.quit()
| 3.515625 | 4 |
superset/typing.py | GodelTech/superset | 7 | 1008 | <filename>superset/typing.py<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from flask import Flask
from flask_caching import Cache
from werkzeug.wrappers import Response
CacheConfig = Union[Callable[[Flask], Cache], Dict[str, Any]]
DbapiDescriptionRow = Tuple[
str, str, Optional[str], Optional[str], Optional[int], Optional[int], bool
]
DbapiDescription = Union[List[DbapiDescriptionRow], Tuple[DbapiDescriptionRow, ...]]
DbapiResult = Sequence[Union[List[Any], Tuple[Any, ...]]]
FilterValue = Union[datetime, float, int, str]
FilterValues = Union[FilterValue, List[FilterValue], Tuple[FilterValue]]
FormData = Dict[str, Any]
Granularity = Union[str, Dict[str, Union[str, float]]]
AdhocMetric = Dict[str, Any]
Metric = Union[AdhocMetric, str]
OrderBy = Tuple[Metric, bool]
QueryObjectDict = Dict[str, Any]
VizData = Optional[Union[List[Any], Dict[Any, Any]]]
VizPayload = Dict[str, Any]
# Flask response.
Base = Union[bytes, str]
Status = Union[int, str]
Headers = Dict[str, Any]
FlaskResponse = Union[
Response, Base, Tuple[Base, Status], Tuple[Base, Status, Headers],
]
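# Usage sketch (the ``health`` view below is an assumption, not part of Superset): the
# aliases above are meant for type annotations, e.g. a Flask view returning a
# (body, status) pair can be annotated as ``FlaskResponse``:
#
#     def health() -> FlaskResponse:
#         return "OK", 200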
| 1.835938 | 2 |
log_system_information.py | ibaiGorordo/depthai | 476 | 1009 | #!/usr/bin/env python3
import json
import platform
def make_sys_report(anonymous=False, skipUsb=False, skipPackages=False):
def get_usb():
try:
import usb.core
except ImportError:
yield "NoLib"
return
speeds = ["Unknown", "Low", "Full", "High", "Super", "SuperPlus"]
format_hex = lambda val: f"{val:#0{6}x}"
try:
for dev in usb.core.find(find_all=True):
yield {
"port": dev.port_number,
"vendor_id": format_hex(dev.idVendor),
"product_id": format_hex(dev.idProduct),
"speed": speeds[dev.speed] if dev.speed < len(speeds) else dev.speed
}
except usb.core.NoBackendError:
yield "No USB backend found"
result = {
"architecture": ' '.join(platform.architecture()).strip(),
"machine": platform.machine(),
"platform": platform.platform(),
"processor": platform.processor(),
"python_build": ' '.join(platform.python_build()).strip(),
"python_compiler": platform.python_compiler(),
"python_implementation": platform.python_implementation(),
"python_version": platform.python_version(),
"release": platform.release(),
"system": platform.system(),
"version": platform.version(),
"win32_ver": ' '.join(platform.win32_ver()).strip(),
}
if not skipPackages:
from pip._internal.operations.freeze import freeze
result["packages"] = list(freeze(local_only=True))
if not skipUsb:
result["usb"] = list(get_usb())
if not anonymous:
result["uname"] = ' '.join(platform.uname()).strip(),
return result
if __name__ == "__main__":
data = make_sys_report()
with open("log_system_information.json", "w") as f:
json.dump(data, f, indent=4)
print(json.dumps(data, indent=4))
print("System info gathered successfully - saved as \"log_system_information.json\"")
| 2.375 | 2 |
patch.py | silverhikari/romtools | 5 | 1010 | """
Utils for creating xdelta patches.
"""
import logging
from subprocess import check_output, CalledProcessError
from shutil import copyfile
from os import remove, path
class PatchChecksumError(Exception):
def __init__(self, message, errors):
super(PatchChecksumError, self).__init__(message)
class Patch:
# TODO: Abstract out the need for "edited" by just copying the original
# file.
def __init__(self, original, filename, edited=None, xdelta_dir='.'):
self.original = original
self.edited = edited
self.filename = filename
# Need to have this absolute path for xdelta3 to be found.
self.xdelta_path = path.join(xdelta_dir, 'xdelta3')
# self.xdelta_path = 'xdelta3'
def create(self):
if self.edited is None:
raise Exception
cmd = [
self.xdelta_path,
'-f',
'-s',
self.original,
self.edited,
self.filename,
]
print(cmd)
logging.info(cmd)
try:
check_output(cmd)
except CalledProcessError as e:
raise Exception(e.output)
def apply(self):
if not self.edited:
copyfile(self.original, self.original + "_temp")
self.edited = self.original
self.original = self.original + "_temp"
cmd = [
self.xdelta_path,
'-f',
'-d',
'-s',
self.original,
self.filename,
self.edited,
]
logging.info(cmd)
try:
check_output(cmd)
except CalledProcessError:
raise PatchChecksumError('Target file had incorrect checksum', [])
finally:
if self.original.endswith('_temp'):
remove(self.original)
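if __name__ == '__main__':
    # Minimal usage sketch; the file names below are assumptions and an xdelta3 binary
    # must be present in the chosen xdelta_dir for either call to succeed.
    patch = Patch('original.bin', 'changes.xdelta', edited='edited.bin', xdelta_dir='.')
    patch.create()  # writes changes.xdelta from the original/edited pair
    Patch('original.bin', 'changes.xdelta', edited='patched.bin', xdelta_dir='.').apply()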
| 2.71875 | 3 |
enigma.py | fewieden/Enigma-Machine | 1 | 1011 | from rotor import Rotor
import sys
import getopt
class Enigma:
def __init__(self, key, rotors):
self.key = list(key)
self.rotors = []
for i in range(0, len(rotors)):
self.rotors.append(Rotor(self.key[i], rotors[i]))
def encrypt(self, word):
cipher = ''
for i, char in enumerate(word.upper()):
distance = self.rotors[i % 2].get_distance(char)
cipher += self.rotors[2].rotate((i + 1) % 2, distance)
return cipher
def decrypt(self, cipher):
word = ''
for i, char in enumerate(cipher.upper()):
distance = self.rotors[2].get_distance(char)
word += self.rotors[i % 2].rotate((i + 1) % 2, distance)
return word
def print_help():
print("\ncommand line arguments:\n" +
"-h/--help: all possible options\n" +
"-k/--key KEY: rotor starting key\n" +
"-p/--phrase Phrase: phrase to encrypt/decrypt\n" +
"-d/--decrypt: enables decrypt default is encrypt\n" +
"--r1 ROTOR: sets rotor 1\n" +
"--r2 ROTOR: sets rotor 2\n" +
"--r3 ROTOR: sets rotor 3\n" +
"possible rotors are 50, 51, 60, 61, 70 and 71\n")
def main(argv):
try:
        opts, args = getopt.getopt(argv, "hk:p:d", ["help", "key=", "phrase=", "decrypt", "r1=", "r2=", "r3="])
except getopt.GetoptError:
print_help()
sys.exit(2)
key = ''
phrase = ''
encrypt = True
rotors = ['', '', '']
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help()
sys.exit()
elif opt in ("-k", "--key"):
key = arg
elif opt in ("-p", "--phrase"):
phrase = arg
elif opt in ("-d", "--decrypt"):
encrypt = False
elif opt == "--r1":
rotors[0] = arg
elif opt == "--r2":
rotors[1] = arg
elif opt == "--r3":
rotors[2] = arg
if not key == '' and not phrase == '' and not rotors[0] == ''\
and not rotors[1] == '' and not rotors[2] == '':
machine = Enigma(key, rotors)
if encrypt:
print(machine.encrypt(phrase))
else:
print(machine.decrypt(phrase))
else:
print_help()
if __name__ == '__main__':
main(sys.argv[1:])
| 3.328125 | 3 |
andersoncd/group.py | idc9/andersoncd | 0 | 1012 | <gh_stars>0
import time
import numpy as np
from scipy import sparse
from numba import njit
from numpy.linalg import norm
from scipy.sparse.linalg import svds
from andersoncd.lasso import dual_lasso
def primal_grp(R, w, alpha, grp_size):
return (0.5 * norm(R) ** 2 + alpha *
norm(w.reshape(-1, grp_size), axis=1).sum())
@njit
def BST(x, u):
"""Block soft-thresholding of vector x at level u."""
norm_x = norm(x)
if norm_x < u:
return np.zeros_like(x)
else:
return (1 - u / norm_x) * x
def BST_vec(x, u, grp_size):
norm_grp = norm(x.reshape(-1, grp_size), axis=1)
scaling = np.maximum(1 - u / norm_grp, 0)
return (x.reshape(-1, grp_size) * scaling[:, None]).reshape(x.shape[0])
@njit
def _bcd(X, w, R, alpha, lc, groups):
grp_size = w.shape[0] // lc.shape[0]
for g in groups:
grp = slice(g * grp_size, (g + 1) * grp_size)
Xg = X[:, grp]
old_w_g = w[grp].copy()
w[grp] = BST(old_w_g + Xg.T @ R / lc[g], alpha / lc[g])
if norm(w[grp] - old_w_g) != 0:
R += np.sum((old_w_g - w[grp]) * Xg, axis=1)
@njit
def _bcd_sparse(
X_data, X_indices, X_indptr, w, R, alpha, lc):
grp_size = w.shape[0] // lc.shape[0]
grad = np.zeros(grp_size)
for g in range(lc.shape[0]):
grad.fill(0)
grp = slice(g * grp_size, (g + 1) * grp_size)
for j in range(grp_size * g, grp_size * (g + 1)):
for ix in range(X_indptr[j], X_indptr[j + 1]):
                grad[j % grp_size] += X_data[ix] * R[X_indices[ix]]  # index within the group
old_w_g = w[grp].copy()
w[grp] = BST(old_w_g + grad / lc[g], alpha / lc[g])
if norm(w[grp] - old_w_g) != 0:
for j in range(g * grp_size, (g + 1) * grp_size):
for ix in range(X_indptr[j], X_indptr[j + 1]):
                    R[X_indices[ix]] += (old_w_g[j % grp_size] -
                                         w[j]) * X_data[ix]
def solver_group(
X, y, alpha, grp_size, max_iter=10000, tol=1e-4, f_gap=10, K=5,
use_acc=False, algo='bcd', compute_time=False, tmax=np.infty,
verbose=True):
"""Solve the GroupLasso with BCD/ISTA/FISTA, eventually with extrapolation.
Groups are contiguous, of size grp_size.
Objective:
norm(y - Xw, ord=2)**2 / 2 + alpha * sum_g ||w_{[g]}||_2
TODO: filled docstring
Parameters:
algo: string
'bcd', 'pgd', 'fista'
compute_time : bool, default=False
If you want to compute timings or not
tmax : float, default=1000
Maximum time (in seconds) the algorithm is allowed to run
alpha: strength of the group penalty
"""
is_sparse = sparse.issparse(X)
n_features = X.shape[1]
if n_features % grp_size != 0:
raise ValueError("n_features is not a multiple of group size")
n_groups = n_features // grp_size
_range = np.arange(n_groups)
groups = dict(
bcd=lambda: _range,
bcdshuf=lambda: np.random.choice(n_groups, n_groups, replace=False),
rbcd=lambda: np.random.choice(n_groups, n_groups, replace=True))
if not is_sparse and not np.isfortran(X):
X = np.asfortranarray(X)
last_K_w = np.zeros([K + 1, n_features])
U = np.zeros([K, n_features])
if algo in ('pgd', 'fista'):
if is_sparse:
L = svds(X, k=1)[1][0] ** 2
else:
L = norm(X, ord=2) ** 2
lc = np.zeros(n_groups)
for g in range(n_groups):
X_g = X[:, g * grp_size: (g + 1) * grp_size]
if is_sparse:
gram = (X_g.T @ X_g).todense()
lc[g] = norm(gram, ord=2)
else:
lc[g] = norm(X_g, ord=2) ** 2
w = np.zeros(n_features)
if algo == 'fista':
z = np.zeros(n_features)
t_new = 1
R = y.copy()
E = []
gaps = np.zeros(max_iter // f_gap)
if compute_time:
times = []
t_start = time.time()
for it in range(max_iter):
if it % f_gap == 0:
if algo == 'fista':
R = y - X @ w
p_obj = primal_grp(R, w, alpha, grp_size)
E.append(p_obj)
theta = R / alpha
if compute_time:
elapsed_times = time.time() - t_start
times.append(elapsed_times)
if verbose:
print("elapsed time: %f " % elapsed_times)
if elapsed_times > tmax:
break
d_norm_theta = np.max(
norm((X.T @ theta).reshape(-1, grp_size), axis=1))
if d_norm_theta > 1.:
theta /= d_norm_theta
d_obj = dual_lasso(y, theta, alpha)
gap = p_obj - d_obj
if verbose:
print("Iteration %d, p_obj::%.5f, d_obj::%.5f, gap::%.2e" %
(it, p_obj, d_obj, gap))
gaps[it // f_gap] = gap
if gap < tol:
print("Early exit")
break
if algo.endswith('bcd'):
if is_sparse:
_bcd_sparse(
X.data, X.indices, X.indptr, w, R, alpha, lc)
else:
_bcd(X, w, R, alpha, lc, groups[algo]())
elif algo == 'pgd':
w[:] = BST_vec(w + X.T @ R / L, alpha / L, grp_size)
R[:] = y - X @ w
elif algo == 'fista':
w_old = w.copy()
w[:] = BST_vec(z - X.T @ (X @ z - y) / L, alpha / L, grp_size)
t_old = t_new
t_new = (1. + np.sqrt(1 + 4 * t_old ** 2)) / 2.
z[:] = w + (t_old - 1.) / t_new * (w - w_old)
else:
raise ValueError("Unknown algo %s" % algo)
if use_acc:
if it < K + 1:
last_K_w[it] = w
else:
for k in range(K):
last_K_w[k] = last_K_w[k + 1]
last_K_w[K - 1] = w
for k in range(K):
U[k] = last_K_w[k + 1] - last_K_w[k]
C = np.dot(U, U.T)
try:
z = np.linalg.solve(C, np.ones(K))
c = z / z.sum()
w_acc = np.sum(last_K_w[:-1] * c[:, None],
axis=0)
p_obj = primal_grp(R, w, alpha, grp_size)
R_acc = y - X @ w_acc
p_obj_acc = primal_grp(R_acc, w_acc, alpha, grp_size)
if p_obj_acc < p_obj:
w = w_acc
R = R_acc
except np.linalg.LinAlgError:
if verbose:
print("----------Linalg error")
if compute_time:
return w, np.array(E), gaps[:it // f_gap + 1], times
return w, np.array(E), gaps[:it // f_gap + 1]
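if __name__ == '__main__':
    # Minimal usage sketch (problem sizes and hyperparameters are arbitrary assumptions):
    # solve a small group Lasso problem with contiguous groups of 5 features.
    np.random.seed(0)
    X = np.random.randn(50, 20)
    y = X @ np.random.randn(20) + 0.1 * np.random.randn(50)
    w_hat, obj_vals, dual_gaps = solver_group(
        X, y, alpha=0.1, grp_size=5, max_iter=1000, tol=1e-6, algo='bcd',
        verbose=False)
    print("final objective: %.4f" % obj_vals[-1])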
| 2.296875 | 2 |
textattack/search_methods/greedy_word_swap_wir.py | dheerajrav/TextAttack | 0 | 1013 | """
Greedy Word Swap with Word Importance Ranking
===================================================
When WIR method is set to ``unk``, this is a reimplementation of the search
method from the paper: Is BERT Really Robust?
A Strong Baseline for Natural Language Attack on Text Classification and
Entailment by Jin et. al, 2019. See https://arxiv.org/abs/1907.11932 and
https://github.com/jind11/TextFooler.
"""
import numpy as np
import torch
from torch.nn.functional import softmax
from textattack.goal_function_results import GoalFunctionResultStatus
from textattack.search_methods import SearchMethod
from textattack.shared.validators import (
transformation_consists_of_word_swaps_and_deletions,
)
class GreedyWordSwapWIR(SearchMethod):
"""An attack that greedily chooses from a list of possible perturbations in
order of index, after ranking indices by importance.
Args:
wir_method: method for ranking most important words
"""
def __init__(self, wir_method="unk"):
self.wir_method = wir_method
def _get_index_order(self, initial_text):
"""Returns word indices of ``initial_text`` in descending order of
importance."""
len_text = len(initial_text.words)
if self.wir_method == "unk":
leave_one_texts = [
initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
index_scores = np.array([result.score for result in leave_one_results])
elif self.wir_method == "weighted-saliency":
# first, compute word saliency
leave_one_texts = [
initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
saliency_scores = np.array([result.score for result in leave_one_results])
softmax_saliency_scores = softmax(
torch.Tensor(saliency_scores), dim=0
).numpy()
# compute the largest change in score we can find by swapping each word
delta_ps = []
for idx in range(len_text):
transformed_text_candidates = self.get_transformations(
initial_text,
original_text=initial_text,
indices_to_modify=[idx],
)
if not transformed_text_candidates:
# no valid synonym substitutions for this word
delta_ps.append(0.0)
continue
swap_results, _ = self.get_goal_results(transformed_text_candidates)
score_change = [result.score for result in swap_results]
max_score_change = np.max(score_change)
delta_ps.append(max_score_change)
index_scores = softmax_saliency_scores * np.array(delta_ps)
elif self.wir_method == "delete":
leave_one_texts = [
initial_text.delete_word_at_index(i) for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
index_scores = np.array([result.score for result in leave_one_results])
elif self.wir_method == "random":
index_order = np.arange(len_text)
np.random.shuffle(index_order)
search_over = False
else:
raise ValueError(f"Unsupported WIR method {self.wir_method}")
if self.wir_method != "random":
index_order = (-index_scores).argsort()
return index_order, search_over
def _perform_search(self, initial_result):
attacked_text = initial_result.attacked_text
# Sort words by order of importance
index_order, search_over = self._get_index_order(attacked_text)
i = 0
cur_result = initial_result
results = None
while i < len(index_order) and not search_over:
transformed_text_candidates = self.get_transformations(
cur_result.attacked_text,
original_text=initial_result.attacked_text,
indices_to_modify=[index_order[i]],
)
i += 1
if len(transformed_text_candidates) == 0:
continue
results, search_over = self.get_goal_results(transformed_text_candidates)
results = sorted(results, key=lambda x: -x.score)
# Skip swaps which don't improve the score
if results[0].score > cur_result.score:
cur_result = results[0]
else:
continue
# If we succeeded, return the index with best similarity.
if cur_result.goal_status == GoalFunctionResultStatus.SUCCEEDED:
best_result = cur_result
# @TODO: Use vectorwise operations
max_similarity = -float("inf")
for result in results:
if result.goal_status != GoalFunctionResultStatus.SUCCEEDED:
break
candidate = result.attacked_text
try:
similarity_score = candidate.attack_attrs["similarity_score"]
except KeyError:
# If the attack was run without any similarity metrics,
# candidates won't have a similarity score. In this
# case, break and return the candidate that changed
# the original score the most.
break
if similarity_score > max_similarity:
max_similarity = similarity_score
best_result = result
return best_result
return cur_result
def check_transformation_compatibility(self, transformation):
"""Since it ranks words by their importance, GreedyWordSwapWIR is
limited to word swap and deletion transformations."""
return transformation_consists_of_word_swaps_and_deletions(transformation)
def extra_repr_keys(self):
return ["wir_method"]
| 3.484375 | 3 |
lemur/deployment/service.py | rajatsharma94/lemur | 1,656 | 1014 | from lemur import database
def rotate_certificate(endpoint, new_cert):
"""
Rotates a certificate on a given endpoint.
    :param endpoint: the endpoint whose certificate should be rotated
    :param new_cert: the new certificate to deploy on the endpoint
:return:
"""
# ensure that certificate is available for rotation
endpoint.source.plugin.update_endpoint(endpoint, new_cert)
endpoint.certificate = new_cert
database.update(endpoint)
| 2.65625 | 3 |
pype/celery.py | h2020-westlife-eu/VRE | 1 | 1015 | # coding: utf-8
# Copyright Luna Technology 2015
# <NAME> <<EMAIL>>
from __future__ import absolute_import
import os
from celery import Celery
# Set the default Django settings module for the 'celery' program
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pype.settings')
from django.conf import settings
from celery.signals import setup_logging
@setup_logging.connect
def configure_logging(sender=None, **kwargs):
import logging
import logging.config
logging.config.dictConfig(settings.LOGGING)
app = Celery('pype')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
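# Typical worker startup for this app (assuming the usual pype/__init__.py re-export of
# ``app``; the log level is an arbitrary choice):
#   celery -A pype worker -l info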
| 1.953125 | 2 |
train/general_train_example/1_parse.py | ss433s/sosweety | 0 | 1016 | <reponame>ss433s/sosweety<gh_stars>0
import os, sys
import json
# Get the current path and locate the project root via the anchor file
this_file_path = os.path.split(os.path.realpath(__file__))[0]
this_path = this_file_path
root_path = this_file_path
while this_path:
if os.path.exists(os.path.join(this_path, 'sosweety_root_anchor.py')):
root_path = this_path
break
par_path = os.path.dirname(this_path)
# print(par_path)
if par_path == this_path:
break
else:
this_path = par_path
sys.path.append(root_path)
from modules.sParser.sParser import sParser
from modules.knowledgebase.kb import KnowledgeBase
train_dir = 'data/train_zh_wiki'
train_dir = os.path.join(root_path, train_dir)
if not os.path.exists(train_dir):
os.makedirs(train_dir)
# Produce the parse result file
parse_result_dir = 'parse_result'
parse_result_dir = os.path.join(train_dir, parse_result_dir)
if not os.path.exists(parse_result_dir):
os.makedirs(parse_result_dir)
pos_tags_file_name = 'pos_tags_file'
pos_tags_file_path = os.path.join(parse_result_dir, pos_tags_file_name)
KB = KnowledgeBase()
parser = sParser(KB)
with open(pos_tags_file_path, 'w') as pos_tags_file:
    # Open the corpus file
file_path = 'data/corpus/zh_wiki/wiki_test'
file_path = os.path.join(root_path, file_path)
file = open(file_path)
line = file.readline()
count = 0
while line:
count += 1
if count % 5000 == 0:
            print('parsed %s sentences' % count)
text = line.strip()
try:
ss_pos_tags = parser.text2ss_pos_tags(text)
for pos_tags in ss_pos_tags:
pos_tags_file.write(json.dumps(pos_tags, ensure_ascii=False) + '\n')
except Exception:
print('line %s decode error' % count)
line = file.readline()
file.close()
| 2.359375 | 2 |
ruleex/hypinv/model.py | rohancode/ruleex_modified | 18 | 1017 | from gtrain import Model
import numpy as np
import tensorflow as tf
class NetForHypinv(Model):
"""
    Implementation of the crucial functions for the HypINV algorithm.
    Warning: Do not use this class directly; implement a subclass instead, see for example FCNetForHypinv
"""
def __init__(self, weights):
self.eval_session = None
self.grad_session = None
self.initial_x = None
self.center = None
self.weights = weights
self.out_for_eval = None #(going to be filled in build_for_eval method)
self.boundary_out_for_eval = None
self.trained_x = None
self.training_class_index = None
self.x = None # tf variable for inversion (going to be filled in build method)
self.x_for_eval = None
self.out = None
self.boundary_out = None # list of tf tensorf for each class of softmax class vs others output
self.loss = None
self.boundary_loss = None
self.t = None #target
self.boundary_t = None
self.x1 = None # this attribute is used of purposes of modified loss function
def __del__(self):
        # close all open sessions
if self.eval_session:
self.eval_session.close()
if self.grad_session:
self.grad_session.close()
def set_initial_x(self, initial_x):
# sets starting point for the search of the closest point
self.initial_x = initial_x
def set_center(self, center):
# sets center point
self.center = center / np.linalg.norm(center)
def set_x1(self, x1):
        # sets x1, the point for which we want to find the closest point x0
self.x1 = x1
def has_modified_loss(self):
        pass # returns True if the modified loss is used
def set_initial_x_in_session(self, x, session=None):
# sets initial x in certain session
if session is None:
self.set_initial_x(x)
else:
pass # overide this method
def eval(self, x):
if len(x.shape) == 1:
x = x.reshape((1,len(x)))
if not self.eval_session:
self.eval_session = tf.Session()
with self.eval_session.as_default():
self.build_for_eval()
self.eval_session.run(tf.global_variables_initializer())
return self.eval_session.run(self.out_for_eval, {self.x_for_eval: x})
def boundary_eval(self, x, class_index):
        # evaluates the binary classification of class_index vs. the other classes
if not self.eval_session:
self.eval_session = tf.Session()
with self.eval_session.as_default():
self.build_for_eval()
self.eval_session.run(tf.global_variables_initializer())
return self.eval_session.run(self.boundary_out_for_eval[class_index], {self.x_for_eval: x})
def get_boundary_gradient(self, x, class_index):
# computes gradient of the boundary for specified class_index
if not self.grad_session:
self.grad_session = tf.Session()
with self.grad_session.as_default():
self.build_for_eval()
self.grad = list()
for i in range(len(self.weights[0][-1][0])):
self.grad.append(tf.gradients(self.boundary_out_for_eval[i], [self.x_for_eval])[0])
self.grad_x = self.x_for_eval
return self.grad_session.run(self.grad[class_index], {self.grad_x: x})
def build_for_eval(self):
# build model for evaluation
pass #override this method (fill self.out_for_eval)
def train_ended(self, session):
self.trained_x = session.run(self.x)
def build(self):
# build model for training
pass #override this method (fill self.x, self.out)
def set_train_class(self, class_index):
# sets class of the x1
self.training_class_index = class_index
# overided methods from gtrain.Model
def get_loss(self):
if self.training_class_index is None:
return self.loss
else:
return self.boundary_loss[self.training_class_index]
def get_hits(self):
return self.get_loss()
def get_count(self):
return self.get_loss()
def get_train_summaries(self):
return []
def get_dev_summaries(self):
return []
def get_placeholders(self):
if self.training_class_index is None:
return [self.t]
else:
return [self.boundary_t]
#________________________________________EXAMPLES_OF_NetForHypinv_CLASS_____________________________________________
class FCNetForHypinv(NetForHypinv):
"""
    Implementation of a multilayer perceptron to be used in the HypINV rule extraction algorithm
"""
def __init__(self, weights, function=tf.sigmoid, use_modified_loss=False, mu = 0.01):
"""
:param weights: saved as [list of weights for layers][0 weight, 1 bias]
:param function: tf function for propagation. For example tf.nn.sigmoid, tf.atan
        :param use_modified_loss: whether the modified loss should be used
        :param mu: factor of the penalty terms that penalize the distance between x0 and x1 and
        the distance of x1 from the boundary
"""
super(FCNetForHypinv, self).__init__(weights)
self.function = function
self.layer_sizes = [len(self.weights[0][0])]
for bias in weights[1]:
self.layer_sizes.append(len(bias))
self.num_classes = self.layer_sizes[-1]
self.initial_x = np.zeros([1, self.layer_sizes[0]])
self.use_modified_loss = use_modified_loss
self.mu = mu
def build(self):
with tf.name_scope("Input"):
if self.center is not None:
self.point_weights = tf.Variable(self.center.reshape((1, len(self.center))),
dtype=tf.float64, trainable=False, name="Boundary_point")
init_factor = self.center
init_factor[init_factor!=0] = self.initial_x[init_factor!=0] / self.center[init_factor!=0]
self.factor = tf.Variable(init_factor.reshape((1, len(self.center))),
dtype=tf.float64, name="factor")
else:
self.point_weights = tf.Variable(self.initial_x.reshape((1, len(self.initial_x))),
dtype=tf.float64, trainable=False, name="Boundary_point")
                self.factor = tf.Variable(np.ones((1, self.initial_x.size)), # center is None on this branch
                                          dtype=tf.float64, name="factor")
self.x = self.point_weights * self.factor
with tf.name_scope("Target"):
if self.use_modified_loss:
x1_constant = tf.constant(self.x1.reshape((1, len(self.x1))), dtype=tf.float64)
self.t = tf.placeholder(tf.float64, shape=[None, self.num_classes], name="Target_output")
self.boundary_t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_boundary_output")
with tf.name_scope("FC_net"):
flowing_x = self.x
for i, _ in enumerate(self.weights[0]):
with tf.name_scope("layer_{}".format(i)):
W = tf.constant(self.weights[0][i], name="Weight_{}".format(i), dtype=tf.float64)
b = tf.constant(self.weights[1][i], name="Bias_{}".format(i), dtype=tf.float64)
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b))
y = flowing_x
self.out = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out = list()
for i in range(self.num_classes):
mask = True+np.zeros(self.num_classes, dtype=np.bool)
mask[i] = False
x0 = self.out[:,i]
x1 = tf.reduce_max(tf.boolean_mask(self.out, mask, axis=1), axis=1)
s = x0+x1
out = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out.append(out)
with tf.name_scope("Loss_functions"):
self.loss = tf.reduce_mean(
tf.nn.l2_loss(self.out-self.t),
name="loss")
with tf.name_scope("Binary_class_loss"):
self.boundary_loss = list()
if self.use_modified_loss:
for i in range(self.num_classes):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i]-self.boundary_t)) +
self.mu * tf.reduce_mean(tf.nn.l2_loss(self.x - x1_constant))
)
else:
for i in range(self.num_classes):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i] - self.boundary_t))
)
def set_initial_x_in_session(self, x, session=None):
if session is None:
self.set_initial_x(x)
else:
if self.center is None:
session.run([
self.point_weights.assign(x.reshape((1, len(x)))),
self.factor.assign(np.ones((1, len(x))))
])
else:
init_factor = self.center
init_factor[init_factor!=0] = x[init_factor!=0] / self.center[init_factor!=0]
session.run(self.factor.assign(init_factor.reshape((1,len(init_factor)))))
def build_for_eval(self):
with tf.name_scope("eInput"):
self.x_for_eval = tf.placeholder(tf.float32, shape=[None, len(self.weights[0][0])])#tf.Variable(tf.constant(self.initial_x), name="Boundary_point")
with tf.name_scope("eFC_net"):
flowing_x = self.x_for_eval
for i, _ in enumerate(self.weights[0]):
W = tf.constant(self.weights[0][i], name="eWeight_{}".format(i))
b = tf.constant(self.weights[1][i], name="eBias_{}".format(i))
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b), name="elayer_{}".format(i))
y = flowing_x
self.out_for_eval = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out_for_eval = list()
for i in range(self.num_classes):
mask = True+np.zeros(self.num_classes, dtype=np.bool)
mask[i] = False
x0 = self.out_for_eval[:, i]
x1 = tf.reduce_max(tf.boolean_mask(self.out_for_eval, mask, axis=1), axis=1)
s = x0+x1
out = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out_for_eval.append(out)
def has_modified_loss(self):
return self.use_modified_loss
def name(self):
return "Hypinv_FC_net_{}".format("-".join([str(ls) for ls in self.layer_sizes]))
class FCNetForHypinvBinary(FCNetForHypinv):
"""
    Implementation of a multilayer perceptron to be used in the HypINV rule extraction algorithm
    The task is simplified to the binary classification of base_class_index against the other classes
"""
def __init__(self, weights, base_class_index, function=tf.sigmoid, use_modified_loss=False, mu = 0.01):
"""
:param weights: saved as [list of weights for layers][0 weight, 1 bias]
:param base_class_index: an index of the class which is used as the base class
:param function: tf function for propagation. For example tf.nn.sigmoid, tf.atan
        :param use_modified_loss: whether the modified loss should be used
        :param mu: factor of the penalty terms that penalize the distance between x0 and x1 and
        the distance of x1 from the boundary
"""
super(FCNetForHypinvBinary, self).__init__(weights)
self.base_class_index = base_class_index
self.function = function
self.layer_sizes = [len(self.weights[0][0])]
for bias in weights[1]:
self.layer_sizes.append(len(bias))
self.num_classes = self.layer_sizes[-1]
self.initial_x = np.zeros([1, self.layer_sizes[0]])
self.use_modified_loss = use_modified_loss
self.mu = mu
def build(self):
with tf.name_scope("Input"):
self.init_point = tf.Variable(self.initial_x.reshape((1, len(self.initial_x))),
dtype=tf.float64, trainable=False, name="Boundary_point")
self.factor = tf.Variable(np.ones((1, len(self.initial_x))),
dtype=tf.float64, name="factor")
self.x = self.init_point * self.factor
with tf.name_scope("Target"):
if self.use_modified_loss:
x1_constant = tf.constant(self.x1.reshape((1, len(self.x1))), dtype=tf.float64)
self.t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_output")
self.boundary_t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_boundary_output")
with tf.name_scope("FC_net"):
flowing_x = self.x
for i, _ in enumerate(self.weights[0]):
with tf.name_scope("layer_{}".format(i)):
W = tf.constant(self.weights[0][i], name="Weight_{}".format(i), dtype=tf.float64)
b = tf.constant(self.weights[1][i], name="Bias_{}".format(i), dtype=tf.float64)
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b))
y = flowing_x
full_out = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out = list()
mask = True+np.zeros(self.num_classes, dtype=np.bool)
mask[self.base_class_index] = False
x0 = full_out[:,self.base_class_index]
x1 = tf.reduce_max(tf.boolean_mask(full_out, mask, axis=1), axis=1)
s = x0+x1
self.out = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out.append(self.out)
self.boundary_out.append(tf.stack([x1/s, x0/s], axis=1))
with tf.name_scope("Loss_functions"):
self.loss = tf.reduce_mean(
tf.nn.l2_loss(self.out-self.t),
name="loss")
with tf.name_scope("Binary_class_loss"):
self.boundary_loss = list()
if self.use_modified_loss:
for i in range(2):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i]-self.boundary_t)) +
self.mu * tf.reduce_mean(tf.nn.l2_loss(self.x - x1_constant))
)
else:
for i in range(2):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i] - self.boundary_t))
)
def build_for_eval(self):
with tf.name_scope("eInput"):
self.x_for_eval = tf.placeholder(tf.float32, shape=[None, len(self.weights[0][0])])#tf.Variable(tf.constant(self.initial_x), name="Boundary_point")
with tf.name_scope("eFC_net"):
flowing_x = self.x_for_eval
for i, _ in enumerate(self.weights[0]):
W = tf.constant(self.weights[0][i], name="eWeight_{}".format(i))
b = tf.constant(self.weights[1][i], name="eBias_{}".format(i))
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b), name="elayer_{}".format(i))
y = flowing_x
full_out = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out_for_eval = list()
mask = True+np.zeros(self.num_classes, dtype=np.bool)
mask[self.base_class_index] = False
x0 = full_out[:, self.base_class_index]
x1 = tf.reduce_max(tf.boolean_mask(full_out, mask, axis=1), axis=1)
s = x0+x1
self.out_for_eval = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out_for_eval.append(self.out_for_eval)
self.boundary_out_for_eval.append(tf.stack([x1/s, x0/s], axis=1))
def get_boundary_gradient(self, x, class_index):
if not self.grad_session:
self.grad_session = tf.Session()
with self.grad_session.as_default():
self.build_for_eval()
self.grad = list()
for i in range(2):
self.grad.append(tf.gradients(self.boundary_out_for_eval[i], [self.x_for_eval])[0])
self.grad_x = self.x_for_eval
return self.grad_session.run(self.grad[class_index], {self.grad_x: x})
def has_modified_loss(self):
return self.use_modified_loss
def name(self):
return "Hypinv_FC_net_{}".format("-".join([str(ls) for ls in self.layer_sizes]))
| 2.671875 | 3 |
py_tdlib/constructors/get_chat_member.py | Mr-TelegramBot/python-tdlib | 24 | 1018 | from ..factory import Method
class getChatMember(Method):
chat_id = None # type: "int53"
user_id = None # type: "int32"
| 1.796875 | 2 |
src/phrase_manager/phrase_manager.py | Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering | 0 | 1019 | import numpy
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from src.support import support
class PhraseManager:
def __init__(self, configuration):
self.train_phrases, self.train_labels = self._read_train_phrases()
self.test_phrases, self.test_labels = self._read_test_phrases()
self.configuration = configuration
self.tokenizer = None
def get_phrases_train(self):
return self.train_phrases, self.train_labels
def get_phrases_test(self):
return self.test_phrases, self.test_labels
def get_dataset(self, level = None):
if level == support.WORD_LEVEL:
return self._word_process(self.configuration[support.WORD_MAX_LENGTH])
elif level == support.CHAR_LEVEL:
return self._char_process(self.configuration[support.CHAR_MAX_LENGTH])
else:
return self.train_phrases, self.train_labels, self.test_phrases, self.test_labels
def _word_process(self, word_max_length):
tokenizer = Tokenizer(num_words=self.configuration[support.QUANTITY_WORDS])
tokenizer.fit_on_texts(self.train_phrases)
x_train_sequence = tokenizer.texts_to_sequences(self.train_phrases)
x_test_sequence = tokenizer.texts_to_sequences(self.test_phrases)
x_train = sequence.pad_sequences(x_train_sequence, maxlen=word_max_length, padding='post', truncating='post')
x_test = sequence.pad_sequences(x_test_sequence, maxlen=word_max_length, padding='post', truncating='post')
y_train = numpy.array(self.train_labels)
y_test = numpy.array(self.test_labels)
return x_train, y_train, x_test, y_test
def _char_process(self, max_length):
embedding_w, embedding_dic = self._onehot_dic_build()
x_train = []
for i in range(len(self.train_phrases)):
doc_vec = self._doc_process(self.train_phrases[i].lower(), embedding_dic, max_length)
x_train.append(doc_vec)
x_train = numpy.asarray(x_train, dtype='int64')
y_train = numpy.array(self.train_labels, dtype='float32')
x_test = []
for i in range(len( self.test_phrases)):
doc_vec = self._doc_process( self.test_phrases[i].lower(), embedding_dic, max_length)
x_test.append(doc_vec)
x_test = numpy.asarray(x_test, dtype='int64')
y_test = numpy.array(self.test_labels, dtype='float32')
del embedding_w, embedding_dic
return x_train, y_train, x_test, y_test
def _doc_process(self, doc, embedding_dic, max_length):
min_length = min(max_length, len(doc))
doc_vec = numpy.zeros(max_length, dtype='int64')
for j in range(min_length):
if doc[j] in embedding_dic:
doc_vec[j] = embedding_dic[doc[j]]
else:
doc_vec[j] = embedding_dic['UNK']
return doc_vec
def _onehot_dic_build(self):
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}"
embedding_dic = {}
embedding_w = []
embedding_dic["UNK"] = 0
embedding_w.append(numpy.zeros(len(alphabet), dtype='float32'))
for i, alpha in enumerate(alphabet):
onehot = numpy.zeros(len(alphabet), dtype='float32')
embedding_dic[alpha] = i + 1
onehot[i] = 1
embedding_w.append(onehot)
embedding_w = numpy.array(embedding_w, dtype='float32')
return embedding_w, embedding_dic
def get_tokenizer(self):
if self.tokenizer is None:
self.tokenizer = Tokenizer(num_words=self.configuration[support.QUANTITY_WORDS])
self.tokenizer.fit_on_texts(self.train_phrases)
return self.tokenizer
def text_to_vector_word(self, text):
vector_sequence = self.get_tokenizer().texts_to_sequences([text])
result = sequence.pad_sequences(vector_sequence, maxlen=self.configuration[support.WORD_MAX_LENGTH], padding='post', truncating='post')
return result
def text_to_vector_word_all(self, texts):
vector_sequence = self.get_tokenizer().texts_to_sequences(texts)
result = sequence.pad_sequences(vector_sequence, maxlen=self.configuration[support.WORD_MAX_LENGTH], padding='post', truncating='post')
return result
def text_to_vector_char(self, text):
embedding_dictionary = self._get_embedding_dictionary()
max_length = self.configuration[support.CHAR_MAX_LENGTH]
min_length = min(max_length, len(text))
text_vector = numpy.zeros(max_length, dtype="int64")
for j in range(min_length):
if text[j] in embedding_dictionary:
text_vector[j] = embedding_dictionary[text[j]]
else:
text_vector[j] = embedding_dictionary["UNK"]
return text_vector
def text_to_vector_char_all(self, texts):
embedding_w, embedding_dic = self._onehot_dic_build()
result = []
for i in range(len(texts)):
doc_vec = self.text_to_vector_char(texts[i].lower())
result.append(doc_vec)
result = numpy.asarray(result, dtype="int64")
del embedding_w, embedding_dic
return result
def _get_embedding_dictionary(self):
return {'UNK': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10,
'k': 11, 'l': 12,
'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17, 'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22,
'w': 23, 'x': 24,
'y': 25, 'z': 26, '0': 27, '1': 28, '2': 29, '3': 30, '4': 31, '5': 32, '6': 33, '7': 34,
'8': 35, '9': 36,
'-': 60, ',': 38, ';': 39, '.': 40, '!': 41, '?': 42, ':': 43, "'": 44, '"': 45, '/': 46,
'\\': 47, '|': 48,
'_': 49, '@': 50, '#': 51, '$': 52, '%': 53, '^': 54, '&': 55, '*': 56, '~': 57, '`': 58,
'+': 59, '=': 61,
'<': 62, '>': 63, '(': 64, ')': 65, '[': 66, ']': 67, '{': 68, '}': 69}
def get_classes(self):
pass
def _read_train_phrases(self):
pass
def _read_test_phrases(self):
pass
class Phrase:
def __init__(self, text, classification):
self.text = text
self.classification = classification
def __str__(self):
return "Classification: " + str(self.classification) + "\nText: " + self.text
| 2.796875 | 3 |
setup.py | fonar/paypalhttp_python | 0 | 1020 | from setuptools import setup
version = "1.0.0"
long_description = """
PayPalHttp is a generic http client designed to be used with code-generated projects.
"""
setup(
name="paypalhttp",
long_description=long_description,
version=version,
author="PayPal",
packages=["paypalhttp", "paypalhttp/testutils", "paypalhttp/serializers"],
install_requires=['requests>=2.0.0', 'six>=1.0.0', 'pyopenssl>=0.15'],
license="MIT",
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
| 1.4375 | 1 |
ooobuild/csslo/xml/__init__.py | Amourspirit/ooo_uno_tmpl | 0 | 1021 | <reponame>Amourspirit/ooo_uno_tmpl<filename>ooobuild/csslo/xml/__init__.py
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...lo.xml.attribute import Attribute as Attribute
from ...lo.xml.attribute_container import AttributeContainer as AttributeContainer
from ...lo.xml.attribute_data import AttributeData as AttributeData
from ...lo.xml.export_filter import ExportFilter as ExportFilter
from ...lo.xml.fast_attribute import FastAttribute as FastAttribute
from ...lo.xml.import_filter import ImportFilter as ImportFilter
from ...lo.xml.namespace_container import NamespaceContainer as NamespaceContainer
from ...lo.xml.para_user_defined_attributes_supplier import ParaUserDefinedAttributesSupplier as ParaUserDefinedAttributesSupplier
from ...lo.xml.text_user_defined_attributes_supplier import TextUserDefinedAttributesSupplier as TextUserDefinedAttributesSupplier
from ...lo.xml.user_defined_attributes_supplier import UserDefinedAttributesSupplier as UserDefinedAttributesSupplier
from ...lo.xml.x_export_filter import XExportFilter as XExportFilter
from ...lo.xml.x_import_filter import XImportFilter as XImportFilter
from ...lo.xml.x_import_filter2 import XImportFilter2 as XImportFilter2
from ...lo.xml.xml_export_filter import XMLExportFilter as XMLExportFilter
from ...lo.xml.xml_import_filter import XMLImportFilter as XMLImportFilter
| 1.054688 | 1 |
bluebottle/tasks/migrations/0012_merge.py | terrameijar/bluebottle | 10 | 1022 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-27 15:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tasks', '0011_auto_20160919_1508'),
('tasks', '0011_auto_20160920_1019'),
]
operations = [
]
| 1.359375 | 1 |
bat_train/evaluate.py | bgotthold-usgs/batdetect | 59 | 1023 | import numpy as np
from sklearn.metrics import roc_curve, auc
def compute_error_auc(op_str, gt, pred, prob):
# classification error
pred_int = (pred > prob).astype(np.int)
class_acc = (pred_int == gt).mean() * 100.0
# ROC - area under curve
fpr, tpr, thresholds = roc_curve(gt, pred)
roc_auc = auc(fpr, tpr)
print op_str, ', class acc = %.3f, ROC AUC = %.3f' % (class_acc, roc_auc)
#return class_acc, roc_auc
def calc_average_precision(recall, precision):
precision[np.isnan(precision)] = 0
recall[np.isnan(recall)] = 0
# pascal'12 way
mprec = np.hstack((0, precision, 0))
mrec = np.hstack((0, recall, 1))
for ii in range(mprec.shape[0]-2, -1,-1):
mprec[ii] = np.maximum(mprec[ii], mprec[ii+1])
inds = np.where(np.not_equal(mrec[1:], mrec[:-1]))[0]+1
ave_prec = ((mrec[inds] - mrec[inds-1])*mprec[inds]).sum()
return ave_prec
def remove_end_preds(nms_pos_o, nms_prob_o, gt_pos_o, durations, win_size):
# this filters out predictions and gt that are close to the end
# this is a bit messy because of the shapes of gt_pos_o
nms_pos = []
nms_prob = []
gt_pos = []
for ii in range(len(nms_pos_o)):
valid_time = durations[ii] - win_size
gt_cur = gt_pos_o[ii]
if gt_cur.shape[0] > 0:
gt_pos.append(gt_cur[:, 0][gt_cur[:, 0] < valid_time][..., np.newaxis])
else:
gt_pos.append(gt_cur)
valid_preds = nms_pos_o[ii] < valid_time
nms_pos.append(nms_pos_o[ii][valid_preds])
nms_prob.append(nms_prob_o[ii][valid_preds, 0][..., np.newaxis])
return nms_pos, nms_prob, gt_pos
def prec_recall_1d(nms_pos_o, nms_prob_o, gt_pos_o, durations, detection_overlap, win_size, remove_eof=True):
"""
nms_pos, nms_prob, and gt_pos are lists of numpy arrays specifying detection
position, detection probability and GT position.
Each list entry is a different file.
Each entry in nms_pos is an array of length num_entries. For nms_prob and
gt_pos its an array of size (num_entries, 1).
durations is a array of the length of the number of files with each entry
containing that file length in seconds.
detection_overlap determines if a prediction is counted as correct or not.
win_size is used to ignore predictions and ground truth at the end of an
audio file.
returns
precision: fraction of retrieved instances that are relevant.
recall: fraction of relevant instances that are retrieved.
"""
if remove_eof:
# filter out the detections in both ground truth and predictions that are too
# close to the end of the file - dont count them during eval
nms_pos, nms_prob, gt_pos = remove_end_preds(nms_pos_o, nms_prob_o, gt_pos_o, durations, win_size)
else:
nms_pos = nms_pos_o
nms_prob = nms_prob_o
gt_pos = gt_pos_o
# loop through each file
true_pos = [] # correctly predicts the ground truth
false_pos = [] # says there is a detection but isn't
for ii in range(len(nms_pos)):
num_preds = nms_pos[ii].shape[0]
if num_preds > 0: # check to make sure it contains something
num_gt = gt_pos[ii].shape[0]
# for each set of predictions label them as true positive or false positive (i.e. 1-tp)
tp = np.zeros(num_preds)
distance_to_gt = np.abs(gt_pos[ii].ravel()-nms_pos[ii].ravel()[:, np.newaxis])
within_overlap = (distance_to_gt <= detection_overlap)
# remove duplicate detections - assign to valid detection with highest prob
for jj in range(num_gt):
inds = np.where(within_overlap[:, jj])[0] # get the indices of all valid predictions
if inds.shape[0] > 0:
max_prob = np.argmax(nms_prob[ii][inds])
selected_pred = inds[max_prob]
within_overlap[selected_pred, :] = False
tp[selected_pred] = 1 # set as true positives
true_pos.append(tp)
false_pos.append(1 - tp)
# calc precision and recall - sort confidence in descending order
# PASCAL style
conf = np.concatenate(nms_prob)[:, 0]
num_gt = np.concatenate(gt_pos).shape[0]
inds = np.argsort(conf)[::-1]
true_pos_cat = np.concatenate(true_pos)[inds].astype(float)
false_pos_cat = np.concatenate(false_pos)[inds].astype(float) # i.e. 1-true_pos_cat
if (conf == conf[0]).sum() == conf.shape[0]:
# all the probability values are the same therefore we will not sweep
# the curve and instead will return a single value
true_pos_sum = true_pos_cat.sum()
false_pos_sum = false_pos_cat.sum()
recall = np.asarray([true_pos_sum / float(num_gt)])
precision = np.asarray([(true_pos_sum / (false_pos_sum + true_pos_sum))])
elif inds.shape[0] > 0:
# otherwise produce a list of values
true_pos_cum = np.cumsum(true_pos_cat)
false_pos_cum = np.cumsum(false_pos_cat)
recall = true_pos_cum / float(num_gt)
precision = (true_pos_cum / (false_pos_cum + true_pos_cum))
return precision, recall
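if __name__ == '__main__':
    # Toy sketch (all numbers are assumptions): one 10 s file with two ground truth calls
    # and three detections, two of which fall within 0.1 s of a ground truth call.
    nms_pos = [np.array([1.02, 4.5, 7.98])]
    nms_prob = [np.array([[0.9], [0.4], [0.8]])]
    gt_pos = [np.array([[1.0], [8.0]])]
    durations = np.array([10.0])
    precision, recall = prec_recall_1d(nms_pos, nms_prob, gt_pos, durations,
                                       detection_overlap=0.1, win_size=0.23)
    print('average precision = %.3f' % calc_average_precision(recall, precision))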
| 2.296875 | 2 |
azure-mgmt/tests/test_mgmt_network.py | SUSE/azure-sdk-for-python | 2 | 1024 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.network.models
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase
class MgmtNetworkTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtNetworkTest, self).setUp()
self.network_client = self.create_mgmt_client(
azure.mgmt.network.NetworkManagementClient
)
if not self.is_playback():
self.create_resource_group()
@record
def test_network_interface_card(self):
vnet_name = self.get_resource_name('pyvnet')
subnet_name = self.get_resource_name('pysubnet')
nic_name = self.get_resource_name('pynic')
# Create VNet
async_vnet_creation = self.network_client.virtual_networks.create_or_update(
self.group_name,
vnet_name,
{
'location': self.region,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
}
)
async_vnet_creation.wait()
# Create Subnet
async_subnet_creation = self.network_client.subnets.create_or_update(
self.group_name,
vnet_name,
subnet_name,
{'address_prefix': '10.0.0.0/24'}
)
subnet_info = async_subnet_creation.result()
# Create NIC
async_nic_creation = self.network_client.network_interfaces.create_or_update(
self.group_name,
nic_name,
{
'location': self.region,
'ip_configurations': [{
'name': 'MyIpConfig',
'subnet': {
'id': subnet_info.id
}
}]
}
)
nic_info = async_nic_creation.result()
nic_info = self.network_client.network_interfaces.get(
self.group_name,
nic_info.name
)
nics = list(self.network_client.network_interfaces.list(
self.group_name
))
self.assertEqual(len(nics), 1)
nics = list(self.network_client.network_interfaces.list_all())
self.assertGreater(len(nics), 0)
async_delete = self.network_client.network_interfaces.delete(
self.group_name,
nic_info.name
)
async_delete.wait()
@record
def test_load_balancers(self):
public_ip_name = self.get_resource_name('pyipname')
frontend_ip_name = self.get_resource_name('pyfipname')
addr_pool_name = self.get_resource_name('pyapname')
probe_name = self.get_resource_name('pyprobename')
lb_name = self.get_resource_name('pylbname')
front_end_id = ('/subscriptions/{}'
'/resourceGroups/{}'
'/providers/Microsoft.Network'
'/loadBalancers/{}'
'/frontendIPConfigurations/{}').format(
self.settings.SUBSCRIPTION_ID,
self.group_name,
lb_name,
frontend_ip_name
)
back_end_id = ('/subscriptions/{}'
'/resourceGroups/{}'
'/providers/Microsoft.Network'
'/loadBalancers/{}'
'/backendAddressPools/{}').format(
self.settings.SUBSCRIPTION_ID,
self.group_name,
lb_name,
addr_pool_name
)
probe_id = ('/subscriptions/{}'
'/resourceGroups/{}'
'/providers/Microsoft.Network'
'/loadBalancers/{}'
'/probes/{}').format(
self.settings.SUBSCRIPTION_ID,
self.group_name,
lb_name,
probe_name
)
# Create PublicIP
public_ip_parameters = {
'location': self.region,
'public_ip_allocation_method': 'static',
'idle_timeout_in_minutes': 4
}
async_publicip_creation = self.network_client.public_ip_addresses.create_or_update(
self.group_name,
public_ip_name,
public_ip_parameters
)
public_ip_info = async_publicip_creation.result()
# Building a FrontEndIpPool
frontend_ip_configurations = [{
'name': frontend_ip_name,
'private_ip_allocation_method': 'Dynamic',
'public_ip_address': {
'id': public_ip_info.id
}
}]
        # Building a BackEnd address pool
backend_address_pools = [{
'name': addr_pool_name
}]
# Building a HealthProbe
probes = [{
'name': probe_name,
'protocol': 'Http',
'port': 80,
'interval_in_seconds': 15,
'number_of_probes': 4,
'request_path': 'healthprobe.aspx'
}]
# Building a LoadBalancer rule
load_balancing_rules = [{
'name': 'azure-sample-lb-rule',
'protocol': 'tcp',
'frontend_port': 80,
'backend_port': 80,
'idle_timeout_in_minutes': 4,
'enable_floating_ip': False,
'load_distribution': 'Default',
'frontend_ip_configuration': {
'id': front_end_id
},
'backend_address_pool': {
'id': back_end_id
},
'probe': {
'id': probe_id
}
}]
# Building InboundNATRule1
inbound_nat_rules = [{
'name': 'azure-sample-netrule1',
'protocol': 'tcp',
'frontend_port': 21,
'backend_port': 22,
'enable_floating_ip': False,
'idle_timeout_in_minutes': 4,
'frontend_ip_configuration': {
'id': front_end_id
}
}]
# Building InboundNATRule2
inbound_nat_rules.append({
'name': 'azure-sample-netrule2',
'protocol': 'tcp',
'frontend_port': 23,
'backend_port': 22,
'enable_floating_ip': False,
'idle_timeout_in_minutes': 4,
'frontend_ip_configuration': {
'id': front_end_id
}
})
# Creating Load Balancer
lb_async_creation = self.network_client.load_balancers.create_or_update(
self.group_name,
lb_name,
{
'location': self.region,
'frontend_ip_configurations': frontend_ip_configurations,
'backend_address_pools': backend_address_pools,
'probes': probes,
'load_balancing_rules': load_balancing_rules,
                'inbound_nat_rules': inbound_nat_rules
}
)
lb_info = lb_async_creation.result()
# Get it
lb_info = self.network_client.load_balancers.get(
self.group_name,
lb_name
)
# List all
lbs = self.network_client.load_balancers.list_all()
lbs = list(lbs)
self.assertGreater(len(lbs), 0)
# List RG
lbs = self.network_client.load_balancers.list(self.group_name)
lbs = list(lbs)
self.assertGreater(len(lbs), 0)
# Delete
async_lb_delete = self.network_client.load_balancers.delete(
self.group_name,
lb_name
)
async_lb_delete.wait()
@record
def test_public_ip_addresses(self):
public_ip_name = self.get_resource_name('pyipname')
params_create = azure.mgmt.network.models.PublicIPAddress(
location=self.region,
public_ip_allocation_method=azure.mgmt.network.models.IPAllocationMethod.dynamic,
tags={
'key': 'value',
},
)
result_create = self.network_client.public_ip_addresses.create_or_update(
self.group_name,
public_ip_name,
params_create,
)
result_create.wait() # AzureOperationPoller
#self.assertEqual(result_create.status_code, HttpStatusCode.OK)
result_get = self.network_client.public_ip_addresses.get(
self.group_name,
public_ip_name,
)
#self.assertEqual(result_get.status_code, HttpStatusCode.OK)
self.assertEqual(result_get.location, self.region)
self.assertEqual(result_get.tags['key'], 'value')
result_list = self.network_client.public_ip_addresses.list(self.group_name)
#self.assertEqual(result_list.status_code, HttpStatusCode.OK)
result_list = list(result_list)
self.assertEqual(len(result_list), 1)
result_list_all = self.network_client.public_ip_addresses.list_all()
#self.assertEqual(result_list_all.status_code, HttpStatusCode.OK)
result_list_all = list(result_list_all)
self.assertGreater(len(result_list_all), 0)
result_delete = self.network_client.public_ip_addresses.delete(
self.group_name,
public_ip_name,
)
result_delete.wait() # AzureOperationPoller
#self.assertEqual(result_delete.status_code, HttpStatusCode.OK)
result_list = self.network_client.public_ip_addresses.list(self.group_name)
#self.assertEqual(result_list.status_code, HttpStatusCode.OK)
result_list = list(result_list)
self.assertEqual(len(result_list), 0)
@record
def test_virtual_networks(self):
network_name = self.get_resource_name('pyvnet')
subnet1_name = self.get_resource_name('pyvnetsubnetone')
subnet2_name = self.get_resource_name('pyvnetsubnettwo')
params_create = azure.mgmt.network.models.VirtualNetwork(
location=self.region,
address_space=azure.mgmt.network.models.AddressSpace(
address_prefixes=[
'10.0.0.0/16',
],
),
dhcp_options=azure.mgmt.network.models.DhcpOptions(
dns_servers=[
'10.1.1.1',
'10.1.2.4',
],
),
subnets=[
azure.mgmt.network.models.Subnet(
name=subnet1_name,
address_prefix='10.0.1.0/24',
),
azure.mgmt.network.models.Subnet(
name=subnet2_name,
address_prefix='10.0.2.0/24',
),
],
)
result_create = self.network_client.virtual_networks.create_or_update(
self.group_name,
network_name,
params_create,
)
vnet = result_create.result()
vnet = self.network_client.virtual_networks.get(
self.group_name,
vnet.name,
)
ip_availability = self.network_client.virtual_networks.check_ip_address_availability(
self.group_name,
vnet.name,
            '10.0.1.35'  # Should be available since the VNet is new; the address falls in Subnet 1
)
self.assertTrue(ip_availability.available)
result_list = list(self.network_client.virtual_networks.list(
self.group_name,
))
self.assertEqual(len(result_list), 1)
result_list_all = list(self.network_client.virtual_networks.list_all())
async_delete = self.network_client.virtual_networks.delete(
self.group_name,
network_name,
)
async_delete.wait()
@record
def test_dns_availability(self):
result_check = self.network_client.check_dns_name_availability(
self.region,
'pydomain',
)
#self.assertEqual(result_check.status_code, HttpStatusCode.OK)
self.assertTrue(result_check)
@record
def test_subnets(self):
network_name = self.get_resource_name('pysubnet')
subnet1_name = self.get_resource_name('pysubnetone')
subnet2_name = self.get_resource_name('pysubnettwo')
params_create = azure.mgmt.network.models.VirtualNetwork(
location=self.region,
address_space=azure.mgmt.network.models.AddressSpace(
address_prefixes=[
'10.0.0.0/16',
],
),
dhcp_options=azure.mgmt.network.models.DhcpOptions(
dns_servers=[
'10.1.1.1',
'10.1.2.4',
],
),
subnets=[
azure.mgmt.network.models.Subnet(
name=subnet1_name,
address_prefix='10.0.1.0/24',
),
],
)
result_create = self.network_client.virtual_networks.create_or_update(
self.group_name,
network_name,
params_create,
)
result_create.wait() # AzureOperationPoller
params_create = azure.mgmt.network.models.Subnet(
name=subnet2_name,
address_prefix='10.0.2.0/24',
)
result_create = self.network_client.subnets.create_or_update(
self.group_name,
network_name,
subnet2_name,
params_create,
)
result_create.wait() # AzureOperationPoller
result_get = self.network_client.virtual_networks.get(
self.group_name,
network_name,
)
self.assertEqual(len(result_get.subnets), 2)
result_get = self.network_client.subnets.get(
self.group_name,
network_name,
subnet2_name,
)
result_list = self.network_client.subnets.list(
self.group_name,
network_name,
)
subnets = list(result_list)
result_delete = self.network_client.subnets.delete(
self.group_name,
network_name,
subnet2_name,
)
result_delete.wait()
@record
def test_network_security_groups(self):
security_group_name = self.get_resource_name('pysecgroup')
security_rule_name = self.get_resource_name('pysecgrouprule')
params_create = azure.mgmt.network.models.NetworkSecurityGroup(
location=self.region,
security_rules=[
azure.mgmt.network.models.SecurityRule(
name=security_rule_name,
access=azure.mgmt.network.models.SecurityRuleAccess.allow,
description='Test security rule',
destination_address_prefix='*',
destination_port_range='123-3500',
direction=azure.mgmt.network.models.SecurityRuleDirection.inbound,
priority=500,
protocol=azure.mgmt.network.models.SecurityRuleProtocol.tcp,
source_address_prefix='*',
source_port_range='655',
),
],
)
result_create = self.network_client.network_security_groups.create_or_update(
self.group_name,
security_group_name,
params_create,
)
result_create.wait() # AzureOperationPoller
result_get = self.network_client.network_security_groups.get(
self.group_name,
security_group_name,
)
result_list = list(self.network_client.network_security_groups.list(
self.group_name,
))
self.assertEqual(len(result_list), 1)
result_list_all = list(self.network_client.network_security_groups.list_all())
# Security Rules
new_security_rule_name = self.get_resource_name('pynewrule')
async_security_rule = self.network_client.security_rules.create_or_update(
self.group_name,
security_group_name,
new_security_rule_name,
{
'access':azure.mgmt.network.models.SecurityRuleAccess.allow,
'description':'New Test security rule',
'destination_address_prefix':'*',
'destination_port_range':'123-3500',
'direction':azure.mgmt.network.models.SecurityRuleDirection.outbound,
'priority':400,
'protocol':azure.mgmt.network.models.SecurityRuleProtocol.tcp,
'source_address_prefix':'*',
'source_port_range':'655',
}
)
security_rule = async_security_rule.result()
security_rule = self.network_client.security_rules.get(
self.group_name,
security_group_name,
security_rule.name
)
self.assertEqual(security_rule.name, new_security_rule_name)
new_security_rules = list(self.network_client.security_rules.list(
self.group_name,
security_group_name
))
self.assertEqual(len(new_security_rules), 2)
result_delete = self.network_client.security_rules.delete(
self.group_name,
security_group_name,
new_security_rule_name
)
result_delete.wait()
# Delete NSG
result_delete = self.network_client.network_security_groups.delete(
self.group_name,
security_group_name,
)
result_delete.wait()
@record
def test_routes(self):
route_table_name = self.get_resource_name('pyroutetable')
route_name = self.get_resource_name('pyroute')
async_route_table = self.network_client.route_tables.create_or_update(
self.group_name,
route_table_name,
{'location': self.region}
)
route_table = async_route_table.result()
route_table = self.network_client.route_tables.get(
self.group_name,
route_table.name
)
self.assertEqual(route_table.name, route_table_name)
route_tables = list(self.network_client.route_tables.list(
self.group_name
))
self.assertEqual(len(route_tables), 1)
route_tables = list(self.network_client.route_tables.list_all())
self.assertGreater(len(route_tables), 0)
async_route = self.network_client.routes.create_or_update(
self.group_name,
route_table.name,
route_name,
{
'address_prefix': '10.1.0.0/16',
'next_hop_type': 'None'
}
)
route = async_route.result()
route = self.network_client.routes.get(
self.group_name,
route_table.name,
route.name
)
self.assertEqual(route.name, route_name)
routes = list(self.network_client.routes.list(
self.group_name,
route_table.name
))
self.assertEqual(len(routes), 1)
async_route_delete = self.network_client.routes.delete(
self.group_name,
route_table.name,
route.name
)
async_route_delete.wait()
async_route_table_delete = self.network_client.route_tables.delete(
self.group_name,
route_table_name
)
async_route_table_delete.wait()
@record
def test_usages(self):
usages = list(self.network_client.usages.list(self.region))
self.assertGreater(len(usages), 1)
self.assertTrue(all(hasattr(u, 'name') for u in usages))
@record
def test_express_route_service_providers(self):
ersp = list(self.network_client.express_route_service_providers.list())
self.assertGreater(len(ersp), 0)
self.assertTrue(all(hasattr(u, 'bandwidths_offered') for u in ersp))
@record
def test_express_route_circuit(self):
express_route_name = self.get_resource_name('pyexpressroute')
async_express_route = self.network_client.express_route_circuits.create_or_update(
self.group_name,
express_route_name,
{
"location": self.region,
"sku": {
"name": "Standard_MeteredData",
"tier": "Standard",
"family": "MeteredData"
},
"service_provider_properties": {
"service_provider_name": "Comcast",
"peering_location": "Chicago",
"bandwidth_in_mbps": 100
}
}
)
express_route = async_express_route.result()
express_route = self.network_client.express_route_circuits.get(
self.group_name,
express_route_name
)
routes = list(self.network_client.express_route_circuits.list(
self.group_name
))
self.assertEqual(len(routes), 1)
routes = list(self.network_client.express_route_circuits.list_all())
self.assertGreater(len(routes), 0)
stats = self.network_client.express_route_circuits.get_stats(
self.group_name,
express_route_name
)
self.assertIsNotNone(stats)
async_peering = self.network_client.express_route_circuit_peerings.create_or_update(
self.group_name,
express_route_name,
'AzurePublicPeering',
{
"peering_type": "AzurePublicPeering",
"peer_asn": 100,
"primary_peer_address_prefix": "192.168.1.0/30",
"secondary_peer_address_prefix": "192.168.2.0/30",
"vlan_id": 200,
}
)
peering = async_peering.result()
peering = self.network_client.express_route_circuit_peerings.get(
self.group_name,
express_route_name,
'AzurePublicPeering'
)
peerings = list(self.network_client.express_route_circuit_peerings.list(
self.group_name,
express_route_name
))
self.assertEqual(len(peerings), 1)
stats = self.network_client.express_route_circuits.get_peering_stats(
self.group_name,
express_route_name,
'AzurePublicPeering'
)
self.assertIsNotNone(stats)
auth_name = self.get_resource_name('pyauth')
async_auth = self.network_client.express_route_circuit_authorizations.create_or_update(
self.group_name,
express_route_name,
auth_name,
{}
)
auth = async_auth.result()
auth = self.network_client.express_route_circuit_authorizations.get(
self.group_name,
express_route_name,
auth_name
)
auths = list(self.network_client.express_route_circuit_authorizations.list(
self.group_name,
express_route_name
))
self.assertEqual(len(auths), 1)
async_auth = self.network_client.express_route_circuit_authorizations.delete(
self.group_name,
express_route_name,
auth_name
)
async_auth.wait()
async_peering = self.network_client.express_route_circuit_peerings.delete(
self.group_name,
express_route_name,
'AzurePublicPeering'
)
async_peering.wait()
async_express_route = self.network_client.express_route_circuits.delete(
self.group_name,
express_route_name
)
async_express_route.wait()
@record
def test_virtual_network_gateway_operations(self):
# https://docs.microsoft.com/en-us/azure/vpn-gateway/vpn-gateway-howto-site-to-site-resource-manager-portal
vnet_name = self.get_resource_name('pyvirtnet')
fe_name = self.get_resource_name('pysubnetfe')
be_name = self.get_resource_name('pysubnetbe')
gateway_name = self.get_resource_name('pysubnetga')
# Create VNet
async_vnet_creation = self.network_client.virtual_networks.create_or_update(
self.group_name,
vnet_name,
{
'location': self.region,
'address_space': {
'address_prefixes': [
'10.11.0.0/16',
'10.12.0.0/16'
]
}
}
)
async_vnet_creation.wait()
# Create Front End Subnet
async_subnet_creation = self.network_client.subnets.create_or_update(
self.group_name,
vnet_name,
fe_name,
{'address_prefix': '10.11.0.0/24'}
)
fe_subnet_info = async_subnet_creation.result()
# Create Back End Subnet
async_subnet_creation = self.network_client.subnets.create_or_update(
self.group_name,
vnet_name,
be_name,
{'address_prefix': '10.12.0.0/24'}
)
be_subnet_info = async_subnet_creation.result()
# Create Gateway Subnet
async_subnet_creation = self.network_client.subnets.create_or_update(
self.group_name,
vnet_name,
'GatewaySubnet',
{'address_prefix': '10.12.255.0/27'}
)
gateway_subnet_info = async_subnet_creation.result()
# Public IP Address
public_ip_name = self.get_resource_name('pyipname')
params_create = azure.mgmt.network.models.PublicIPAddress(
location=self.region,
public_ip_allocation_method=azure.mgmt.network.models.IPAllocationMethod.dynamic,
tags={
'key': 'value',
},
)
result_create = self.network_client.public_ip_addresses.create_or_update(
self.group_name,
public_ip_name,
params_create,
)
public_ip_address = result_create.result()
# Gateway itself
vng_name = self.get_resource_name('pyvng')
gw_params = {
'location': self.region,
'gateway_type': 'VPN',
'vpn_type': 'RouteBased',
'enable_bgp': False,
'sku': {
'tier': 'Standard',
'capacity': 2,
'name': 'Standard'},
'ip_configurations':[{
'name': 'default',
'private_ip_allocation_method': 'Dynamic',
'subnet': {
'id': gateway_subnet_info.id
},
'public_ip_address': {
'id': public_ip_address.id
}
}],
}
async_create = self.network_client.virtual_network_gateways.create_or_update(
self.group_name,
vng_name,
gw_params
)
vng = async_create.result()
        self.assertEqual(vng.name, vng_name)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 2 | 2 |
Networks/Threading/server.py | polbebe/PinkPanther | 0 | 1025 | import gym
import gym.spaces as spaces
import sys
import socket
from _thread import *
import os
import numpy as np
import pandas as pd
import math as m
import time
import random
class NetEnv(gym.Env):
def __init__(self):
# Robot State values that will be bounced with client
self.robot_state = None
self.pos = None
self.message = np.array(12345, dtype=np.float32)
        # Socket connection
        # macOS: find the Wi-Fi IP with `ipconfig getifaddr en0`
HOST = '192.168.1.29'
# Port to listen on (non-privileged ports are > 1023)
PORT = 65432
self.ThreadCount = 0
print('Connected')
# Set up Socket
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.s.bind((HOST, PORT))
except socket.error as e:
print(str(e))
print('Waiting for connection[s]...')
self.s.listen()
self.start = 0
# Wait for client[s] to join socket
self.conn1, addr1 = self.s.accept()
print('Connected by: ', addr1)
start_new_thread(self.main_client_thread, (self.conn1, ))
self.conn2, addr2 = self.s.accept()
print('Connected by: ', addr2)
start_new_thread(self.cam_client_thread, (self.conn2, ))
def main_client_thread(self, conn):
data = conn.recv(1024)
print('Main client says: {}'.format(data.decode('utf-8')))
conn.sendall(str.encode('Hi'))
def cam_client_thread(self, conn):
data = conn.recv(1024)
print('Cam client says: {}'.format(data.decode('utf-8')))
conn.sendall(str.encode('Hi'))
def step(self):
self.main_client_thread(self.conn1)
self.cam_client_thread(self.conn2)
if __name__ == '__main__':
# Construct MAIN SERVER object
env = NetEnv()
# WALK
for i in range(100000):
env.step()
print('Done')
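# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal client that exercises main_client_thread above: it connects, sends a
# greeting, and prints the single reply the server sends back. The host/port
# defaults are assumptions copied from the server setup; the payload is arbitrary.
def _example_client(host='192.168.1.29', port=65432):
    import socket
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((host, port))
        client.sendall('hello from client'.encode('utf-8'))
        # main_client_thread answers each new connection with 'Hi'
        print('Server says:', client.recv(1024).decode('utf-8'))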
| 2.71875 | 3 |
backend/app/app/db/session.py | zhkuo24/full-stack-fastapi-demo | 7 | 1026 | <reponame>zhkuo24/full-stack-fastapi-demo<gh_stars>1-10
# -*- coding: utf-8 -*-
# @File : session.py
# @Author : zhkuo
# @Time   : 2021/1/3 9:12 PM
# @Desc :
from sqlalchemy import create_engine
# from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from app.core.config import settings
"""
References:
https://www.osgeo.cn/sqlalchemy/orm/session_basics.html
https://landybird.github.io/python/2020/03/02/fastapi%E4%B8%8Easgi(5)/
Different ways to handle the session: https://github.com/tiangolo/fastapi/issues/726
Ways to handle the database session:
1. scoped_session provided by sqlalchemy.orm
2. middleware approach: open one db connection per request
3. dependency approach (the method recommended by the official docs)
"""
# Create the engine that connects to the database
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, connect_args={"check_same_thread": False})
# To guarantee thread safety, use the scoped_session approach
# db_session = scoped_session(
# sessionmaker(autocommit=False, autoflush=False, bind=engine)
# )
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
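# Hedged sketch of approach 3 (dependency injection) listed in the docstring above.
# This helper is modeled on the FastAPI documentation and is not part of the
# original file; a path operation would declare `db: Session = Depends(get_db)`.
def get_db():
    # Open one session per request and always close it when the request is done.
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()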
| 2.09375 | 2 |
src/tkdialog/dialog.py | KosukeMizuno/tkdialog | 0 | 1027 | <reponame>KosukeMizuno/tkdialog
from pathlib import Path
import pickle
import tkinter as tk
import tkinter.filedialog
def open_dialog(**opt):
"""Parameters
----------
Options will be passed to `tkinter.filedialog.askopenfilename`.
See also tkinter's document.
    The following are examples of frequently used options.
- filetypes=[(label, ext), ...]
- label: str
        - ext: str, semicolon-separated extensions
- initialdir: str, default Path.cwd()
- multiple: bool, default False
Returns
--------
filename, str
"""
root = tk.Tk()
root.withdraw()
root.wm_attributes("-topmost", True)
opt_default = dict(initialdir=Path.cwd())
_opt = dict(opt_default, **opt)
return tk.filedialog.askopenfilename(**_opt)
def saveas_dialog(**opt):
"""Parameters
----------
Options will be passed to `tkinter.filedialog.asksaveasfilename`.
See also tkinter's document.
    The following are examples of frequently used options.
- filetypes=[(label, ext), ...]
- label: str
        - ext: str, semicolon-separated extensions
- initialdir: str, default Path.cwd()
- initialfile: str, default isn't set
Returns
--------
filename, str
"""
root = tk.Tk()
root.withdraw()
root.wm_attributes("-topmost", True)
opt_default = dict(initialdir=Path.cwd())
_opt = dict(opt_default, **opt)
return tk.filedialog.asksaveasfilename(**_opt)
def load_pickle_with_dialog(mode='rb', **opt):
"""Load a pickled object with a filename assigned by tkinter's open dialog.
    kwargs will be passed to open_dialog.
"""
opt_default = dict(filetypes=[('pickled data', '*.pkl'), ('all', '*')])
_opt = dict(opt_default, **opt)
fn = open_dialog(**_opt)
if fn == '': # canceled
return None
with Path(fn).open(mode) as f:
data = pickle.load(f)
return data
def dump_pickle_with_dialog(obj, mode='wb', **opt):
"""Pickle an object with a filename assigned by tkinter's saveas dialog.
kwargs will be passed to saveas_dialog.
Returns
--------
filename: str
"""
opt_default = dict(filetypes=[('pickled data', '*.pkl'), ('all', '*')])
_opt = dict(opt_default, **opt)
fn = saveas_dialog(**_opt)
if fn == '': # canceled
return ''
    # note: tkinter already asks for confirmation before overwriting, so no check is done here
with Path(fn).open(mode) as f:
pickle.dump(obj, f)
return fn
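# Hedged usage sketch (illustration only; not part of the original module).
# The dictionary and the suggested file name below are arbitrary examples.
if __name__ == '__main__':
    # Pick a target file via the save-as dialog and pickle a small dict into it.
    saved_to = dump_pickle_with_dialog({'answer': 42}, initialfile='example.pkl')
    print('saved to:', saved_to)
    # Load it back through the open dialog.
    print('loaded:', load_pickle_with_dialog())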
| 3.484375 | 3 |
cinder/tests/unit/fake_group_snapshot.py | lightsey/cinder | 571 | 1028 | <filename>cinder/tests/unit/fake_group_snapshot.py
# Copyright 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
from cinder import objects
from cinder.tests.unit import fake_constants as fake
def fake_db_group_snapshot(**updates):
db_group_snapshot = {
'id': fake.GROUP_SNAPSHOT_ID,
'name': 'group-1',
'status': 'available',
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'group_type_id': fake.GROUP_TYPE_ID,
'group_id': fake.GROUP_ID,
}
for name, field in objects.GroupSnapshot.fields.items():
if name in db_group_snapshot:
continue
if field.nullable:
db_group_snapshot[name] = None
elif field.default != fields.UnspecifiedDefault:
db_group_snapshot[name] = field.default
else:
raise Exception('fake_db_group_snapshot needs help with %s.'
% name)
if updates:
db_group_snapshot.update(updates)
return db_group_snapshot
def fake_group_snapshot_obj(context, **updates):
return objects.GroupSnapshot._from_db_object(
context, objects.GroupSnapshot(), fake_db_group_snapshot(**updates))
| 1.992188 | 2 |
src/tree_visualizer.py | szymanskir/msi | 0 | 1029 | <gh_stars>0
import matplotlib.pyplot as plt
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
def display_resolution_tree(resolution_tree: nx.classes.DiGraph):
_draw_resolution_tree_(resolution_tree)
plt.show()
def _draw_resolution_tree_(tree: nx.classes.DiGraph, enable_edge_labels: bool = True, rotate_edge_labels: bool = False):
plt.figure()
# graph
nodes_pos = graphviz_layout(tree, prog='dot')
nx.draw(tree, nodes_pos,
node_size=150, edge_color='#7d7d7d')
# nodes labels
pos_attrs = {}
for node, coords in nodes_pos.items():
pos_attrs[node] = (coords[0], coords[1] - 10)
custom_node_attrs = {}
for node, attr in tree.nodes.items():
custom_node_attrs[node] = str(node)
nodes_bbox = dict(facecolor="w", edgecolor="#d3d3d3", pad=6, lw=0.1)
nx.draw_networkx_labels(
tree, pos_attrs, labels=custom_node_attrs, font_size=13, bbox=nodes_bbox)
# edge labels
if enable_edge_labels:
edges_pos = graphviz_layout(tree, prog='dot')
edge_labels = nx.get_edge_attributes(tree, 'transformation')
nx.draw_networkx_edge_labels(
tree, pos=edges_pos, edge_labels=edge_labels, font_size=13, rotate=rotate_edge_labels)
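# Hedged usage sketch (illustration only; not part of the original file): build a
# tiny tree carrying the 'transformation' edge attribute the drawing code reads,
# then render it. Node names and edge labels are arbitrary examples; graphviz and
# pygraphviz must be installed for graphviz_layout to work.
if __name__ == '__main__':
    example_tree = nx.DiGraph()
    example_tree.add_edge('{p, ~q}', '{p}', transformation='resolve on q')
    example_tree.add_edge('{q}', '{p}', transformation='resolve on q')
    display_resolution_tree(example_tree)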
| 2.5625 | 3 |
setup.py | SilicalNZ/canvas | 7 | 1030 | <filename>setup.py
import setuptools
setuptools.setup(
name = 'sili-canvas',
version = '0.0.1',
license = 'MIT',
url = 'https://github.com/SilicalNZ/canvas',
description = 'A series of easy to use classes to perform complex 2D array transformations',
long_description = '',
author = 'SilicalNZ',
packages = ['canvas', 'canvas.common', 'canvas.tools']
)
| 1.101563 | 1 |
tests/viz_tests.py | theoretical-olive/incubator-superset | 2 | 1031 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import uuid
from datetime import datetime
import logging
from math import nan
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS
from .base_tests import SupersetTestCase
from .utils import load_fixture
logger = logging.getLogger(__name__)
class BaseVizTestCase(SupersetTestCase):
def test_constructor_exception_no_datasource(self):
form_data = {}
datasource = None
with self.assertRaises(Exception):
viz.BaseViz(datasource, form_data)
def test_process_metrics(self):
# test TableViz metrics in correct order
form_data = {
"url_params": {},
"row_limit": 500,
"metric": "sum__SP_POP_TOTL",
"entity": "country_code",
"secondary_metric": "sum__SP_POP_TOTL",
"granularity_sqla": "year",
"page_length": 0,
"all_columns": [],
"viz_type": "table",
"since": "2014-01-01",
"until": "2014-01-02",
"metrics": ["sum__SP_POP_TOTL", "SUM(SE_PRM_NENR_MA)", "SUM(SP_URB_TOTL)"],
"country_fieldtype": "cca3",
"percent_metrics": ["count"],
"slice_id": 74,
"time_grain_sqla": None,
"order_by_cols": [],
"groupby": ["country_name"],
"compare_lag": "10",
"limit": "25",
"datasource": "2__table",
"table_timestamp_format": "%Y-%m-%d %H:%M:%S",
"markup_type": "markdown",
"where": "",
"compare_suffix": "o10Y",
}
datasource = Mock()
datasource.type = "table"
test_viz = viz.BaseViz(datasource, form_data)
expect_metric_labels = [
u"sum__SP_POP_TOTL",
u"SUM(SE_PRM_NENR_MA)",
u"SUM(SP_URB_TOTL)",
u"count",
]
self.assertEqual(test_viz.metric_labels, expect_metric_labels)
self.assertEqual(test_viz.all_metrics, expect_metric_labels)
def test_get_df_returns_empty_df(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
datasource = self.get_datasource_mock()
test_viz = viz.BaseViz(datasource, form_data)
result = test_viz.get_df(query_obj)
self.assertEqual(type(result), pd.DataFrame)
self.assertTrue(result.empty)
def test_get_df_handles_dttm_col(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = Mock()
datasource = Mock()
datasource.type = "table"
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
test_viz = viz.BaseViz(datasource, form_data)
test_viz.df_metrics_to_num = Mock()
test_viz.get_fillna_for_columns = Mock(return_value=0)
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01 05:00:00"]})
datasource.offset = 0
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
mock_dttm_col.python_date_format = "epoch_ms"
result = test_viz.get_df(query_obj)
        logger.info(result)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
mock_dttm_col.python_date_format = None
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
datasource.offset = 1
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS)
)
datasource.offset = 0
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01"]})
mock_dttm_col.python_date_format = "%Y-%m-%d"
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 0, 0)], name=DTTM_ALIAS)
)
def test_cache_timeout(self):
datasource = self.get_datasource_mock()
datasource.cache_timeout = 0
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(0, test_viz.cache_timeout)
datasource.cache_timeout = 156
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(156, test_viz.cache_timeout)
datasource.cache_timeout = None
datasource.database.cache_timeout = 0
self.assertEqual(0, test_viz.cache_timeout)
datasource.database.cache_timeout = 1666
self.assertEqual(1666, test_viz.cache_timeout)
datasource.database.cache_timeout = None
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(app.config["CACHE_DEFAULT_TIMEOUT"], test_viz.cache_timeout)
class TableVizTestCase(SupersetTestCase):
def test_get_data_applies_percentage(self):
form_data = {
"groupby": ["groupA", "groupB"],
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"count",
"avg__C",
],
"percent_metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"avg__B",
],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"SUM(value1)": [15, 20, 25, 40],
"avg__B": [10, 20, 5, 15],
"avg__C": [11, 22, 33, 44],
"count": [6, 7, 8, 9],
"groupA": ["A", "B", "C", "C"],
"groupB": ["x", "x", "y", "z"],
}
)
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data and computes percents
self.assertEqual(
[
"groupA",
"groupB",
"SUM(value1)",
"count",
"avg__C",
"%SUM(value1)",
"%avg__B",
],
list(data["columns"]),
)
expected = [
{
"groupA": "A",
"groupB": "x",
"SUM(value1)": 15,
"count": 6,
"avg__C": 11,
"%SUM(value1)": 0.15,
"%avg__B": 0.2,
},
{
"groupA": "B",
"groupB": "x",
"SUM(value1)": 20,
"count": 7,
"avg__C": 22,
"%SUM(value1)": 0.2,
"%avg__B": 0.4,
},
{
"groupA": "C",
"groupB": "y",
"SUM(value1)": 25,
"count": 8,
"avg__C": 33,
"%SUM(value1)": 0.25,
"%avg__B": 0.1,
},
{
"groupA": "C",
"groupB": "z",
"SUM(value1)": 40,
"count": 9,
"avg__C": 44,
"%SUM(value1)": 0.4,
"%avg__B": 0.3,
},
]
self.assertEqual(expected, data["records"])
def test_parse_adhoc_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SIMPLE",
"clause": "HAVING",
"subject": "SUM(value1)",
"operator": "<",
"comparator": "10",
},
{
"expressionType": "SQL",
"clause": "HAVING",
"sqlExpression": "SUM(value1) > 5",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual(
[{"op": "<", "val": "10", "col": "SUM(value1)"}],
query_obj["extras"]["having_druid"],
)
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("(SUM(value1) > 5)", query_obj["extras"]["having"])
def test_adhoc_filters_overwrite_legacy_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
"having": "SUM(value1) > 5",
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual([], query_obj["extras"]["having_druid"])
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("", query_obj["extras"]["having"])
def test_query_obj_merges_percent_metrics(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["sum__A", "count", "avg__C"],
"percent_metrics": ["sum__A", "avg__B", "max__Y"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
["sum__A", "count", "avg__C", "avg__B", "max__Y"], query_obj["metrics"]
)
def test_query_obj_throws_columns_and_metrics(self):
datasource = self.get_datasource_mock()
form_data = {"all_columns": ["A", "B"], "metrics": ["x", "y"]}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
del form_data["metrics"]
form_data["groupby"] = ["B", "C"]
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_merges_all_columns(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
"all_columns": ["colA", "colB", "colC"],
"order_by_cols": ['["colA", "colB"]', '["colC"]'],
}
super_query_obj.return_value = {
"columns": ["colD", "colC"],
"groupby": ["colA", "colB"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(form_data["all_columns"], query_obj["columns"])
self.assertEqual([], query_obj["groupby"])
self.assertEqual([["colA", "colB"], ["colC"]], query_obj["orderby"])
def test_query_obj_uses_sortby(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["colA", "colB"],
"order_desc": False,
}
def run_test(metric):
form_data["timeseries_limit_metric"] = metric
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(["colA", "colB", metric], query_obj["metrics"])
self.assertEqual([(metric, True)], query_obj["orderby"])
run_test("simple_metric")
run_test(
{
"label": "adhoc_metric",
"expressionType": "SIMPLE",
"aggregate": "SUM",
"column": {"column_name": "sort_column",},
}
)
def test_should_be_timeseries_raises_when_no_granularity(self):
datasource = self.get_datasource_mock()
form_data = {"include_time": True}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.should_be_timeseries()
def test_adhoc_metric_with_sortby(self):
metrics = [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "sum_value",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
]
form_data = {
"metrics": metrics,
"timeseries_limit_metric": {
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"order_desc": False,
}
df = pd.DataFrame({"SUM(value1)": [15], "sum_value": [15]})
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
self.assertEqual(["sum_value"], data["columns"])
class DistBarVizTestCase(SupersetTestCase):
def test_groupby_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "anchovies", None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("votes", data["key"])
expected_values = [
{"x": "pepperoni", "y": 5},
{"x": "cheese", "y": 3},
{"x": NULL_STRING, "y": 2},
{"x": "anchovies", "y": 1},
]
self.assertEqual(expected_values, data["values"])
def test_groupby_nans(self):
form_data = {
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["beds"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame({"beds": [0, 1, nan, 2], "count": [30, 42, 3, 29]})
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("count", data["key"])
expected_values = [
{"x": "1.0", "y": 42},
{"x": "0.0", "y": 30},
{"x": "2.0", "y": 29},
{"x": NULL_STRING, "y": 3},
]
self.assertEqual(expected_values, data["values"])
def test_column_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": ["role"],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "cheese", "pepperoni"],
"role": ["engineer", "engineer", None, None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)
expected = [
{
"key": NULL_STRING,
"values": [{"x": "pepperoni", "y": 2}, {"x": "cheese", "y": 1}],
},
{
"key": "engineer",
"values": [{"x": "pepperoni", "y": 5}, {"x": "cheese", "y": 3}],
},
]
self.assertEqual(expected, data)
class PairedTTestTestCase(SupersetTestCase):
def test_get_data_transforms_dataframe(self):
form_data = {
"groupby": ["groupA", "groupB", "groupC"],
"metrics": ["metric1", "metric2", "metric3"],
}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"metric1": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 4},
{"x": 200, "y": 5},
{"x": 300, "y": 6},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 7},
{"x": 200, "y": 8},
{"x": 300, "y": 9},
],
"group": ("c1", "c2", "c3"),
},
],
"metric2": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 40},
{"x": 200, "y": 50},
{"x": 300, "y": 60},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 70},
{"x": 200, "y": 80},
{"x": 300, "y": 90},
],
"group": ("c1", "c2", "c3"),
},
],
"metric3": [
{
"values": [
{"x": 100, "y": 100},
{"x": 200, "y": 200},
{"x": 300, "y": 300},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 400},
{"x": 200, "y": 500},
{"x": 300, "y": 600},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 700},
{"x": 200, "y": 800},
{"x": 300, "y": 900},
],
"group": ("c1", "c2", "c3"),
},
],
}
self.assertEqual(data, expected)
def test_get_data_empty_null_keys(self):
form_data = {"groupby": [], "metrics": ["", None]}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300]
raw[""] = [1, 2, 3]
raw[None] = [10, 20, 30]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"N/A": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": "All",
}
],
"NULL": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": "All",
}
],
}
self.assertEqual(data, expected)
class PartitionVizTestCase(SupersetTestCase):
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_time_series_option(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {}
test_viz = viz.PartitionViz(datasource, form_data)
super_query_obj.return_value = {}
query_obj = test_viz.query_obj()
self.assertFalse(query_obj["is_timeseries"])
test_viz.form_data["time_series_option"] = "agg_sum"
query_obj = test_viz.query_obj()
self.assertTrue(query_obj["is_timeseries"])
def test_levels_for_computes_levels(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
time_op = "agg_sum"
test_viz = viz.PartitionViz(Mock(), {})
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {DTTM_ALIAS: 1800, "metric1": 45, "metric2": 450, "metric3": 4500}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 600, "b1": 600, "c1": 600},
"metric1": {"a1": 6, "b1": 15, "c1": 24},
"metric2": {"a1": 60, "b1": 150, "c1": 240},
"metric3": {"a1": 600, "b1": 1500, "c1": 2400},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
time_op = "agg_mean"
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {
DTTM_ALIAS: 200.0,
"metric1": 5.0,
"metric2": 50.0,
"metric3": 500.0,
}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 200, "c1": 200, "b1": 200},
"metric1": {"a1": 2, "b1": 5, "c1": 8},
"metric2": {"a1": 20, "b1": 50, "c1": 80},
"metric3": {"a1": 200, "b1": 500, "c1": 800},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_diff_computes_difference(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {})
time_op = "point_diff"
levels = test_viz.levels_for_diff(time_op, groups, df)
expected = {"metric1": 6, "metric2": 60, "metric3": 600}
self.assertEqual(expected, levels[0].to_dict())
expected = {
"metric1": {"a1": 2, "b1": 2, "c1": 2},
"metric2": {"a1": 20, "b1": 20, "c1": 20},
"metric3": {"a1": 200, "b1": 200, "c1": 200},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(4, len(levels))
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_time_calls_process_data_and_drops_cols(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {"groupby": groups})
def return_args(df_drop, aggregate):
return df_drop
test_viz.process_data = Mock(side_effect=return_args)
levels = test_viz.levels_for_time(groups, df)
self.assertEqual(4, len(levels))
cols = [DTTM_ALIAS, "metric1", "metric2", "metric3"]
self.assertEqual(sorted(cols), sorted(levels[0].columns.tolist()))
cols += ["groupA"]
self.assertEqual(sorted(cols), sorted(levels[1].columns.tolist()))
cols += ["groupB"]
self.assertEqual(sorted(cols), sorted(levels[2].columns.tolist()))
cols += ["groupC"]
self.assertEqual(sorted(cols), sorted(levels[3].columns.tolist()))
self.assertEqual(4, len(test_viz.process_data.mock_calls))
def test_nest_values_returns_hierarchy(self):
raw = {}
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
levels = test_viz.levels_for("agg_sum", groups, df)
nest = test_viz.nest_values(levels)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
def test_nest_procs_returns_hierarchy(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
metrics = ["metric1", "metric2", "metric3"]
procs = {}
for i in range(0, 4):
            df_drop = df.drop(columns=groups[i:])
pivot = df_drop.pivot_table(
index=DTTM_ALIAS, columns=groups[:i], values=metrics
)
procs[i] = pivot
nest = test_viz.nest_procs(procs)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(None, nest[i].get("val"))
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(3, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
self.assertEqual(
1, len(nest[0]["children"][0]["children"][0]["children"][0]["children"])
)
def test_get_data_calls_correct_method(self):
test_viz = viz.PartitionViz(Mock(), {})
df = Mock()
with self.assertRaises(ValueError):
test_viz.get_data(df)
test_viz.levels_for = Mock(return_value=1)
test_viz.nest_values = Mock(return_value=1)
test_viz.form_data["groupby"] = ["groups"]
test_viz.form_data["time_series_option"] = "not_time"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "agg_sum"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "agg_mean"
test_viz.get_data(df)
self.assertEqual("agg_mean", test_viz.levels_for.mock_calls[2][1][0])
test_viz.form_data["time_series_option"] = "point_diff"
test_viz.levels_for_diff = Mock(return_value=1)
test_viz.get_data(df)
self.assertEqual("point_diff", test_viz.levels_for_diff.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "point_percent"
test_viz.get_data(df)
self.assertEqual("point_percent", test_viz.levels_for_diff.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "point_factor"
test_viz.get_data(df)
self.assertEqual("point_factor", test_viz.levels_for_diff.mock_calls[2][1][0])
test_viz.levels_for_time = Mock(return_value=1)
test_viz.nest_procs = Mock(return_value=1)
test_viz.form_data["time_series_option"] = "adv_anal"
test_viz.get_data(df)
self.assertEqual(1, len(test_viz.levels_for_time.mock_calls))
self.assertEqual(1, len(test_viz.nest_procs.mock_calls))
test_viz.form_data["time_series_option"] = "time_series"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[3][1][0])
self.assertEqual(7, len(test_viz.nest_values.mock_calls))
class RoseVisTestCase(SupersetTestCase):
def test_rose_vis_get_data(self):
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
t3 = pd.Timestamp("2004")
raw[DTTM_ALIAS] = [t1, t2, t3, t1, t2, t3, t1, t2, t3]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
df = pd.DataFrame(raw)
fd = {"metrics": ["metric1"], "groupby": ["groupA"]}
test_viz = viz.RoseViz(Mock(), fd)
test_viz.metrics = fd["metrics"]
res = test_viz.get_data(df)
expected = {
946684800000000000: [
{"time": t1, "value": 1, "key": ("a1",), "name": ("a1",)},
{"time": t1, "value": 4, "key": ("b1",), "name": ("b1",)},
{"time": t1, "value": 7, "key": ("c1",), "name": ("c1",)},
],
1009843200000000000: [
{"time": t2, "value": 2, "key": ("a1",), "name": ("a1",)},
{"time": t2, "value": 5, "key": ("b1",), "name": ("b1",)},
{"time": t2, "value": 8, "key": ("c1",), "name": ("c1",)},
],
1072915200000000000: [
{"time": t3, "value": 3, "key": ("a1",), "name": ("a1",)},
{"time": t3, "value": 6, "key": ("b1",), "name": ("b1",)},
{"time": t3, "value": 9, "key": ("c1",), "name": ("c1",)},
],
}
self.assertEqual(expected, res)
class TimeSeriesTableVizTestCase(SupersetTestCase):
def test_get_data_metrics(self):
form_data = {"metrics": ["sum__A", "count"], "groupby": []}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t2]
raw["sum__A"] = [15, 20]
raw["count"] = [6, 7]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(["count", "sum__A"]), set(data["columns"]))
time_format = "%Y-%m-%d %H:%M:%S"
expected = {
t1.strftime(time_format): {"sum__A": 15, "count": 6},
t2.strftime(time_format): {"sum__A": 20, "count": 7},
}
self.assertEqual(expected, data["records"])
def test_get_data_group_by(self):
form_data = {"metrics": ["sum__A"], "groupby": ["groupby1"]}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t1, t1, t2, t2, t2]
raw["sum__A"] = [15, 20, 25, 30, 35, 40]
raw["groupby1"] = ["a1", "a2", "a3", "a1", "a2", "a3"]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(["a1", "a2", "a3"]), set(data["columns"]))
time_format = "%Y-%m-%d %H:%M:%S"
expected = {
t1.strftime(time_format): {"a1": 15, "a2": 20, "a3": 25},
t2.strftime(time_format): {"a1": 30, "a2": 35, "a3": 40},
}
self.assertEqual(expected, data["records"])
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_throws_metrics_and_groupby(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {"groupby": ["a"]}
super_query_obj.return_value = {}
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
form_data["metrics"] = ["x", "y"]
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
class BaseDeckGLVizTestCase(SupersetTestCase):
def test_get_metrics(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == [form_data.get("size")]
form_data = {}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == []
def test_scatterviz_get_metrics(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {"type": "metric", "value": "int"}
result = test_viz_deckgl.get_metrics()
assert result == ["int"]
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {}
result = test_viz_deckgl.get_metrics()
assert result == []
def test_get_js_columns(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
mock_d = {"a": "dummy1", "b": "dummy2", "c": "dummy3"}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_js_columns(mock_d)
assert result == {"color": None}
def test_get_properties(self):
mock_d = {}
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(NotImplementedError) as context:
test_viz_deckgl.get_properties(mock_d)
self.assertTrue("" in str(context.exception))
def test_process_spatial_query_obj(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
mock_key = "spatial_key"
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(ValueError) as context:
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
self.assertTrue("Bad spatial key" in str(context.exception))
test_form_data = {
"latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
"delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
"geohash_key": {"type": "geohash", "geohashCol": "geo"},
}
datasource = self.get_datasource_mock()
expected_results = {
"latlong_key": ["lon", "lat"],
"delimited_key": ["lonlat"],
"geohash_key": ["geo"],
}
for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data)
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
assert expected_results.get(mock_key) == mock_gb
def test_geojson_query_obj(self):
form_data = load_fixture("deck_geojson_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.DeckGeoJson(datasource, form_data)
results = test_viz_deckgl.query_obj()
assert results["metrics"] == []
assert results["groupby"] == []
assert results["columns"] == ["test_col"]
def test_parse_coordinates(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
viz_instance = viz.BaseDeckGLViz(datasource, form_data)
coord = viz_instance.parse_coordinates("1.23, 3.21")
self.assertEqual(coord, (1.23, 3.21))
coord = viz_instance.parse_coordinates("1.23 3.21")
self.assertEqual(coord, (1.23, 3.21))
self.assertEqual(viz_instance.parse_coordinates(None), None)
self.assertEqual(viz_instance.parse_coordinates(""), None)
def test_parse_coordinates_raises(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates("NULL")
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates("fldkjsalkj,fdlaskjfjadlksj")
@patch("superset.utils.core.uuid.uuid4")
def test_filter_nulls(self, mock_uuid4):
mock_uuid4.return_value = uuid.UUID("12345678123456781234567812345678")
test_form_data = {
"latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
"delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
"geohash_key": {"type": "geohash", "geohashCol": "geo"},
}
datasource = self.get_datasource_mock()
expected_results = {
"latlong_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lat",
"isExtra": False,
},
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lon",
"isExtra": False,
},
],
"delimited_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lonlat",
"isExtra": False,
}
],
"geohash_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "geo",
"isExtra": False,
}
],
}
for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data.copy())
test_viz_deckgl.spatial_control_keys = [mock_key]
test_viz_deckgl.add_null_filters()
adhoc_filters = test_viz_deckgl.form_data["adhoc_filters"]
assert expected_results.get(mock_key) == adhoc_filters
class TimeSeriesVizTestCase(SupersetTestCase):
def test_timeseries_unicode_data(self):
datasource = self.get_datasource_mock()
form_data = {"groupby": ["name"], "metrics": ["sum__payout"]}
raw = {}
raw["name"] = [
"Real Madrid C.F.🇺🇸🇬🇧",
"Real Madrid C.F.🇺🇸🇬🇧",
"Real Madrid Basket",
"Real Madrid Basket",
]
raw["__timestamp"] = [
"2018-02-20T00:00:00",
"2018-03-09T00:00:00",
"2018-02-20T00:00:00",
"2018-03-09T00:00:00",
]
raw["sum__payout"] = [2, 2, 4, 4]
df = pd.DataFrame(raw)
test_viz = viz.NVD3TimeSeriesViz(datasource, form_data)
viz_data = {}
viz_data = test_viz.get_data(df)
expected = [
{
u"values": [
{u"y": 4, u"x": u"2018-02-20T00:00:00"},
{u"y": 4, u"x": u"2018-03-09T00:00:00"},
],
u"key": (u"Real Madrid Basket",),
},
{
u"values": [
{u"y": 2, u"x": u"2018-02-20T00:00:00"},
{u"y": 2, u"x": u"2018-03-09T00:00:00"},
],
u"key": (u"Real Madrid C.F.\U0001f1fa\U0001f1f8\U0001f1ec\U0001f1e7",),
},
]
self.assertEqual(expected, viz_data)
def test_process_data_resample(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"__timestamp": pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, 5.0, 7.0],
}
)
self.assertEqual(
viz.NVD3TimeSeriesViz(
datasource,
{"metrics": ["y"], "resample_method": "sum", "resample_rule": "1D"},
)
.process_data(df)["y"]
.tolist(),
[1.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0],
)
np.testing.assert_equal(
viz.NVD3TimeSeriesViz(
datasource,
{"metrics": ["y"], "resample_method": "asfreq", "resample_rule": "1D"},
)
.process_data(df)["y"]
.tolist(),
[1.0, 2.0, np.nan, np.nan, 5.0, np.nan, 7.0],
)
def test_apply_rolling(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
index=pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
data={"y": [1.0, 2.0, 3.0, 4.0]},
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "cumsum",
"rolling_periods": 0,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 3.0, 6.0, 10.0],
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "sum",
"rolling_periods": 2,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 3.0, 5.0, 7.0],
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "mean",
"rolling_periods": 10,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 1.5, 2.0, 2.5],
)
class BigNumberVizTestCase(SupersetTestCase):
def test_get_data(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
data={
DTTM_ALIAS: pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, 3.0, 4.0],
}
)
data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).get_data(df)
self.assertEqual(data[2], {DTTM_ALIAS: pd.Timestamp("2019-01-05"), "y": 3})
def test_get_data_with_none(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
data={
DTTM_ALIAS: pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, None, 4.0],
}
)
data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).get_data(df)
assert np.isnan(data[2]["y"])
| 1.617188 | 2 |
chord_sim/modules/taskqueue.py | ryogrid/FunnelKVS | 8 | 1032 | # coding:utf-8
from typing import Dict, List, Optional, cast, TYPE_CHECKING
from .chord_util import ChordUtil, InternalControlFlowException, NodeIsDownedExceptiopn
if TYPE_CHECKING:
from .chord_node import ChordNode
class TaskQueue:
JOIN_PARTIAL = "join_partial"
def __init__(self, existing_node : 'ChordNode'):
self.tqueue : List[str] = []
self.existing_node = existing_node
def append_task(self, task_code : str):
self.tqueue.append(task_code)
    # Execute the first task in the queue.
    # If the task fails, it is put back at the front of the queue so it can be retried later.
def exec_first(self):
if len(self.tqueue) > 0:
ChordUtil.dprint("exec_first_0," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + "," + str(self.tqueue))
task_code : str = self.tqueue.pop()
if task_code == TaskQueue.JOIN_PARTIAL:
# try:
#self.existing_node.stabilizer.partial_join_op()
ret = self.existing_node.stabilizer.partial_join_op()
if (ret.is_ok):
pass
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE
                    # The run failed, so put the task back at the front to be retried.
self.tqueue.insert(0, task_code)
ChordUtil.dprint(
"exec_first_1," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ "INTERNAL_CONTROL_FLOW_EXCEPTION_OCCURED")
# except (InternalControlFlowException, NodeIsDownedExceptiopn):
            # # The run failed, so put the task back at the front to be retried.
# self.tqueue.insert(0, task_code)
# ChordUtil.dprint("exec_first_1," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
# + "INTERNAL_CONTROL_FLOW_EXCEPTION_OCCURED")
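# Usage sketch ("node" stands in for a ChordNode instance; the names below are illustrative):
#   tq = TaskQueue(node)
#   tq.append_task(TaskQueue.JOIN_PARTIAL)
#   tq.exec_first()  # a failed task is re-queued at the front and retried on a later call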
| 2.5 | 2 |
surname_rnn/surname/containers.py | sudarshan85/nlpbook | 0 | 1033 | #!/usr/bin/env python
import pandas as pd
from pathlib import Path
from torch.utils.data import DataLoader
class ModelContainer(object):
def __init__(self, model, optimizer, loss_fn, scheduler=None):
self.model = model
self.optimizer = optimizer
self.loss_fn = loss_fn
self.scheduler = scheduler
class DataContainer(object):
def __init__(self, df_with_split: pd.DataFrame, dataset_class, vectorizer_file: Path, batch_size:
int, with_test=True, is_load: bool=True) -> None:
self.train_df = df_with_split.loc[df_with_split['split'] == 'train']
self.val_df = df_with_split.loc[df_with_split['split'] == 'val']
self._bs = batch_size
self.with_test = with_test
self.is_load = is_load
self._lengths = {'train_size': len(self.train_df), 'val_size': len(self.val_df)}
self._n_batches = [self._lengths['train_size'] // self._bs, self._lengths['val_size'] //
self._bs]
if not self.is_load:
print("Creating and saving vectorizer")
train_ds = dataset_class.load_data_and_create_vectorizer(self.train_df)
train_ds.save_vectorizer(vectorizer_file)
self.train_ds = dataset_class.load_data_and_vectorizer_from_file(self.train_df, vectorizer_file)
self.vectorizer = self.train_ds.vectorizer
self.surname_vocab = self.vectorizer.surname_vocab
self.nationality_vocab = self.vectorizer.nationality_vocab
self.train_dl = DataLoader(self.train_ds, self._bs, shuffle=True, drop_last=True)
self.val_ds = dataset_class.load_data_and_vectorizer(self.val_df, self.vectorizer)
self.val_dl = DataLoader(self.val_ds, self._bs, shuffle=True, drop_last=True)
if self.with_test:
self.test_df = df_with_split.loc[df_with_split['split'] == 'test']
self._lengths['test_size'] = len(self.test_df)
self._n_batches.append(self._lengths['test_size'] // self._bs)
self.test_ds = dataset_class.load_data_and_vectorizer(self.test_df, self.vectorizer)
self.test_dl = DataLoader(self.test_ds, self._bs, shuffle=True, drop_last=True)
def get_loaders(self):
return self.train_dl, self.val_dl, self.test_dl
@property
def train_batches(self):
return self._n_batches[0]
@property
def val_batches(self):
return self._n_batches[1]
@property
def test_batches(self):
if not self.with_test:
raise NameError("No test dataset was provided")
return self._n_batches[2]
@property
def vocab_size(self):
return len(self.surname_vocab)
@property
def n_classes(self):
return len(self.nationality_vocab)
@property
def sizes(self):
return self._lengths
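# Usage sketch (SurnameDataset and the CSV layout are illustrative, not defined in this module):
#   df = pd.read_csv('surnames_with_splits.csv')  # needs a 'split' column with train/val/test rows
#   dc = DataContainer(df, SurnameDataset, Path('vectorizer.json'), batch_size=64, is_load=False)
#   train_dl, val_dl, test_dl = dc.get_loaders()
#   print(dc.sizes, dc.vocab_size, dc.n_classes)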
| 2.515625 | 3 |
AudioLib/__init__.py | yNeshy/voice-change | 11 | 1034 | <reponame>yNeshy/voice-change
from AudioLib.AudioEffect import AudioEffect
| 0.96875 | 1 |
programs/buck_logging.py | lakshmi2005/buck | 1 | 1035 | #!/usr/bin/env python
from __future__ import print_function
import logging
import os
def setup_logging():
# Set log level of the messages to show.
level_name = os.environ.get('BUCK_WRAPPER_LOG_LEVEL', 'INFO')
level_name_to_level = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}
level = level_name_to_level.get(level_name.upper(), logging.INFO)
logging.basicConfig(
level=level,
format=(
'%(asctime)s [%(levelname)s][%(filename)s:%(lineno)d] %(message)s'
))
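# Example (sketch): the threshold comes from the BUCK_WRAPPER_LOG_LEVEL environment variable.
#   os.environ['BUCK_WRAPPER_LOG_LEVEL'] = 'DEBUG'
#   setup_logging()
#   logging.debug('now visible')  # emitted because the root logger threshold is DEBUG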
| 2.484375 | 2 |
CONTENT/DS-n-Algos/ALGO/__PYTHON/celeb.py | Web-Dev-Collaborative/DS-ALGO-OFFICIAL | 11 | 1036 | <reponame>Web-Dev-Collaborative/DS-ALGO-OFFICIAL<gh_stars>10-100
def orangesRotting(elemnts):
if not elemnts or len(elemnts) == 0:
return 0
n = len(elemnts)
m = len(elemnts[0])
rotten = []
for i in range(n):
for j in range(m):
if elemnts[i][j] == 2:
rotten.append((i, j))
mins = 0
def dfs(rotten):
count = []
for i, j in rotten:
            if i > 0 and elemnts[i - 1][j] == 1:
                count.append((i - 1, j))
                elemnts[i - 1][j] = 2
            if j > 0 and elemnts[i][j - 1] == 1:
                count.append((i, j - 1))
                elemnts[i][j - 1] = 2
            if i < n - 1 and elemnts[i + 1][j] == 1:
                count.append((i + 1, j))
                elemnts[i + 1][j] = 2
            if j < m - 1 and elemnts[i][j + 1] == 1:
                count.append((i, j + 1))
                elemnts[i][j + 1] = 2
return count
while rotten:
rotten = dfs(rotten)
if not rotten:
break
mins += 1
for i in range(n):
for j in range(m):
if elemnts[i][j] == 1:
return -1
return mins
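# Quick sanity check (grid convention as in LeetCode 994: 2 = rotten, 1 = fresh, 0 = empty).
if __name__ == "__main__":
    print(orangesRotting([[2, 1, 1], [1, 1, 0], [0, 1, 1]]))  # expected: 4
    print(orangesRotting([[2, 1, 1], [0, 1, 1], [1, 0, 1]]))  # expected: -1 (bottom-left orange is unreachable)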
| 3.109375 | 3 |
official/cv/c3d/src/c3d_model.py | leelige/mindspore | 77 | 1037 | <gh_stars>10-100
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math
import mindspore.nn as nn
import mindspore.ops as P
from mindspore.common import initializer as init
from src.utils import default_recurisive_init, KaimingNormal
class C3D(nn.Cell):
"""
C3D network definition.
Args:
num_classes (int): Class numbers. Default: 1000.
Returns:
Tensor, infer output tensor.
Examples:
>>> C3D(num_classes=1000)
"""
def __init__(self, num_classes=1000):
super(C3D, self).__init__()
self.conv1 = nn.Conv3d(in_channels=3, out_channels=64, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool1 = P.MaxPool3D(kernel_size=(1, 2, 2), strides=(1, 2, 2), pad_mode='same')
self.conv2 = nn.Conv3d(in_channels=64, out_channels=128, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool2 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.conv3a = nn.Conv3d(in_channels=128, out_channels=256, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.conv3b = nn.Conv3d(in_channels=256, out_channels=256, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool3 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.conv4a = nn.Conv3d(in_channels=256, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.conv4b = nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool4 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.conv5a = nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.conv5b = nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool5 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.fc6 = nn.Dense(in_channels=8192, out_channels=4096)
self.fc7 = nn.Dense(in_channels=4096, out_channels=4096)
self.fc8 = nn.Dense(in_channels=4096, out_channels=num_classes, bias_init=init.Normal(0.02))
self.dropout = nn.Dropout(keep_prob=0.5)
self.relu = nn.ReLU()
self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 0), (1, 0)), mode="CONSTANT")
self.__init_weight()
def __init_weight(self):
default_recurisive_init(self)
self.custom_init_weight()
def construct(self, x):
x = self.relu(self.conv1(x))
x = self.pool1(x)
x = self.relu(self.conv2(x))
x = self.pool2(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool3(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
x = self.pool4(x)
x = self.relu(self.conv5a(x))
x = self.relu(self.conv5b(x))
x = x.view(-1, 512 * 2, 7, 7)
x = self.pad(x)
x = x.view(-1, 512, 2, 8, 8)
x = self.pool5(x)
x = x.view(-1, 8192)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
x = self.dropout(x)
logits = self.fc8(x)
return logits
def custom_init_weight(self):
"""
Init the weight of Conv3d and Dense in the net.
"""
for _, cell in self.cells_and_names():
if isinstance(cell, nn.Conv3d):
cell.weight.set_data(init.initializer(
KaimingNormal(a=math.sqrt(5), mode='fan_out', nonlinearity='relu'),
cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
elif isinstance(cell, nn.Dense):
cell.weight.set_data(init.initializer(
init.Normal(0.01), cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
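# Shape sanity check (sketch, not part of the model code; assumes a configured MindSpore context):
#   import numpy as np
#   from mindspore import Tensor
#   net = C3D(num_classes=101)
#   clip = Tensor(np.random.rand(1, 3, 16, 112, 112).astype(np.float32))
#   logits = net(clip)  # -> shape (1, 101); 16-frame 112x112 clips match the 8192-unit fc6 input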
| 1.953125 | 2 |
blmath/geometry/apex.py | metabolize/blmath | 6 | 1038 | <filename>blmath/geometry/apex.py
import numpy as np
from blmath.numerics import vx
def apex(points, axis):
'''
Find the most extreme point in the direction of the axis provided.
    axis: A vector, which is a 3x1 np.array.
'''
coords_on_axis = points.dot(axis)
return points[np.argmax(coords_on_axis)]
def inflection_points(points, axis, span):
'''
    Find the list of vertices that precede inflection points in a curve. The curve is differentiated
with respect to the coordinate system defined by axis and span.
axis: A vector representing the vertical axis of the coordinate system.
    span: A vector representing the horizontal axis of the coordinate system.
returns: a list of points in space corresponding to the vertices that
    immediately precede inflection points in the curve
'''
coords_on_span = points.dot(span)
dx = np.gradient(coords_on_span)
coords_on_axis = points.dot(axis)
# Take the second order finite difference of the curve with respect to the
# defined coordinate system
finite_difference_2 = np.gradient(np.gradient(coords_on_axis, dx), dx)
# Compare the product of all neighboring pairs of points in the second derivative
# If a pair of points has a negative product, then the second derivative changes sign
# at one of those points, signalling an inflection point
is_inflection_point = [finite_difference_2[i] * finite_difference_2[i + 1] <= 0 for i in range(len(finite_difference_2) - 1)]
inflection_point_indices = [i for i, b in enumerate(is_inflection_point) if b]
if len(inflection_point_indices) == 0: # pylint: disable=len-as-condition
return []
return points[inflection_point_indices]
def farthest(from_point, to_points):
'''
Find the farthest point among the inputs, to the given point.
Return a tuple: farthest_point, index_of_farthest_point.
'''
absolute_distances = vx.magnitude(to_points - from_point)
index_of_farthest_point = np.argmax(absolute_distances)
farthest_point = to_points[index_of_farthest_point]
return farthest_point, index_of_farthest_point
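if __name__ == "__main__":
    # Tiny self-check with illustrative points (not part of the library API).
    example_points = np.array([[0., 0., 0.], [1., 2., 0.], [3., 1., 0.]])
    print(apex(example_points, np.array([1., 0., 0.])))      # -> [3. 1. 0.]
    print(farthest(np.array([0., 0., 0.]), example_points))  # -> (array([3., 1., 0.]), 2)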
| 3.34375 | 3 |
examples/client/main.py | TheFarGG/Discode | 3 | 1039 | <filename>examples/client/main.py<gh_stars>1-10
import os
import discode
TOKEN = os.environ.get("TOKEN")
# The token from the developer portal.
client = discode.Client(token=TOKEN, intents=discode.Intents.default())
@client.on_event("ready")
async def on_ready():
print(client.user, "is ready!")
# The ready listener gets fired when the bot/client is completely ready for use.
@client.on_event("message_create")
async def on_message(message: discode.Message):
    msg: str = message.content
if msg.startswith("?hi"):
await message.channel.send("Hi!!!")
# The message_create listener is fired whenever a message is sent to any channel that the bot has access to.
| 2.890625 | 3 |
timm/models/layers/__init__.py | kkahatapitiya/pytorch-image-models | 0 | 1040 | <reponame>kkahatapitiya/pytorch-image-models<filename>timm/models/layers/__init__.py
from .activations import *
from .adaptive_avgmax_pool import \
adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
from .blur_pool import BlurPool2d
from .classifier import ClassifierHead, create_classifier
from .cond_conv2d import CondConv2d, get_condconv_initializer
from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
set_layer_config
from .conv2d_same import Conv2dSame, conv2d_same
from .conv_bn_act import ConvBnAct
from .create_act import create_act_layer, get_act_layer, get_act_fn
from .create_attn import get_attn, create_attn
from .create_conv2d import create_conv2d
from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act
from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
from .evo_norm import EvoNormBatch2d, EvoNormSample2d
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible
from .inplace_abn import InplaceAbn
from .involution import Involution
from .linear import Linear
from .mixed_conv2d import MixedConv2d
from .mlp import Mlp, GluMlp, GatedMlp, ConvMlpGeneral, ConvMlpGeneralv2
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .norm import GroupNorm, LayerNorm2d
from .norm_act import BatchNormAct2d, GroupNormAct
from .padding import get_padding, get_same_padding, pad_same
from .patch_embed import PatchEmbed
from .pool2d_same import AvgPool2dSame, create_pool2d
from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
from .selective_kernel import SelectiveKernel
from .separable_conv import SeparableConv2d, SeparableConvBnAct
from .space_to_depth import SpaceToDepthModule
from .split_attn import SplitAttn
from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
from .test_time_pool import TestTimePoolHead, apply_test_time_pool
from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_
| 1.460938 | 1 |
riccipy/metrics/bondi_2.py | cjayross/riccipy | 4 | 1041 | """
Name: Bondi
References: Bondi, Proc. Roy. Soc. Lond. A, v282, p303, (1964)
Coordinates: Spherical
Symmetry: Spherical
Notes: Outgoing Coordinates
"""
from sympy import Function, diag, sin, symbols
coords = symbols("r v theta phi", real=True)
variables = ()
functions = symbols("C M", cls=Function)
r, v, th, ph = coords
C, M = functions
metric = diag(0, -C(r, v) ** 2 * (1 - 2 * M(r, v) / r), r ** 2, r ** 2 * sin(th) ** 2)
metric[0, 1] = metric[1, 0] = -C(r, v)
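# For reference, the line element these components encode (read off the matrix above, not
# quoted from the cited paper) is:
#   ds**2 = -C(r, v)**2 * (1 - 2*M(r, v)/r) * dv**2 - 2*C(r, v) * dr * dv
#           + r**2 * (dtheta**2 + sin(theta)**2 * dphi**2)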
| 2.5625 | 3 |
cfgov/ask_cfpb/tests/test_views.py | atuggle/cfgov-refresh | 0 | 1042 | from __future__ import unicode_literals
import json
from django.apps import apps
from django.core.urlresolvers import NoReverseMatch, reverse
from django.http import Http404, HttpRequest, QueryDict
from django.test import TestCase, override_settings
from django.utils import timezone
from wagtail.wagtailcore.models import Site
from wagtailsharing.models import SharingSite
import mock
from model_mommy import mommy
from ask_cfpb.models import ENGLISH_PARENT_SLUG, SPANISH_PARENT_SLUG
from ask_cfpb.views import annotate_links, ask_search, redirect_ask_search
from v1.util.migrations import get_or_create_page
now = timezone.now()
class AnswerPagePreviewCase(TestCase):
def setUp(self):
from v1.models import HomePage
from ask_cfpb.models import Answer
self.ROOT_PAGE = HomePage.objects.get(slug='cfgov')
self.english_parent_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerLandingPage',
'Ask CFPB',
ENGLISH_PARENT_SLUG,
self.ROOT_PAGE,
language='en',
live=True)
self.spanish_parent_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerLandingPage',
'Obtener respuestas',
SPANISH_PARENT_SLUG,
self.ROOT_PAGE,
language='es',
live=True)
self.test_answer = mommy.make(
Answer,
answer="Test answer.",
question="Test question.",
slug='test-question',
update_english_page=True,
update_spanish_page=False)
self.site = mommy.make(
Site,
root_page=self.ROOT_PAGE,
hostname='localhost',
port=8000,
is_default_site=True)
self.sharing_site = mommy.make(
SharingSite,
site=self.site,
hostname='preview.localhost',
port=8000)
@mock.patch('ask_cfpb.views.ServeView.serve_latest_revision')
def test_preview_page(self, mock_serve):
from ask_cfpb.views import view_answer
page = self.test_answer.english_page
revision = page.save_revision()
revision.publish()
test_request = HttpRequest()
test_request.META['SERVER_NAME'] = 'preview.localhost'
test_request.META['SERVER_PORT'] = 8000
view_answer(
test_request, 'test-question', 'en', self.test_answer.pk)
self.assertEqual(mock_serve.call_count, 1)
def test_answer_page_not_live(self):
from ask_cfpb.views import view_answer
page = self.test_answer.english_page
page.live = False
page.save()
test_request = HttpRequest()
with self.assertRaises(Http404):
view_answer(
test_request,
'test-question',
'en',
self.test_answer.pk)
class AnswerViewTestCase(TestCase):
def setUp(self):
from v1.models import HomePage
self.ROOT_PAGE = HomePage.objects.get(slug='cfgov')
self.english_parent_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerLandingPage',
'Ask CFPB',
ENGLISH_PARENT_SLUG,
self.ROOT_PAGE,
language='en',
live=True)
self.spanish_parent_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerLandingPage',
'Obtener respuestas',
SPANISH_PARENT_SLUG,
self.ROOT_PAGE,
language='es',
live=True)
def test_annotate_links(self):
mock_answer = (
'<p>Answer with a <a href="http://fake.com">fake link.</a></p>')
(annotated_answer, links) = annotate_links(mock_answer)
self.assertEqual(
annotated_answer,
'<html><body><p>Answer with a <a href="http://fake.com">fake '
'link.</a><sup>1</sup></p></body></html>')
self.assertEqual(links, [(1, str('http://fake.com'))])
def test_annotate_links_no_href(self):
mock_answer = (
'<p>Answer with a <a>fake link.</a></p>')
(annotated_answer, links) = annotate_links(mock_answer)
self.assertEqual(links, [])
def test_annotate_links_no_site(self):
site = Site.objects.get(is_default_site=True)
site.is_default_site = False
site.save()
with self.assertRaises(RuntimeError) as context:
annotate_links('answer')
self.assertIn('no default wagtail site', str(context.exception))
def test_bad_language_search(self):
with self.assertRaises(NoReverseMatch):
self.client.get(reverse(
'ask-search-en',
kwargs={'language': 'zz'}), {'q': 'payday'})
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_en_search_results_page_not_created(self, mock_filter):
mock_queryset = mock.Mock()
mock_queryset.count.return_value = 0
mock_filter.return_value = [mock_queryset]
response = self.client.get(reverse(
'ask-search-en'), {'q': 'payday'})
self.assertEqual(mock_filter.call_count, 1)
self.assertTrue(mock_filter.called_with(language='en', q='payday'))
self.assertEqual(response.status_code, 404)
@mock.patch('ask_cfpb.views.SearchQuerySet')
def test_en_search(self, mock_sqs):
from v1.util.migrations import get_or_create_page
mock_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.ROOT_PAGE,
language='en')
mock_return = mock.Mock()
mock_return.url = 'mockcfpb.gov'
mock_return.autocomplete = 'A mock question'
mock_return.text = 'Mock answer text.'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_sqs_instance = mock_sqs.return_value.models.return_value
mock_sqs_instance.filter.return_value = mock_queryset
mock_sqs_instance.spelling_suggestion.return_value = 'payday'
response = self.client.get(reverse(
'ask-search-en'), {'q': 'payday'})
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context_data['page'],
mock_page)
self.assertEqual(
response.context_data['page'].suggestion,
None)
self.assertEqual(mock_sqs_instance.filter.call_count, 1)
self.assertTrue(mock_sqs_instance.filter.called_with(
language='en', q='payday'))
@mock.patch('ask_cfpb.views.SearchQuerySet')
def test_en_search_no_term(self, mock_sqs):
from v1.util.migrations import get_or_create_page
mock_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.ROOT_PAGE,
language='en')
response = self.client.get(reverse(
'ask-search-en'), {'q': ''})
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context_data['page'],
mock_page)
self.assertEqual(
response.context_data['page'].query,
'')
self.assertEqual(
response.context_data['page'].result_query,
'')
@override_settings(FLAGS={'ASK_SEARCH_TYPOS': {'boolean': True}})
@mock.patch('ask_cfpb.views.SearchQuerySet')
def test_en_search_suggestion(self, mock_sqs):
from v1.util.migrations import get_or_create_page
mock_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.english_parent_page,
language='en',
live=True)
mock_return = mock.Mock()
mock_return.url = 'mockcfpb.gov'
mock_return.autocomplete = 'A mock question'
mock_return.text = 'Mock answer text.'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 0
mock_sqs_instance = mock_sqs.return_value.models.return_value
mock_sqs_instance.filter.return_value = mock_queryset
mock_sqs_instance.spelling_suggestion.return_value = 'payday'
response = self.client.get(reverse(
'ask-search-en'), {'q': 'paydya'})
self.assertEqual(response.status_code, 200)
response_page = response.context_data['page']
self.assertEqual(response_page, mock_page)
self.assertEqual(response_page.suggestion, 'paydya')
self.assertEqual(response_page.result_query, 'payday')
self.assertEqual(response_page.query, 'paydya')
@mock.patch('ask_cfpb.views.redirect_ask_search')
def test_ask_search_encounters_facets(self, mock_redirect):
request = HttpRequest()
request.GET['selected_facets'] = 'category_exact:my_category'
ask_search(request)
self.assertEqual(mock_redirect.call_count, 1)
@mock.patch('ask_cfpb.views.redirect')
def test_redirect_ask_search_passes_query_string(self, mock_redirect):
request = HttpRequest()
request.GET['q'] = 'hoodoo'
redirect_ask_search(request)
self.assertEqual(mock_redirect.call_count, 1)
@mock.patch('ask_cfpb.views.redirect')
def test_spanish_redirect_ask_search_passes_query_string(
self, mock_redirect):
request = HttpRequest()
request.GET['selected_facets'] = 'category_exact:my_categoria'
redirect_ask_search(request, language='es')
self.assertEqual(mock_redirect.call_count, 1)
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_es_search(self, mock_filter):
get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock Spanish results page',
'respuestas',
self.spanish_parent_page,
language='es',
live=True)
mock_return = mock.Mock()
mock_return.url = 'mockcfpb.gov'
mock_return.autocomplete = 'A mock question'
mock_return.text = 'Mock answer text.'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_filter.return_value = mock_queryset
self.client.get(reverse(
'ask-search-es', kwargs={'language': 'es'}), {'q': 'payday'})
self.assertEqual(mock_filter.call_count, 1)
self.assertTrue(mock_filter.called_with(language='es', q='payday'))
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_search_page_en_selection(self, mock_filter):
page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.english_parent_page,
language='en',
live=True)
mock_return = mock.Mock()
mock_return.url = 'url'
mock_return.autocomplete = 'question text'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_filter.return_value = mock_queryset
self.client.get(reverse(
'ask-search-en'), {'q': 'tuition'})
self.assertEqual(mock_filter.call_count, 1)
self.assertEqual(page.language, 'en')
self.assertEqual(page.answers, [])
self.assertEqual(
page.get_template(HttpRequest()),
'ask-cfpb/answer-search-results.html')
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_search_page_es_selection(self, mock_filter):
page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock Spanish results page',
'respuestas',
self.spanish_parent_page,
language='es',
live=True)
mock_return = mock.Mock()
mock_return.url = 'url'
mock_return.autocomplete = 'question text'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_filter.return_value = mock_queryset
self.client.get(reverse(
'ask-search-es', kwargs={'language': 'es'}), {'q': 'hipotecas'})
self.assertEqual(mock_filter.call_count, 1)
self.assertEqual(page.language, 'es')
self.assertEqual(page.answers, [])
self.assertEqual(
page.get_template(HttpRequest()),
'ask-cfpb/answer-search-spanish-results.html')
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_json_response(self, mock_filter):
get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.english_parent_page,
language='en',
live=True)
mock_return = mock.Mock()
mock_return.url = "inscisive_url.com"
mock_return.autocomplete = "inscisive question"
mock_return.text = "inscisive text"
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_filter.return_value = mock_queryset
response = self.client.get(reverse(
'ask-search-en-json',
kwargs={'as_json': 'json'}), {'q': 'tuition'})
self.assertEqual(response.status_code, 200)
self.assertEqual(mock_filter.call_count, 1)
self.assertEqual(json.loads(response.content)['query'], 'tuition')
def test_autocomplete_en_blank_term(self):
result = self.client.get(reverse(
'ask-autocomplete-en'), {'term': ''})
output = json.loads(result.content)
self.assertEqual(output, [])
def test_autocomplete_es_blank_term(self):
result = self.client.get(reverse(
'ask-autocomplete-es',
kwargs={'language': 'es'}), {'term': ''})
output = json.loads(result.content)
self.assertEqual(output, [])
@mock.patch('ask_cfpb.views.SearchQuerySet.autocomplete')
def test_autocomplete_en(self, mock_autocomplete):
mock_search_result = mock.Mock()
mock_search_result.autocomplete = 'question'
mock_search_result.url = 'url'
mock_autocomplete.return_value = [mock_search_result]
result = self.client.get(reverse(
'ask-autocomplete-en'), {'term': 'question'})
self.assertEqual(mock_autocomplete.call_count, 1)
output = json.loads(result.content)
self.assertEqual(
sorted(output[0].keys()),
['question', 'url'])
@mock.patch('ask_cfpb.views.SearchQuerySet.autocomplete')
def test_autocomplete_es(self, mock_autocomplete):
mock_search_result = mock.Mock()
mock_search_result.autocomplete = 'question'
mock_search_result.url = 'url'
mock_autocomplete.return_value = [mock_search_result]
result = self.client.get(reverse(
'ask-autocomplete-es',
kwargs={'language': 'es'}), {'term': 'question'})
self.assertEqual(mock_autocomplete.call_count, 1)
output = json.loads(result.content)
self.assertEqual(
sorted(output[0].keys()),
['question', 'url'])
class RedirectAskSearchTestCase(TestCase):
def test_redirect_search_no_facets(self):
request = HttpRequest()
with self.assertRaises(Http404):
redirect_ask_search(request)
def test_redirect_search_blank_facets(self):
request = HttpRequest()
request.GET['selected_facets'] = ''
with self.assertRaises(Http404):
redirect_ask_search(request)
def test_redirect_search_no_query(self):
request = HttpRequest()
request.GET['q'] = ' '
with self.assertRaises(Http404):
redirect_ask_search(request)
def test_redirect_search_with_category(self):
category_querystring = (
'selected_facets=category_exact:my_category'
'&selected_facets=category_exact:my_category2'
'&selected_facets=audience_exact:Older+Americans'
'&selected_facets=audience_exact:my_audience2'
'&selected_facets=tag_exact:mytag1'
'&selected_facets=tag_exact:mytag2')
request = HttpRequest()
request.GET = QueryDict(category_querystring)
result = redirect_ask_search(request)
self.assertEqual(result.get('location'),
'/ask-cfpb/category-my_category/')
def test_redirect_search_with_audience(self):
audience_querystring = (
'selected_facets=audience_exact:Older+Americans'
'&selected_facets=audience_exact:my_audience2')
request = HttpRequest()
request.GET = QueryDict(audience_querystring)
result = redirect_ask_search(request)
self.assertEqual(
result.get('location'),
'/ask-cfpb/audience-older-americans/')
def test_spanish_redirect_search_with_tag(self):
target_tag = 'spanishtag1'
tag_querystring = (
'selected_facets=tag_exact:{}'
'&selected_facets=tag_exact:spanishtag2'.format(target_tag))
request = HttpRequest()
request.GET = QueryDict(tag_querystring)
result = redirect_ask_search(request, language='es')
self.assertEqual(
result.get('location'),
'/es/obtener-respuestas/buscar-por-etiqueta/{}/'.format(
target_tag))
def test_english_redirect_search_with_tag(self):
target_tag = 'englishtag1'
tag_querystring = (
'selected_facets=tag_exact:{}'
'&selected_facets=tag_exact:englishtag2'.format(target_tag))
request = HttpRequest()
request.GET = QueryDict(tag_querystring)
result = redirect_ask_search(request, language='en')
self.assertEqual(
result.get('location'),
'/ask-cfpb/search-by-tag/{}/'.format(
target_tag))
def test_redirect_search_with_unrecognized_facet_raises_404(self):
querystring = \
'sort=-updated_at&selected_facets=imtkfidycqszgfdb&page=60'
request = HttpRequest()
request.GET = QueryDict(querystring)
with self.assertRaises(Http404):
redirect_ask_search(request)
| 1.835938 | 2 |
setup.py | bcongdon/instapaper-to-sqlite | 1 | 1043 | import os
from setuptools import setup
VERSION = "0.2"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="instapaper-to-sqlite",
description="Save data from Instapaper to a SQLite database",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/bcongdon/instapaper-to-sqlite",
project_urls={
"Source": "https://github.com/bcongdon/instapaper-to-sqlite",
"Issues": "https://github.com/bcongdon/instapaper-to-sqlite/issues",
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Database",
],
keywords="instapaper sqlite export dogsheep",
version=VERSION,
packages=["instapaper_to_sqlite"],
entry_points="""
[console_scripts]
instapaper-to-sqlite=instapaper_to_sqlite.cli:cli
""",
install_requires=[
"click",
"requests",
"sqlite-utils~=3.17",
"pyinstapaper @ git+https://github.com/bcongdon/pyinstapaper#egg=pyinstapaper",
],
extras_require={"test": ["pytest"]},
tests_require=["instapaper-to-sqlite[test]"],
)
| 1.820313 | 2 |
pybm/commands/compare.py | nicholasjng/pybm | 12 | 1044 | from typing import List
from pybm import PybmConfig
from pybm.command import CLICommand
from pybm.config import get_reporter_class
from pybm.exceptions import PybmError
from pybm.reporters import BaseReporter
from pybm.status_codes import ERROR, SUCCESS
from pybm.util.path import get_subdirs
class CompareCommand(CLICommand):
"""
Report benchmark results from specified sources.
"""
usage = "pybm compare <run> <anchor-ref> <compare-refs> [<options>]\n"
def __init__(self):
super(CompareCommand, self).__init__(name="compare")
self.config = PybmConfig.load()
def add_arguments(self):
self.parser.add_argument(
"run",
type=str,
metavar="<run>",
help="Benchmark run to report results for. "
"To report the preceding run, use the "
'"latest" keyword. To report results '
"of the n-th preceding run "
"(i.e., n runs ago), "
'use the "latest^{n}" syntax.',
)
self.parser.add_argument(
"refs",
nargs="+",
metavar="<refs>",
help="Benchmarked refs to compare. The first "
"given ref will be treated as the "
"anchor ref, relative to which all "
"differences are reported. An error is "
"raised if any of the given "
"refs are not present in the run.",
)
reporter: BaseReporter = get_reporter_class(config=self.config)
reporter_args = reporter.additional_arguments()
if reporter_args:
reporter_name = self.config.get_value("reporter.name")
reporter_group_desc = (
f"Additional options from configured reporter class {reporter_name!r}"
)
reporter_group = self.parser.add_argument_group(reporter_group_desc)
# add builder-specific options into the group
for arg in reporter_args:
reporter_group.add_argument(arg.pop("flags"), **arg)
def run(self, args: List[str]) -> int:
if not args:
self.parser.print_help()
return ERROR
self.add_arguments()
options = self.parser.parse_args(args)
reporter: BaseReporter = get_reporter_class(config=self.config)
# TODO: Parse run to fit schema
run = options.run
refs: List[str] = options.refs
result_dir = reporter.result_dir
# TODO: Make this dynamic to support other run identifiers
result = sorted(get_subdirs(result_dir))[-1]
result_path = result_dir / result
if result_path.exists():
reporter.compare(
*refs,
result=result,
target_filter=options.target_filter,
benchmark_filter=options.benchmark_filter,
context_filter=options.context_filter,
)
else:
raise PybmError(
f"No benchmark results found for the requested run {run!r}."
)
return SUCCESS
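# Typical invocation (refs are illustrative; the first ref is the anchor):
#   pybm compare latest main my-feature
# reports the benchmark differences of my-feature relative to main for the most recent run.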
| 2.28125 | 2 |
dddm/recoil_rates/halo.py | JoranAngevaare/dddm | 0 | 1045 | """
Get a WIMP rate for a given detector (not taking into account any
detector effects).
"""
import numericalunits as nu
import wimprates as wr
import dddm
export, __all__ = dddm.exporter()
@export
class SHM:
"""
class used to pass a halo model to the rate computation
must contain:
:param v_esc -- escape velocity (multiplied by units)
:param rho_dm -- density in mass/volume of dark matter at the Earth (multiplied by units)
The standard halo model also allows variation of v_0
:param v_0 -- v0 of the velocity distribution (multiplied by units)
:function velocity_dist -- function taking v,t giving normalised
velocity distribution in earth rest-frame.
"""
def __init__(self, v_0=None, v_esc=None, rho_dm=None):
self.v_0 = 230 * nu.km / nu.s if v_0 is None else v_0
self.v_esc = 544 * nu.km / nu.s if v_esc is None else v_esc
self.rho_dm = (0.3 * nu.GeV / nu.c0 ** 2 / nu.cm ** 3
if rho_dm is None else rho_dm)
def __str__(self):
# Standard Halo Model (shm)
return 'shm'
def velocity_dist(self, v, t):
"""
Get the velocity distribution in units of per velocity,
:param v: v is in units of velocity
:return: observed velocity distribution at earth
"""
return wr.observed_speed_dist(v, t, self.v_0, self.v_esc)
def parameter_dict(self):
"""Return a dict of readable parameters of the current settings"""
return dict(
v_0=self.v_0 / (nu.km / nu.s),
v_esc=self.v_esc / (nu.km / nu.s),
rho_dm=self.rho_dm / (nu.GeV / nu.c0 ** 2 / nu.cm ** 3),
)
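# Example (sketch): parameter_dict() divides the units back out for readable values.
#   halo = SHM(v_0=220 * nu.km / nu.s)
#   halo.parameter_dict()  # -> approximately {'v_0': 220.0, 'v_esc': 544.0, 'rho_dm': 0.3}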
| 3.046875 | 3 |
picket/rvae/train_eval_models.py | rekords-uw/Picket | 10 | 1046 | <filename>picket/rvae/train_eval_models.py<gh_stars>1-10
#!/usr/bin/env python3
import torch
from torch import optim
import torch.nn.functional as F
import argparse
from sklearn.metrics import mean_squared_error
import numpy as np
import json
from . import utils
from .model_utils import get_pi_exact_vec, rnn_vae_forward_one_stage, rnn_vae_forward_two_stage
def training_phase(model, optimizer, train_loader, args, epoch, mute=True):
model.train()
train_loss_vae, train_nll_vae, train_z_kld_vae, train_w_kld_vae = 4*[0]
train_loss_seq, train_nll_seq, train_z_kld_seq, train_w_kld_seq = 4*[0]
train_total_loss_seq_vae, train_loss_seq_vae, train_nll_seq_vae, train_z_kld_seq_vae, train_w_kld_seq_vae = 5*[0]
for batch_idx, unpack in enumerate(train_loader):
data_input = unpack[0]
if args.cuda_on:
data_input = data_input.cuda()
optimizer.zero_grad()
## first foward-pass
p_params, q_params, q_samples = model(data_input, n_epoch=epoch-1)
if not args.AVI:
get_pi_exact_vec(model, data_input, p_params, q_params, args, logit_ret=True) # get pi, saves to q_params (with no_grad)
vae_loss, vae_nll, vae_z_kld, vae_w_kld = model.loss_function(data_input, p_params, q_params, q_samples)
train_loss_vae += vae_loss.item()
train_nll_vae += vae_nll.item()
train_z_kld_vae += vae_z_kld.item()
train_w_kld_vae += vae_w_kld.item()
if args.inference_type == 'vae':
vae_loss.backward()
elif args.inference_type == 'seqvae':
if args.seqvae_bprop: # NOTE: rolls out iterations through time and bprops
params_in = (p_params, q_params, q_samples)
seq_loss_pack, _, _ = rnn_vae_forward_one_stage(params_in, data_input, model, vae_loss, args,
number_steps=args.seqvae_steps, loss_per_iter=True, epoch_id=epoch)
seq_total_loss, seq_final_loss, seq_final_nll, seq_final_z_kld, seq_final_w_kld = seq_loss_pack
train_total_loss_seq_vae += seq_total_loss.item()
train_loss_seq_vae += seq_final_loss.item()
train_nll_seq_vae += seq_final_nll.item()
train_z_kld_seq_vae += seq_final_z_kld.item()
train_w_kld_seq_vae += seq_final_w_kld.item()
else:
vae_loss.backward()
train_total_loss_seq_vae += vae_loss.item()
train_loss_seq_vae += vae_loss.item()
train_nll_seq_vae += vae_nll.item()
train_z_kld_seq_vae += vae_z_kld.item()
train_w_kld_seq_vae += vae_w_kld.item()
seq_total_loss = torch.tensor(0.0)
seq_final_loss = torch.tensor(0.0)
seq_final_nll = torch.tensor(0.0)
seq_final_z_kld = torch.tensor(0.0)
seq_final_w_kld = torch.tensor(0.0)
optimizer.step()
if batch_idx % args.log_interval == 0 and not mute:
print('\n\nTrain Epoch: {} [{}/{} ({:.0f}%)]\tVAE Loss: {:.3f}\tVAE NLL: {:.3f}\tVAE KLD_Z: {:.3f}\tVAE KLD_W: {:.3f}'.format(
epoch, batch_idx * len(data_input), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
vae_loss.item()/len(data_input), vae_nll.item()/len(data_input),
vae_z_kld.item()/len(data_input), vae_w_kld.item()/len(data_input)))
if args.inference_type == 'seqvae':
print('\n')
                print('\n\nAdditional Info:\tTotal Seq Loss: {:.3f}\tFinal Seq Loss: {:.3f}\tFinal Seq NLL: {:.3f}\tFinal Seq KLD_Z: {:.3f}\tFinal Seq KLD_W: {:.3f}\n'.format(
seq_total_loss.item()/len(data_input), seq_final_loss.item()/len(data_input),
seq_final_nll.item()/len(data_input), seq_final_z_kld.item()/len(data_input),
seq_final_w_kld.item()/len(data_input)))
dataset_len = float(len(train_loader.dataset))
ret = {'train_loss_vae': train_loss_vae/dataset_len, 'train_nll_vae': train_nll_vae/dataset_len,
'train_z_kld_vae': train_z_kld_vae/dataset_len, 'train_w_kld_vae': train_w_kld_vae/dataset_len}
if args.inference_type == "seqvae":
ret_seq = {'train_loss_seq': train_loss_seq_vae/dataset_len, 'train_nll_seq': train_nll_seq_vae/dataset_len,
'train_z_kld_seq': train_z_kld_seq_vae/dataset_len,'train_w_kld_seq': train_w_kld_seq_vae/dataset_len,
'train_total_loss_seq':train_total_loss_seq_vae/dataset_len}
ret = {**ret, **ret_seq}
return ret
def evaluation_phase(model, data_eval, dataset_obj, args, epoch,
clean_comp_show=False, data_eval_clean=False, logit_pi_prev=torch.tensor([]), w_conv=False, mask_err=None):
# if args.cuda_on:
# model.cpu()
if type(mask_err) != type(None):
mask_err = mask_err.bool()
model.eval()
p_params, q_params, q_samples = model(data_eval)
if not args.AVI:
get_pi_exact_vec(model, data_eval, p_params, q_params, args, logit_ret=True) # get pi
vae_loss, vae_nll, vae_z_kld, vae_w_kld = model.loss_function(data_eval, p_params,
q_params, q_samples)
eval_data_len = data_eval.shape[0]
losses = {'eval_loss_vae': vae_loss.item()/eval_data_len, 'eval_nll_vae':vae_nll.item()/eval_data_len,
'eval_z_kld_vae': vae_z_kld.item()/eval_data_len, 'eval_w_kld_vae':vae_w_kld.item()/eval_data_len}
# SEQ-VAE
if args.inference_type == 'seqvae':
#with torch.no_grad():
params_in = (p_params, q_params, q_samples)
if args.seqvae_two_stage:
seq_loss_pack, _, seq_param_pack = rnn_vae_forward_two_stage(params_in, data_eval, model, vae_loss, args,
number_steps=args.seqvae_steps, number_steps_second_stage=args.steps_2stage,
loss_per_iter=True, mask_err=mask_err, epoch_id=epoch)
else:
seq_loss_pack, _, seq_param_pack = rnn_vae_forward_one_stage(params_in, data_eval, model, vae_loss, args,
number_steps=args.seqvae_steps, loss_per_iter=True, mask_err=mask_err, epoch_id=epoch)
seq_total_loss, seq_final_loss, seq_final_nll, seq_final_z_kld, seq_final_w_kld = seq_loss_pack
p_params_final, q_params_final, q_samples_final = seq_param_pack
losses_seq_vae = {'eval_loss_seq': seq_final_loss.item()/eval_data_len, 'eval_nll_seq': seq_final_nll.item()/eval_data_len,
'eval_z_kld_seq': seq_final_z_kld.item()/eval_data_len, 'eval_w_kld_seq': seq_final_w_kld.item()/eval_data_len,
'eval_total_loss_seq': seq_total_loss.item()/eval_data_len}
losses = {**losses, **losses_seq_vae}
if args.inference_type == 'seqvae':
p_params_metric, q_params_metric, q_samples_metric = p_params_final, q_params_final, q_samples_final
else:
p_params_metric, q_params_metric, q_samples_metric = p_params, q_params, q_samples
    # Getting scores and the clean component if needed
with torch.no_grad():
if args.outlier_model == "VAE": # VAE models only (no w's or pi's)
# generative model only p(x|z, ...)
nll_score_mat = utils.generate_score_outlier_matrix(p_params_metric, data_eval, dataset_obj)
pi_score_mat = -10
converg_norm_w = -10
else:
if clean_comp_show:
loss_clean, nll_clean, z_kld_clean, w_kld_clean = model.loss_function(data_eval, p_params_metric,
q_params_metric, q_samples_metric,
clean_comp_only=True,
data_eval_clean=data_eval_clean)
losses_add = {'eval_loss_final_clean': loss_clean.item()/eval_data_len,
'eval_nll_final_clean': nll_clean.item()/eval_data_len,
'eval_z_kld_final_clean': z_kld_clean.item()/eval_data_len,
'eval_w_kld_final_clean': w_kld_clean.item()/eval_data_len
}
losses = {**losses, **losses_add}
# q(w|x, ...) param (pi), used in outlier score
pi_score_mat = torch.sigmoid(q_params_metric['w']['logit_pi']).clamp(1e-6, 1-1e-6)
# -log p(x|z, ...) used as outlier score
nll_score_mat = utils.generate_score_outlier_matrix(p_params_metric, data_eval, dataset_obj)
# check convergence of weights (pi's)
if w_conv:
if logit_pi_prev.nelement() == 0:
logit_pi_prev = torch.zeros_like(q_params_metric['w']['logit_pi'])
converg_norm_w = (q_params_metric['w']['logit_pi'] - logit_pi_prev).norm().item()
logit_pi_prev = q_params_metric['w']['logit_pi'].clone().detach()
else:
converg_norm_w = -10
# insert here measurement of calibration of pi's using MSE or cross-entropy
if isinstance(mask_err, torch.Tensor):
pi_mtx = pi_score_mat
pi_mtx_true = (~mask_err).float()
err_pi = ((pi_mtx - pi_mtx_true)**2).mean()
ce_pi = F.binary_cross_entropy(pi_mtx, pi_mtx_true)
print('MSE on pi pred: {}'.format(err_pi))
print('CE on pi pred: {}'.format(ce_pi))
print('dirt pi median: {} std: {}'.format(torch.sigmoid(q_params_metric['w']['logit_pi'][mask_err]).median(), torch.sigmoid(q_params_metric['w']['logit_pi'][mask_err]).std()))
print('clean pi median: {} std: {}'.format(torch.sigmoid(q_params_metric['w']['logit_pi'][~mask_err]).median(), torch.sigmoid(q_params_metric['w']['logit_pi'][~mask_err]).std()))
metrics = {'nll_score': nll_score_mat, 'pi_score': pi_score_mat, 'converg_norm_w': converg_norm_w}
return losses, metrics
def repair_phase(model, data_dirty, data_clean, dataset_obj, args, mask, mode, epoch):
model.eval()
# model params with input: dirty data
if args.inference_type == 'seqvae':
p_params_xd, q_params_xd, q_samples_xd = model(data_dirty)
if not args.AVI:
get_pi_exact_vec(model, data_dirty, p_params_xd, q_params_xd, args, logit_ret=True)
params_xd_in = (p_params_xd, q_params_xd, q_samples_xd)
if args.seqvae_two_stage:
_, _, (p_params_xd, q_params_xd, q_samples_xd) = rnn_vae_forward_two_stage(params_xd_in, data_dirty, model,
torch.tensor(0.0, device=data_dirty.device),
args, number_steps=args.seqvae_steps,
number_steps_second_stage=args.steps_2stage,
loss_per_iter=True, epoch_id=epoch)
else:
_, _, (p_params_xd, q_params_xd, q_samples_xd) = rnn_vae_forward_one_stage(params_xd_in, data_dirty, model,
torch.tensor(0.0, device=data_dirty.device),
args, number_steps=args.seqvae_steps, loss_per_iter=True, epoch_id=epoch)
else: # standard 'vae' type inference
p_params_xd, q_params_xd, q_samples_xd = model(data_dirty)
if not args.AVI:
get_pi_exact_vec(model, data_dirty, p_params_xd, q_params_xd, args, logit_ret=True) # get pi
# model params with input: underlying clean data
if args.inference_type == 'seqvae':
p_params_xc, q_params_xc, q_samples_xc = model(data_clean)
if not args.AVI:
get_pi_exact_vec(model, data_dirty, p_params_xc, q_params_xc, args, logit_ret=True)
params_xc_in = (p_params_xc, q_params_xc, q_samples_xc)
if args.seqvae_two_stage:
_, _, (p_params_xc, q_params_xc, q_samples_xc) = rnn_vae_forward_two_stage(params_xc_in, data_clean, model,
torch.tensor(0.0, device=data_clean.device),
args, number_steps=args.seqvae_steps,
number_steps_second_stage=args.steps_2stage,
loss_per_iter=True, epoch_id=epoch)
else:
_, _, (p_params_xc, q_params_xc, q_samples_xc) = rnn_vae_forward_one_stage(params_xc_in, data_clean, model,
torch.tensor(0.0, device=data_clean.device),
args, number_steps=args.seqvae_steps, loss_per_iter=True, epoch_id=epoch)
else: # 'vae' type inference
p_params_xc, q_params_xc, q_samples_xc = model(data_clean)
# no need to get pi, not used after
# error (MSE) lower bound, on dirty cell positions only
error_lb_dc, error_lb_dc_per_feat = utils.error_computation(model, data_clean, p_params_xc['x'], mask) # x_truth - f_vae(x_clean)
# error repair, on dirty cell positions only
error_repair_dc, error_repair_dc_per_feat = utils.error_computation(model, data_clean, p_params_xd['x'], mask) # x_truth - f_vae(x_dirty)
print("\n\n {} REPAIR ERROR (DIRTY POS):{}".format(mode, error_repair_dc))
# error upper bound, on dirty cell positions only
error_up_dc, error_up_dc_per_feat = utils.error_computation(model, data_clean, data_dirty, mask, x_input_size=True) # x_truth - x_dirty
# error on clean cell positions only (to test impact on dirty cells on clean cells under model)
error_repair_cc, error_repair_cc_per_feat = utils.error_computation(model, data_clean, p_params_xd['x'], 1-mask)
print("\n\n {} REPAIR ERROR (CLEAN POS):{}".format(mode, error_repair_cc))
# Get NLL (predict. posterior approx) under dirty data
dict_slice = lambda dict_op, row_pos: {key:(value[row_pos,:] \
if value.shape[0]==data_dirty.shape[0] else value) for key, value in dict_op.items()}
dirty_row_pos = mask.any(dim=1).bool()
n_dirty_rows = dirty_row_pos.sum().item()
p_params_xd_sliced = dict_slice(p_params_xd, dirty_row_pos)
q_params_xd_sliced = dict()
if args.outlier_model == 'RVAE':
q_params_xd_sliced['w'] = dict_slice(q_params_xd['w'], dirty_row_pos)
q_params_xd_sliced['z'] = dict_slice(q_params_xd['z'], dirty_row_pos)
q_samples_xd_sliced = dict_slice(q_samples_xd, dirty_row_pos)
vae_loss_dc, vae_nll_dc, vae_z_kld_dc, vae_w_kld_dc = model.loss_function(data_clean[dirty_row_pos,:], p_params_xd_sliced,
q_params_xd_sliced, q_samples_xd_sliced,
clean_comp_only=True,
data_eval_clean=True)
clean_row_pos = ~dirty_row_pos
n_clean_rows = clean_row_pos.sum().item()
p_params_xd_sliced = dict_slice(p_params_xd, clean_row_pos)
q_params_xd_sliced = dict()
if args.outlier_model == 'RVAE':
q_params_xd_sliced['w'] = dict_slice(q_params_xd['w'], clean_row_pos)
q_params_xd_sliced['z'] = dict_slice(q_params_xd['z'], clean_row_pos)
q_samples_xd_sliced = dict_slice(q_samples_xd, clean_row_pos)
vae_loss_cc, vae_nll_cc, vae_z_kld_cc, vae_w_kld_cc = model.loss_function(data_clean[clean_row_pos,:], p_params_xd_sliced,
q_params_xd_sliced, q_samples_xd_sliced,
clean_comp_only=True,
data_eval_clean=True)
eval_data_len = data_dirty.shape[0]
losses = {'eval_loss_final_clean_dc': vae_loss_dc.item()/n_dirty_rows, 'eval_nll_final_clean_dc':vae_nll_dc.item()/n_dirty_rows,
'eval_z_kld_final_clean_dc': vae_z_kld_dc.item()/n_dirty_rows, 'eval_w_kld_final_clean_dc':vae_w_kld_dc.item()/n_dirty_rows,
'eval_loss_final_clean_cc': vae_loss_cc.item()/n_clean_rows, 'eval_nll_final_clean_cc':vae_nll_cc.item()/n_clean_rows,
'eval_z_kld_final_clean_cc': vae_z_kld_cc.item()/n_clean_rows, 'eval_w_kld_final_clean_cc':vae_w_kld_cc.item()/n_clean_rows,
'eval_loss_final_clean_all': (vae_loss_cc+vae_loss_dc).item()/eval_data_len, 'eval_nll_final_clean_all':(vae_nll_cc+vae_nll_dc).item()/eval_data_len,
'eval_z_kld_final_clean_all': (vae_z_kld_cc+vae_z_kld_dc).item()/eval_data_len, 'eval_w_kld_final_clean_all':(vae_w_kld_cc+vae_w_kld_dc).item()/eval_data_len,
'mse_lower_bd_dirtycells': error_lb_dc.item(), 'mse_upper_bd_dirtycells': error_up_dc.item() , 'mse_repair_dirtycells': error_repair_dc.item(),
'mse_repair_cleancells': error_repair_cc.item(),
'errors_per_feature': [error_lb_dc_per_feat, error_repair_dc_per_feat, error_up_dc_per_feat, error_repair_cc_per_feat]}
return losses
| 1.992188 | 2 |
setup.py | nopipifish/bert4keras | 1 | 1047 | <gh_stars>1-10
#! -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='bert4keras',
version='0.8.4',
description='an elegant bert4keras',
long_description='bert4keras: https://github.com/bojone/bert4keras',
license='Apache License 2.0',
url='https://github.com/bojone/bert4keras',
author='bojone',
author_email='<EMAIL>',
install_requires=['keras<=2.3.1'],
packages=find_packages()
)
| 1.171875 | 1 |
sztuczna_inteligencja/3-lab/backtrackingSolve.py | Magikis/Uniwersity | 12 | 1048 | <reponame>Magikis/Uniwersity
# import cProfile
# import pstats
# import io
from picture import *
# pr = cProfile.Profile()
# pr.enable()
def out(p):
for i in range(2):
print([len(x) for x in p.perms[i]])
if __name__ == '__main__':
p = Picture()
p.genPerms()
p.detuctAll()
p.backtrackLoop()
p.saveOtput()
# pr.disable()
# s = io.StringIO()
# sortby = 'cumulative'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print(s.getvalue())
| 2.328125 | 2 |
benchmark/generate_examples_strprose.py | HALOCORE/SynGuar | 1 | 1049 | # imports
import os
import json
import subprocess
abs_join = lambda p1, p2 : os.path.abspath(os.path.join(p1, p2))
# constants
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SEED_RELPATH = "./strprose/example_files/_seeds.json"
SEED_FULLPATH = abs_join(SCRIPT_DIR, SEED_RELPATH)
SEED_INFO = None
with open(SEED_FULLPATH, 'r') as f:
SEED_INFO = json.load(f)
TOOL_RELPATH = "../StrPROSE-synthesizer/StrPROSE/bin/Debug/netcoreapp3.1/StrPROSE.dll"
TOOL_FULLPATH = abs_join(SCRIPT_DIR, TOOL_RELPATH)
TARGET_RELDIR = "./strprose/targets"
TARGET_FULLDIR = abs_join(SCRIPT_DIR, TARGET_RELDIR)
MAX_SAMPLE_SIZE = 2000
EXAMPLE_RELDIR = "./strprose/example_files"
EXAMPLE_FULLDIR = abs_join(SCRIPT_DIR, EXAMPLE_RELDIR)
TIME_OUT = 120
# methods
def generate_examples(bench_id, seed):
command_line_args = [
"dotnet",
TOOL_FULLPATH,
"--samplegen",
TARGET_FULLDIR,
str(bench_id),
str(seed),
str(MAX_SAMPLE_SIZE),
EXAMPLE_FULLDIR
]
try:
print(f"# -------- Start Process ({bench_id}, {seed}) --------")
done_result = subprocess.run(command_line_args, timeout=TIME_OUT)
print(f"# ^^^^^^^^ Done: {done_result.returncode} ({bench_id}, {seed}) ^^^^^^^^")
except subprocess.TimeoutExpired:
print('# Error: subprocess TIMEOUT !!!')
if __name__ == "__main__":
for bench_id in SEED_INFO["bench_seeds"]:
for seed in SEED_INFO["bench_seeds"][bench_id]:
generate_examples(bench_id, seed) | 2.0625 | 2 |
mmtbx/regression/tls/tst_u_tls_vs_u_ens_03.py | rimmartin/cctbx_project | 0 | 1050 | from __future__ import division
from mmtbx.tls import tools
import math
import time
pdb_str_1 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 3.000 0.000 0.000 1.00 0.00 C
"""
pdb_str_2 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 0.000 3.000 0.000 1.00 0.00 C
"""
pdb_str_3 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 0.000 0.000 3.000 1.00 0.00 C
"""
pdb_str_4 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 1.000 2.000 3.000 1.00 0.00 C
"""
def exercise_03():
sqrt = math.sqrt
vs = []
vs.append( [(sqrt(2)/2, sqrt(2)/2, 0), (-sqrt(2)/2, sqrt(2)/2, 0), (0,0,1)] )
vs.append( [(1,0,0), (0, sqrt(2)/2, sqrt(2)/2), (0, -sqrt(2)/2, sqrt(2)/2)] )
vs.append( [(sqrt(3)/2, 1/2, 0), (-1/2, sqrt(3)/2, 0), (0,0,1)] )
vs.append( [(1,0,0), (0, sqrt(3)/2, 1/2), (0, -1/2, sqrt(3)/2)] )
for pdb_str in [pdb_str_1, pdb_str_2, pdb_str_3, pdb_str_4]:
for vs_ in vs:
vx,vy,vz = vs_
      print(vx, vy, vz)
tools.u_tls_vs_u_ens(pdb_str=pdb_str,
tx=0.05,ty=0.07,tz=0.09,
vx=vx, vy=vy, vz=vz,
n_models=1000)
if (__name__ == "__main__"):
t0 = time.time()
exercise_03()
print "Time: %6.4f"%(time.time()-t0)
print "OK"
| 2.203125 | 2 |
elliesite/context_processors.py | davidkartuzinski/ellieplatformsite | 1 | 1051 | import sys
from django.urls import resolve
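# A standard Django context processor: it only takes effect if it is listed under
# the 'context_processors' option of the TEMPLATES setting.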
def global_vars(request):
return {
'GLOBAL_TWITTER_ACCOUNT': '@open_apprentice',
'ORGANIZATION_NAME': 'Open Apprentice Foundation',
'ORGANIZATION_WEBSITE': 'https://openapprentice.org',
'ORGANIZATION_LOGO': '/static/img/ellie/open-apprentice-logo-full.png', # relative URL with pre /,
'SITE_LOGO_URL': '/static/img/ellie/ellie-platform-logo.png', # relative URL with pre /
'APPNAME': sys.modules[resolve(request.path_info).func.__module__].__package__,
}
| 1.914063 | 2 |
tools/train_net_step.py | va1shn9v/Detectron.pytorch | 0 | 1052 | """ Training script for steps_with_decay policy"""
import argparse
import os
import sys
import pickle
import resource
import traceback
import logging
from collections import defaultdict
import numpy as np
import yaml
import torch
from torch.autograd import Variable
import torch.nn as nn
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader
import _init_paths # pylint: disable=unused-import
import nn as mynn
import utils.net as net_utils
import utils.misc as misc_utils
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from datasets.roidb import combined_roidb_for_training
from roi_data.loader import RoiDataLoader, MinibatchSampler, BatchSampler, collate_minibatch
from modeling.model_builder import Generalized_RCNN
from utils.detectron_weight_helper import load_detectron_weight
from utils.logging import setup_logging
from utils.timer import Timer
from utils.training_stats import TrainingStats
# Set up logging and load config options
logger = setup_logging(__name__)
logging.getLogger('roi_data.loader').setLevel(logging.INFO)
# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def parse_args():
"""Parse input arguments"""
parser = argparse.ArgumentParser(description='Train a X-RCNN network')
parser.add_argument(
'--dataset', dest='dataset', required=True,
help='Dataset to use')
parser.add_argument(
'--num_classes', dest='num_classes',
help='Number of classes in your custom dataset',
default=None, type=int)
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='Config file for training (and optionally testing)')
parser.add_argument(
'--set', dest='set_cfgs',
        help='Set config keys. Key value sequence separated by whitespace. '
'e.g. [key] [value] [key] [value]',
default=[], nargs='+')
parser.add_argument(
'--disp_interval',
help='Display training info every N iterations',
default=20, type=int)
parser.add_argument(
'--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
# Optimization
    # These options have the highest priority and can overwrite the values in the config file
# or values set by set_cfgs. `None` means do not overwrite.
parser.add_argument(
'--bs', dest='batch_size',
        help='Explicitly specify to overwrite the value coming from cfg_file.',
type=int)
parser.add_argument(
'--nw', dest='num_workers',
help='Explicitly specify to overwrite number of workers to load data. Defaults to 4',
type=int)
parser.add_argument(
'--iter_size',
help='Update once every iter_size steps, as in Caffe.',
default=1, type=int)
parser.add_argument(
'--o', dest='optimizer', help='Training optimizer.',
default=None)
parser.add_argument(
'--lr', help='Base learning rate.',
default=None, type=float)
parser.add_argument(
'--lr_decay_gamma',
help='Learning rate decay rate.',
default=None, type=float)
# Epoch
parser.add_argument(
'--start_step',
help='Starting step count for training epoch. 0-indexed.',
default=0, type=int)
# Resume training: requires same iterations per epoch
parser.add_argument(
'--resume',
        help='resume training from a checkpoint',
action='store_true')
parser.add_argument(
'--no_save', help='do not save anything', action='store_true')
parser.add_argument(
'--load_ckpt', help='checkpoint path to load')
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument(
'--use_tfboard', help='Use tensorflow tensorboard to log training info',
action='store_true')
return parser.parse_args()
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
"""Save checkpoint"""
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(model, mynn.DataParallel):
model = model.module
model_state_dict = model.state_dict()
torch.save({
'step': step,
'train_size': train_size,
'batch_size': args.batch_size,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
def main():
"""Main function"""
args = parse_args()
print('Called with args:')
print(args)
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError("Need Cuda device to run !")
if args.dataset == "custom_dataset" and args.num_classes is None:
raise ValueError("Need number of classes in your custom dataset to run!")
if args.dataset == "coco2017":
cfg.TRAIN.DATASETS = ('coco_2014_train',)
cfg.MODEL.NUM_CLASSES = 4
elif args.dataset == "keypoints_coco2017":
cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 2
elif args.dataset == "voc2007":
cfg.TRAIN.DATASETS = ('voc_2007_train',)
cfg.MODEL.NUM_CLASSES = 21
elif args.dataset == "voc2012":
cfg.TRAIN.DATASETS = ('voc_2012_train',)
cfg.MODEL.NUM_CLASSES = 21
elif args.dataset == "custom_dataset":
cfg.TRAIN.DATASETS = ('custom_data_train',)
cfg.MODEL.NUM_CLASSES = args.num_classes
else:
raise ValueError("Unexpected args.dataset: {}".format(args.dataset))
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
### Adaptively adjust some configs ###
original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
original_num_gpus = cfg.NUM_GPUS
if args.batch_size is None:
args.batch_size = original_batch_size
cfg.NUM_GPUS = torch.cuda.device_count()
assert (args.batch_size % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
effective_batch_size = args.iter_size * args.batch_size
print('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH))
### Adjust learning based on batch size change linearly
# For iter_size > 1, gradients are `accumulated`, so lr is scaled based
# on batch_size instead of effective_batch_size
old_base_lr = cfg.SOLVER.BASE_LR
cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
print('Adjust BASE_LR linearly according to batch_size change:\n'
' BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
### Adjust solver steps
step_scale = original_batch_size / effective_batch_size
old_solver_steps = cfg.SOLVER.STEPS
old_max_iter = cfg.SOLVER.MAX_ITER
cfg.SOLVER.STEPS = list(map(lambda x: int(x * step_scale + 0.5), cfg.SOLVER.STEPS))
cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * step_scale + 0.5)
print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n'
' SOLVER.STEPS: {} --> {}\n'
' SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS,
old_max_iter, cfg.SOLVER.MAX_ITER))
# Scale FPN rpn_proposals collect size (post_nms_topN) in `collect` function
# of `collect_and_distribute_fpn_rpn_proposals.py`
#
# post_nms_topN = int(cfg[cfg_key].RPN_POST_NMS_TOP_N * cfg.FPN.RPN_COLLECT_SCALE + 0.5)
if cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN:
cfg.FPN.RPN_COLLECT_SCALE = cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch
        print('Scale FPN rpn_proposals collect size directly proportional to the change of IMS_PER_BATCH:\n'
' cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
### Overwrite some solver settings from command line arguments
if args.optimizer is not None:
cfg.SOLVER.TYPE = args.optimizer
if args.lr is not None:
cfg.SOLVER.BASE_LR = args.lr
if args.lr_decay_gamma is not None:
cfg.SOLVER.GAMMA = args.lr_decay_gamma
assert_and_infer_cfg()
timers = defaultdict(Timer)
### Dataset ###
timers['roidb'].tic()
roidb, ratio_list, ratio_index = combined_roidb_for_training(
cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
timers['roidb'].toc()
roidb_size = len(roidb)
logger.info('{:d} roidb entries'.format(roidb_size))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
# Effective training sample size for one epoch
train_size = roidb_size // args.batch_size * args.batch_size
batchSampler = BatchSampler(
sampler=MinibatchSampler(ratio_list, ratio_index),
batch_size=args.batch_size,
drop_last=True
)
dataset = RoiDataLoader(
roidb,
cfg.MODEL.NUM_CLASSES,
training=True)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_sampler=batchSampler,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch)
dataiterator = iter(dataloader)
### Model ###
maskRCNN = Generalized_RCNN()
if cfg.CUDA:
maskRCNN.cuda()
### Optimizer ###
gn_param_nameset = set()
for name, module in maskRCNN.named_modules():
if isinstance(module, nn.GroupNorm):
gn_param_nameset.add(name+'.weight')
gn_param_nameset.add(name+'.bias')
gn_params = []
gn_param_names = []
bias_params = []
bias_param_names = []
nonbias_params = []
nonbias_param_names = []
nograd_param_names = []
for key, value in maskRCNN.named_parameters():
if value.requires_grad:
if 'bias' in key:
bias_params.append(value)
bias_param_names.append(key)
elif key in gn_param_nameset:
gn_params.append(value)
gn_param_names.append(key)
else:
nonbias_params.append(value)
nonbias_param_names.append(key)
else:
nograd_param_names.append(key)
assert (gn_param_nameset - set(nograd_param_names) - set(bias_param_names)) == set(gn_param_names)
# Learning rate of 0 is a dummy value to be set properly at the start of training
params = [
{'params': nonbias_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY},
{'params': bias_params,
'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0},
{'params': gn_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}
]
    # names of parameters for each parameter group
param_names = [nonbias_param_names, bias_param_names, gn_param_names]
if cfg.SOLVER.TYPE == "SGD":
optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
elif cfg.SOLVER.TYPE == "Adam":
optimizer = torch.optim.Adam(params)
### Load checkpoint
if args.load_ckpt:
load_name = args.load_ckpt
logging.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(maskRCNN, checkpoint['model'])
if args.resume:
args.start_step = checkpoint['step'] + 1
if 'train_size' in checkpoint: # For backward compatibility
if checkpoint['train_size'] != train_size:
print('train_size value: %d different from the one in checkpoint: %d'
% (train_size, checkpoint['train_size']))
# reorder the params in optimizer checkpoint's params_groups if needed
# misc_utils.ensure_optimizer_ckpt_params_order(param_names, checkpoint)
# There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.
# However it's fixed on master.
optimizer.load_state_dict(checkpoint['optimizer'])
# misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
del checkpoint
torch.cuda.empty_cache()
if args.load_detectron: #TODO resume for detectron weights (load sgd momentum values)
logging.info("loading Detectron weights %s", args.load_detectron)
load_detectron_weight(maskRCNN, args.load_detectron)
    lr = optimizer.param_groups[0]['lr']  # lr of non-bias parameters, for command line outputs.
maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
minibatch=True)
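    # cpu_keywords keeps 'im_info' and 'roidb' on the CPU when the minibatch is
    # scattered across GPUs, presumably because they hold per-image metadata rather
    # than tensors to be trained on.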
### Training Setups ###
args.run_name = misc_utils.get_run_name() + '_step'
output_dir = misc_utils.get_output_dir(args, args.run_name)
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
# Set the Tensorboard logger
tblogger = SummaryWriter(output_dir)
### Training Loop ###
maskRCNN.train()
CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
# Set index for decay steps
decay_steps_ind = None
for i in range(1, len(cfg.SOLVER.STEPS)):
if cfg.SOLVER.STEPS[i] >= args.start_step:
decay_steps_ind = i
break
if decay_steps_ind is None:
decay_steps_ind = len(cfg.SOLVER.STEPS)
training_stats = TrainingStats(
args,
args.disp_interval,
tblogger if args.use_tfboard and not args.no_save else None)
try:
logger.info('Training starts !')
step = args.start_step
for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
# Warm up
if step < cfg.SOLVER.WARM_UP_ITERS:
method = cfg.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = step / cfg.SOLVER.WARM_UP_ITERS
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr_new = cfg.SOLVER.BASE_LR * warmup_factor
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
elif step == cfg.SOLVER.WARM_UP_ITERS:
net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
lr = optimizer.param_groups[0]['lr']
assert lr == cfg.SOLVER.BASE_LR
# Learning rate decay
if decay_steps_ind < len(cfg.SOLVER.STEPS) and \
step == cfg.SOLVER.STEPS[decay_steps_ind]:
logger.info('Decay the learning on step %d', step)
lr_new = lr * cfg.SOLVER.GAMMA
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
decay_steps_ind += 1
training_stats.IterTic()
optimizer.zero_grad()
for inner_iter in range(args.iter_size):
try:
input_data = next(dataiterator)
except StopIteration:
dataiterator = iter(dataloader)
input_data = next(dataiterator)
for key in input_data:
if key != 'roidb': # roidb is a list of ndarrays with inconsistent length
input_data[key] = list(map(Variable, input_data[key]))
try:
net_outputs = maskRCNN(**input_data)
                except Exception:
                    # Skip batches whose forward pass fails; avoid a bare except so that
                    # KeyboardInterrupt still reaches the checkpoint-saving handler below.
                    continue
training_stats.UpdateIterStats(net_outputs, inner_iter)
loss = net_outputs['total_loss']
loss.backward()
optimizer.step()
training_stats.IterToc()
training_stats.LogIterStats(step, lr)
if (step+1) % CHECKPOINT_PERIOD == 0:
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
# ---- Training ends ----
# Save last checkpoint
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
except (RuntimeError, KeyboardInterrupt):
del dataiterator
logger.info('Save ckpt on exception ...')
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
finally:
if args.use_tfboard and not args.no_save:
tblogger.close()
if __name__ == '__main__':
main()
| 2.046875 | 2 |
Lib/site-packages/astroid/brain/brain_numpy_core_multiarray.py | punithmadaiahkumar/try-django | 4 | 1053 | # Copyright (c) 2019-2020 hippo91 <<EMAIL>>
# Copyright (c) 2020 <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
"""Astroid hooks for numpy.core.multiarray module."""
import functools
from astroid.brain.brain_numpy_utils import infer_numpy_member, looks_like_numpy_member
from astroid.brain.helpers import register_module_extender
from astroid.builder import parse
from astroid.inference_tip import inference_tip
from astroid.manager import AstroidManager
from astroid.nodes.node_classes import Attribute, Name
def numpy_core_multiarray_transform():
return parse(
"""
# different functions defined in multiarray.py
def inner(a, b):
return numpy.ndarray([0, 0])
def vdot(a, b):
return numpy.ndarray([0, 0])
"""
)
register_module_extender(
AstroidManager(), "numpy.core.multiarray", numpy_core_multiarray_transform
)
METHODS_TO_BE_INFERRED = {
"array": """def array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0):
return numpy.ndarray([0, 0])""",
"dot": """def dot(a, b, out=None):
return numpy.ndarray([0, 0])""",
"empty_like": """def empty_like(a, dtype=None, order='K', subok=True):
return numpy.ndarray((0, 0))""",
"concatenate": """def concatenate(arrays, axis=None, out=None):
return numpy.ndarray((0, 0))""",
"where": """def where(condition, x=None, y=None):
return numpy.ndarray([0, 0])""",
"empty": """def empty(shape, dtype=float, order='C'):
return numpy.ndarray([0, 0])""",
"bincount": """def bincount(x, weights=None, minlength=0):
return numpy.ndarray([0, 0])""",
"busday_count": """def busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"busday_offset": """def busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"can_cast": """def can_cast(from_, to, casting='safe'):
return True""",
"copyto": """def copyto(dst, src, casting='same_kind', where=True):
return None""",
"datetime_as_string": """def datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind'):
return numpy.ndarray([0, 0])""",
"is_busday": """def is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"lexsort": """def lexsort(keys, axis=-1):
return numpy.ndarray([0, 0])""",
"may_share_memory": """def may_share_memory(a, b, max_work=None):
return True""",
# Not yet available because dtype is not yet present in those brains
# "min_scalar_type": """def min_scalar_type(a):
# return numpy.dtype('int16')""",
"packbits": """def packbits(a, axis=None, bitorder='big'):
return numpy.ndarray([0, 0])""",
# Not yet available because dtype is not yet present in those brains
# "result_type": """def result_type(*arrays_and_dtypes):
# return numpy.dtype('int16')""",
"shares_memory": """def shares_memory(a, b, max_work=None):
return True""",
"unpackbits": """def unpackbits(a, axis=None, count=None, bitorder='big'):
return numpy.ndarray([0, 0])""",
"unravel_index": """def unravel_index(indices, shape, order='C'):
return (numpy.ndarray([0, 0]),)""",
"zeros": """def zeros(shape, dtype=float, order='C'):
return numpy.ndarray([0, 0])""",
}
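# Register an inference tip for every stub above, on both attribute access
# (numpy.array) and bare-name access (array), so astroid infers an ndarray-like
# value wherever these multiarray functions are called.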
for method_name, function_src in METHODS_TO_BE_INFERRED.items():
inference_function = functools.partial(infer_numpy_member, function_src)
AstroidManager().register_transform(
Attribute,
inference_tip(inference_function),
functools.partial(looks_like_numpy_member, method_name),
)
AstroidManager().register_transform(
Name,
inference_tip(inference_function),
functools.partial(looks_like_numpy_member, method_name),
)
| 2.046875 | 2 |
tests/asp/weakConstraints/testcase13.bug.weakconstraints.gringo.test.py | bernardocuteri/wasp | 19 | 1054 | <filename>tests/asp/weakConstraints/testcase13.bug.weakconstraints.gringo.test.py
input = """
2 18 3 0 3 19 20 21
1 1 1 0 18
2 23 3 0 3 19 24 25
1 1 2 1 21 23
3 5 21 19 20 24 25 0 0
6 0 5 5 21 19 20 24 25 1 1 1 1 1
0
21 a
19 b
20 c
24 d
25 e
28 f
0
B+
0
B-
1
0
1
"""
output = """
COST 1@1
"""
| 1.484375 | 1 |
whoPay.py | susurigirl/susuri | 0 | 1055 | <reponame>susurigirl/susuri
import random
names_string = input("Enter the names of the friends taking part in the bet, separated by commas (,).\n")
names = names_string.split(",")
print(names)
n = random.randint(0, len(names) - 1)  # randint is inclusive on both ends
print(f"Today's coffee is on {names[n]}!")
| 3.328125 | 3 |
jp.atcoder/abc056/arc070_b/26725094.py | kagemeka/atcoder-submissions | 1 | 1056 | import sys
import typing
import numpy as np
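# Judging by the path (abc056 / arc070_b, "No Need"), this counts unnecessary
# cards: dp_l / dp_r are prefix- and suffix- subset-sum reachability tables over
# sums below k, a card a[p] is "necessary" if some subset of the other cards has a
# sum s with k - a[p] <= s < k, and the program prints how many cards are not.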
def solve(a: np.ndarray, k: int) -> typing.NoReturn:
n = len(a)
def compute_dp(a: np.ndarray) -> np.ndarray:
dp = np.zeros((n + 1, k), np.bool8)
dp[0, 0] = True
for i in range(n):
dp[i + 1] = dp[i].copy()
dp[i + 1, a[i] :] |= dp[i, : -a[i]]
return dp
dp_l = compute_dp(a)
dp_r = compute_dp(a[::-1])[::-1]
dp_r = dp_r.astype(np.int64).cumsum(axis=1)
cnt = 0
for p in range(n):
l, r = dp_l[p], dp_r[n - p]
x = a[p]
for i in np.flatnonzero(l).tolist():
if (
not r[k - i - 1]
- (0 if k - x - i - 1 < 0 else r[k - x - i - 1])
>= 1
):
continue
cnt += 1
break
print(n - cnt)
def main() -> typing.NoReturn:
n, k = map(int, input().split())
a = np.array(sys.stdin.readline().split(), dtype=np.int64)
solve(a, k)
main()
| 2.578125 | 3 |
MicroPython_BUILD/components/micropython/esp32/modules_examples/mqtt_example.py | FlorianPoot/MicroPython_ESP32_psRAM_LoBo | 838 | 1057 | import network
def conncb(task):
print("[{}] Connected".format(task))
def disconncb(task):
print("[{}] Disconnected".format(task))
def subscb(task):
print("[{}] Subscribed".format(task))
def pubcb(pub):
print("[{}] Published: {}".format(pub[0], pub[1]))
def datacb(msg):
print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])
mqtt = network.mqtt("loboris", "mqtt://loboris.eu", user="wifimcu", password="<PASSWORD>", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# secure connection requires more memory and may not work
# mqtts = network.mqtt("eclipse", "mqtts//iot.eclipse.org", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# wsmqtt = network.mqtt("eclipse", "ws://iot.eclipse.org:80/ws", cleansession=True, data_cb=datacb)
mqtt.start()
#mqtt.config(lwt_topic='status', lwt_msg='Disconected')
'''
# Wait until status is: (1, 'Connected')
mqtt.subscribe('test')
mqtt.publish('test', 'Hi from Micropython')
mqtt.stop()
'''
# ==================
# ThingSpeak example
# ==================
import network
import utime
def datacb(msg):
print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])
thing = network.mqtt("thingspeak", "mqtt://mqtt.thingspeak.com", user="anyName", password="<PASSWORD>", cleansession=True, data_cb=datacb)
# or secure connection
#thing = network.mqtt("thingspeak", "mqtts://mqtt.thingspeak.com", user="anyName", password="<PASSWORD>", cleansession=True, data_cb=datacb)
thingspeakChannelId = "123456" # enter Thingspeak Channel ID
thingspeakChannelWriteApiKey = "ThingspeakWriteAPIKey" # EDIT - enter Thingspeak Write API Key
thingspeakFieldNo = 1
thingSpeakChanelFormat = "json"
pubchan = "channels/{:s}/publish/{:s}".format(thingspeakChannelId, thingspeakChannelWriteApiKey)
pubfield = "channels/{:s}/publish/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
subchan = "channels/{:s}/subscribe/{:s}/{:s}".format(thingspeakChannelId, thingSpeakChanelFormat, thingspeakChannelWriteApiKey)
subfield = "channels/{:s}/subscribe/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
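# Topic strings follow the ThingSpeak MQTT scheme: publish to a whole channel or
# to a single field, and subscribe either to channel updates (json) or to one field.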
thing.start()
tmo = 0
while thing.status()[0] != 2:
utime.sleep_ms(100)
tmo += 1
if tmo > 80:
print("Not connected")
break
# subscribe to channel
thing.subscribe(subchan)
# subscribe to field
thing.subscribe(subfield)
# publish to channel
# Payload can include any of these fields, separated by ';':
# "field1=value;field2=value;...;field8=value;latitude=value;longitude=value;elevation=value;status=value"
thing.publish(pubchan, "field1=25.2;status=On line")
# Publish to field
thing.publish(pubfield, "24.5")
| 2.625 | 3 |
mlb/game/migrations/0009_game_game_type.py | atadams/mlb | 0 | 1058 | <filename>mlb/game/migrations/0009_game_game_type.py
# Generated by Django 2.2.8 on 2019-12-14 19:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0008_auto_20191214_1019'),
]
operations = [
migrations.AddField(
model_name='game',
name='game_type',
field=models.CharField(choices=[('E', 'Exhibition'), ('S', 'Spring Training'), ('R', 'Regular Season'), ('F', 'Wild Card'), ('D', 'Divisional Series'), ('L', 'League Championship Series'), ('W', 'World Series')], default='R', max_length=30),
),
]
| 1.929688 | 2 |
backend/main/chapters/c06_lists.py | Vman45/futurecoder | 0 | 1059 | # flake8: NOQA E501
import ast
import random
from textwrap import dedent
from typing import List
from main.exercises import generate_list, generate_string
from main.text import ExerciseStep, MessageStep, Page, Step, VerbatimStep, search_ast
from main.utils import returns_stdout
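# Each Page subclass below is one page of the course; its nested Step / VerbatimStep /
# ExerciseStep classes hold the text shown to the learner (docstrings), hints,
# reference solutions and generated test cases.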
class IntroducingLists(Page):
class first_list(VerbatimStep):
"""
It's time to learn about a powerful new type of value called lists. Here's an example:
__program_indented__
"""
def program(self):
words = ['This', 'is', 'a', 'list']
for word in words:
print(word)
class can_contain_anything(VerbatimStep):
"""
A list is a *sequence* (an ordered collection/container) of any number of values.
The values are often referred to as *elements*.
They can be anything: numbers, strings, booleans, even lists! They can also be a mixture of types.
To create a list directly, like above:
1. Write some square brackets: `[]`
2. If you don't want an empty list, write some expressions inside to be the elements.
3. Put commas (`,`) between elements to separate them.
Here's another example of making a list:
__program_indented__
"""
def program(self):
x = 1
things = ['Hello', x, x + 3]
print(things)
class numbers_sum(VerbatimStep):
"""
As you saw above, lists are *iterable*, meaning you can iterate over them with a `for loop`.
Here's a program that adds up all the numbers in a list:
__program_indented__
"""
def program(self):
numbers = [3, 1, 4, 1, 5, 9]
total = 0
for number in numbers:
total += number
print(total)
class strings_sum(ExerciseStep):
"""
Now modify the program so that it can add up a list of strings instead of numbers.
For example, given:
words = ['This', 'is', 'a', 'list']
it should print:
Thisisalist
"""
hints = """
This is very similar to the exercises you've done building up strings character by character.
The solution is very similar to the program that adds numbers.
In fact, what happens if you try running that program with a list of strings?
        The problem is that `total` starts as 0. You can't add 0 to a string because numbers and strings are incompatible.
Is there a similar concept among strings to 0? A blank initial value?
"""
@returns_stdout
def solution(self, words: List[str]):
total = ''
for word in words:
total += word
print(total)
tests = [
(['This', 'is', 'a', 'list'], 'Thisisalist'),
(['The', 'quick', 'brown', 'fox', 'jumps'], 'Thequickbrownfoxjumps'),
]
class double_numbers(ExerciseStep):
"""
Optional bonus challenge: extend the program to insert a separator string *between* each word.
For example, given
words = ['This', 'is', 'a', 'list']
separator = ' - '
it would output:
This - is - a - list
Lists and strings have a lot in common.
For example, you can add two lists to combine them together into a new list.
You can also create an empty list that has no elements.
Check for yourself:
numbers = [1, 2] + [3, 4]
print(numbers)
new_numbers = []
new_numbers += numbers
new_numbers += [5]
print(new_numbers)
With that knowledge, write a program which takes a list of numbers
and prints a list where each number has been doubled. For example, given:
numbers = [3, 1, 4, 1, 5, 9, 2, 6, 5]
it would print:
[6, 2, 8, 2, 10, 18, 4, 12, 10]
"""
hints = """
Remember that you can multiply numbers using `*`.
This program is structurally very similar to the programs you've written to build up strings character by character.
Make a new list, and then build it up element by element in a for loop.
Start with an empty list.
You can make a list with one element `x` by just writing `[x]`.
You can add an element to a list by adding a list containing one element.
"""
@returns_stdout
def solution(self, numbers: List[int]):
double = []
for number in numbers:
double += [number * 2]
print(double)
tests = [
([3, 1, 4, 1, 5, 9, 2, 6, 5], [6, 2, 8, 2, 10, 18, 4, 12, 10]),
([0, 1, 2, 3], [0, 2, 4, 6]),
]
class filter_numbers(ExerciseStep):
"""
Great!
When you want to add a single element to the end of a list, instead of:
some_list += [element]
it's actually more common to write:
some_list.append(element)
There isn't really a big difference between these, but `.append`
will be more familiar and readable to most people.
Now use `.append` to write a program which prints a list containing only the numbers bigger than 5.
For example, given:
numbers = [3, 1, 4, 1, 5, 9, 2, 6, 5]
it would print:
[9, 6]
"""
hints = """
This is very similar to the previous exercise.
The difference is that sometimes you should skip appending to the new list.
Use an `if` statement.
Use a comparison operator to test if a number is big enough to add.
"""
# TODO enforce not using +=
@returns_stdout
def solution(self, numbers: List[int]):
big_numbers = []
for number in numbers:
if number > 5:
big_numbers.append(number)
print(big_numbers)
tests = [
([3, 1, 4, 1, 5, 9, 2, 6, 5], [9, 6]),
([0, 2, 4, 6, 8, 10], [6, 8, 10]),
]
final_text = """
Fantastic! We're making great progress.
"""
class UsingBreak(Page):
title = "Using `break` to end a loop early"
class list_contains_exercise(ExerciseStep):
"""
Exercise: write a program which takes a list and a value and checks
if the list contains the value. For example, given:
things = ['This', 'is', 'a', 'list']
thing_to_find = 'is'
it should print `True`, but for
thing_to_find = 'other'
it should print `False`.
"""
hints = """
You will need a loop.
You will need an `if` statement.
You will need a comparison operator.
Specifically `==`.
You need a boolean variable that you print at the end.
If you find the element in the list you should set that variable to `True`.
Once you've found the element, you can't unfind it.
That means that once you set the variable to `True`, it should never be set to anything else after that.
Don't use an `else`.
There is no reason to ever set the variable to `False` inside the loop.
"""
@returns_stdout
def solution(self, things, thing_to_find):
found = False
for thing in things:
if thing == thing_to_find:
found = True
print(found)
tests = [
((['This', 'is', 'a', 'list'], 'is'), True),
((['This', 'is', 'a', 'list'], 'other'), False),
(([1, 2, 3, 4], 1), True),
(([1, 2, 3, 4], 0), False),
]
@classmethod
def generate_inputs(cls):
contained = random.choice([True, False])
things = generate_list(int)
if contained:
thing_to_find = random.choice(things)
else:
thing_to_find = random.choice([
min(things) - 1,
max(things) + 1,
])
return dict(
things=things,
thing_to_find=thing_to_find,
)
final_text = """
Nice!
A typical solution looks something like this:
found = False
for thing in things:
if thing == thing_to_find:
found = True
print(found)
Your solution is probably similar. It's fine, but it's a bit inefficient.
That's because it'll loop over the entire list even if it finds the element at the beginning.
You can stop any loop using a `break` statement, like so:
for thing in things:
if thing == thing_to_find:
found = True
break
    This is just as correct, but once it finds the element it stops, skipping unnecessary iterations and checks.
You can use snoop to see the difference.
"""
class GettingElementsAtPosition(Page):
title = "Getting Elements at a Position"
class introducing_subscripting(VerbatimStep):
"""
Looping is great, but often you just want to retrieve a single element from the list at a known position.
Here's how:
__program_indented__
"""
def program(self):
words = ['This', 'is', 'a', 'list']
print(words[0])
print(words[1])
print(words[2])
print(words[3])
class index_error(Step):
"""
In general, you can get the element at the position `i` with `words[i]`. The operation is called *subscripting* or *indexing*, and the position is called the *index*.
You've probably noticed that the first index is 0, not 1. In programming, counting starts at 0. It seems weird, but that's how most programming languages do it, and it's generally agreed to be better.
This also means that the last index in this list of 4 elements is 3. What happens if you try getting an index greater than that?
"""
program = "words[4]"
def check(self):
return "IndexError" in self.result
class introducing_len_and_range(VerbatimStep):
"""
There you go. `words[4]` and beyond don't exist, so trying that will give you an error.
By the way, you can get the number of elements in a list (commonly called the *length*) using `len(words)`.
That means that the last valid index of the list is `len(words) - 1`, so the last element is `words[len(words) - 1]`. Try these for yourself.
So in general, the valid indices are:
[0, 1, 2, ..., len(words) - 2, len(words) - 1]
There's a handy built in function to give you these values, called `range`:
__program_indented__
"""
def program(self):
for i in range(10):
print(i)
class range_len(VerbatimStep):
"""
`range(n)` is similar to the list `[0, 1, 2, ..., n - 2, n - 1]`.
This gives us an alternative way to loop over a list:
__program_indented__
"""
def program(self):
words = ['This', 'is', 'a', 'list']
for index in range(len(words)):
print(index)
print(words[index])
class index_exercise(ExerciseStep):
"""
Let's get some exercise! Given a list `things` and a value `to_find`,
print the first index of `to_find` in the list, i.e. the lowest number `i` such that
`things[i]` is `to_find`. For example, for
things = ['on', 'the', 'way', 'to', 'the', 'store']
to_find = 'the'
your program should print `1`.
You can assume that `to_find` appears at least once.
"""
hints = """
You will need to look at all the possible indices of `things` and check which one is the answer.
To look at all possible indices, you will need a loop over `range(len(things))`.
To check if an index is the answer, you will need to use:
- `if`
- the index in a subscript
- `==`
Since you're looking for the first index, you need to stop the loop once you find one.
You learned how to stop a loop in the middle recently.
You need to use `break`.
"""
class all_indices(MessageStep, ExerciseStep):
"""
You're almost there! However, this prints all the indices,
not just the first one.
"""
@returns_stdout
def solution(self, things, to_find):
for i in range(len(things)):
if to_find == things[i]:
print(i)
tests = [
((['on', 'the', 'way', 'to', 'the', 'store'], 'the'), "1\n4"),
(([0, 1, 2, 3, 4, 5, 6, 6], 6), "6\n7"),
]
class last_index(MessageStep, ExerciseStep):
"""
You're almost there! However, this prints the *last* index,
not the first one.
"""
@returns_stdout
def solution(self, things, to_find):
answer = None
for i in range(len(things)):
if to_find == things[i]:
answer = i
print(answer)
tests = [
((['on', 'the', 'way', 'to', 'the', 'store'], 'the'), 4),
(([0, 1, 2, 3, 4, 5, 6, 6], 6), 7),
]
@returns_stdout
def solution(self, things, to_find):
for i in range(len(things)):
if to_find == things[i]:
print(i)
break
tests = [
((['on', 'the', 'way', 'to', 'the', 'store'], 'the'), 1),
(([0, 1, 2, 3, 4, 5, 6, 6], 6), 6),
]
@classmethod
def generate_inputs(cls):
things = generate_list(str)
to_find = generate_string()
things += [to_find] * random.randint(1, 3)
random.shuffle(things)
return dict(
things=things,
to_find=to_find,
)
class zip_exercise(ExerciseStep):
"""
Nice!
By the way, indexing and `len()` also work on strings. Try them out in the shell.
Here's another exercise. Given two strings of equal length, e.g:
string1 = "Hello"
string2 = "World"
print them vertically side by side, with a space between each character:
H W
e o
l r
l l
o d
"""
hints = """
Did you experiment with indexing and `len()` with strings in the shell?
Forget loops for a moment. How would you print just the first line, which has the first character of each of the two strings?
In the second line you want to print the second character of each string, and so on.
You will need a `for` loop.
You will need indexing (subscripting).
You will need `range`.
You will need `len`.
You will need `+`.
You will need to index both strings.
You will need to pass the same index to both strings each time to retrieve matching characters.
"""
@returns_stdout
def solution(self, string1, string2):
for i in range(len(string1)):
char1 = string1[i]
char2 = string2[i]
print(char1 + ' ' + char2)
tests = {
("Hello", "World"): dedent("""\
H W
e o
l r
l l
o d
"""),
("Having", "ablast"): dedent("""\
H a
a b
v l
i a
n s
g t
"""),
}
@classmethod
def generate_inputs(cls):
length = random.randrange(5, 11)
return dict(
string1=generate_string(length),
string2=generate_string(length),
)
class zip_longest_exercise(ExerciseStep):
"""
Incredible!
Your solution probably looks something like this:
for i in range(len(string1)):
char1 = string1[i]
char2 = string2[i]
print(char1 + ' ' + char2)
This doesn't work so well if the strings have different lengths.
In fact, it goes wrong in different ways depending on whether `string1` or `string2` is longer.
Your next challenge is to fix this problem by filling in 'missing' characters with spaces.
For example, for:
string1 = "Goodbye"
string2 = "World"
output:
G W
o o
o r
d l
b d
y
e
and for:
string1 = "Hello"
string2 = "Elizabeth"
output:
H E
e l
l i
l z
o a
b
e
t
h
"""
hints = [
"The solution has the same overall structure and "
"essential elements of the previous solution, "
"but it's significantly longer and will require "
"a few additional ideas and pieces.",
dedent("""
In particular, it should still contain something like:
for i in range(...):
...
print(char1 + ' ' + char2)
"""),
"What should go inside `range()`? Neither `len(string1)` nor `len(string2)` is good enough.",
"You want a loop iteration for every character in the longer string.",
"That means you need `range(<length of the longest string>)`",
"In other words you need to find the biggest of the two values "
"`len(string1)` and `len(string2)`. You've already done an exercise like that.",
"Once you've sorted out `for i in range(...)`, `i` will sometimes be too big "
"to be a valid index for both strings. You will need to check if it's too big before indexing.",
"Remember, the biggest valid index for `string1` is `len(string1) - 1`. "
"`len(string)` is too big.",
"You will need two `if` statements, one for each string.",
"You will need to set e.g. `char1 = ' '` when `string1[i]` is not valid.",
]
# TODO catch user writing string1 < string2
@returns_stdout
def solution(self, string1, string2):
length1 = len(string1)
length2 = len(string2)
if length1 > length2:
length = length1
else:
length = length2
for i in range(length):
if i < len(string1):
char1 = string1[i]
else:
char1 = ' '
if i < len(string2):
char2 = string2[i]
else:
char2 = ' '
print(char1 + ' ' + char2)
tests = {
("Goodbye", "World"): dedent("""\
G W
o o
o r
d l
b d
y
e
"""),
("Hello", "Elizabeth"): dedent("""\
H E
e l
l i
l z
o a
b
e
t
h
"""),
}
@classmethod
def generate_inputs(cls):
length1 = random.randrange(5, 11)
length2 = random.randrange(12, 20)
if random.choice([True, False]):
length1, length2 = length2, length1
return dict(
string1=generate_string(length1),
string2=generate_string(length2),
)
final_text = """
Magnificent! Take a break, you've earned it!
"""
class CallingFunctionsTerminology(Page):
title = "Terminology: Calling functions and methods"
class print_functions(VerbatimStep):
"""
It's time to expand your vocabulary some more.
`print` and `len` are ***functions***. See for yourself:
__program_indented__
"""
def program(self):
print(len)
print(print)
class introducing_callable(VerbatimStep):
"""
An expression like `len(things)` or `print(things)` is a function ***call*** - when you write that, you are ***calling*** the function `len` or `print`. The fact that this is possible means that functions are ***callable***:
__program_indented__
"""
def program(self):
print(callable(len))
class not_callable(VerbatimStep):
"""
Most things are not callable, so trying to call them will give you an error:
__program_indented__
"""
# noinspection PyCallingNonCallable
def program(self):
f = 'a string'
print(callable(f))
f()
class print_returns_none(VerbatimStep):
"""
In the call `len(things)`, `things` is an ***argument***. Sometimes you will also see the word ***parameter***, which means basically the same thing as argument. It's a bit like you're giving the argument to the function - specifically we say that the argument `things` is *passed* to `len`, and `len` *accepts* or *receives* the argument.
`len(things)` will evaluate to a number such as 3, in which case we say that `len` ***returned*** 3.
All calls have to return something...even if it's nothing. For example, `print`'s job is to display something on screen, not to return a useful value. So it returns something useless instead:
__program_indented__
"""
# noinspection PyNoneFunctionAssignment
def program(self):
things = [1, 2, 3]
length = len(things)
printed = print(length)
print(printed)
class len_of_none(VerbatimStep):
"""
`None` is a special 'null' value which can't do anything interesting. It's a common placeholder that represents the lack of a real useful value. Functions that don't want to return anything return `None` by default. If you see an error message about `None` or `NoneType`, it often means you assigned the wrong thing to a variable:
__program_indented__
"""
# noinspection PyNoneFunctionAssignment,PyUnusedLocal,PyTypeChecker
def program(self):
things = print([1, 2, 3])
length = len(things)
class methods_of_str(VerbatimStep):
"""
A ***method*** is a function which belongs to a type, and can be called on all values of that type using `.`. For example, `upper` and `lower` are methods of strings, which are called with e.g. `word.upper()`:
__program_indented__
"""
def program(self):
word = 'Hello'
print(word.upper)
print(word.upper())
class no_append_for_str(VerbatimStep):
"""
Another example is that `append` is a method of lists. But you can't use `.upper` on a list or `.append` on a string:
__program_indented__
"""
# noinspection PyUnresolvedReferences
def program(self):
word = 'Hello'
word.append('!')
final_text = """
The word 'attribute' in the error message refers to the use of `.` - the error actually comes just from `word.append`, without even a call.
"""
class FunctionsAndMethodsForLists(Page):
# TODO this is quite the information dump and I'd like it to be a little more interactive,
# but users don't need to know these functions off by heart.
class sum_list(Step):
"""
Let's review how to work with lists. Suppose we have a list `nums = [1, 2, 3]`. You can use:
- **`append`**: Add an element to the end of the list. `nums.append(4)` changes the list to `[1, 2, 3, 4]`.
- **`len`**: Returns the number of elements. `len(nums)` is `3`.
- **`range`**: `range(n)` is an object similar to the list of numbers from 0 to `n - 1`. In particular, `range(len(nums))` is like `[0, 1, 2]`.
- **`subscripting`**: Get a value at an index. `nums[0]` is 1, `nums[1]` is 2, `nums[2]` is 3.
- **`+`**: Concatenates lists. `nums + [4, 5]` is `[1, 2, 3, 4, 5]`.
Here's some new things. Try them out in the shell.
- **`subscript assignment`**: Set a value at an index. `nums[0] = 9` changes the list to `[9, 2, 3]`.
- **`join`**: Add a list of strings with a separator in between. This is a method of strings (the separator) which takes an iterable of strings as an argument. `'--'.join(['apples', 'oranges', 'bananas'])` returns `'apples--oranges--bananas'`. You can also use an empty string if you don't want a separator, e.g. `''.join(['apples', 'oranges', 'bananas'])` returns `'applesorangesbananas'`.
- **`sum`**: Add a list of numbers. `sum(nums)` is 6.
- **`in`**: A comparison operator that checks if a value is in a list. `2 in nums` is `True`, but `4 in nums` is `False`.
- **`index`**: Returns the first index of a value in a list. `[7, 8, 9, 8].index(8)` is 1. Raises an error if the value isn't there.
You may recognise some of these from your exercises. I assure you that those exercises were not pointless, as you've now learned valuable fundamental skills. For example, you can use `in` to check if a list contains 5, but there's no similarly easy way to check for a number bigger than 5.
It's useful to know these functions, but it's not easy to learn them all, and there's many more. A more important skill is being able to look things up. For example, here are some typical ways you might Google the above functions if you forgot their names:
- `append`
- python add element to list
- python add item at end of list
- `len`
- python size of list
- python number of elements in list
- python how many characters in string
- `join`
- python combine list of strings with separator
- python add together list of strings with string in between
- `sum`
- python add list of numbers
- python total of numbers
- `in`
- python check if list contains value
- python test if list has element
- `index`
- python get position of element
- python get index of value
Let's practice this skill now. Find a function/method that returns the value in a list which is bigger than any other value. For example, given the list `[21, 55, 4, 91, 62, 49]`, it will return `91`. You should write the answer in the shell as a single small expression. For example, if you were looking for the function `sum`, you could write `sum([21, 55, 4, 91, 62, 49])`. Don't solve this manually with a loop.
"""
hints = """
Use the words 'python' and 'list' in your search query.
In one word, what's special about `91` in the list `[21, 55, 4, 91, 62, 49]`?
'biggest' or 'largest'
'python biggest value in list'
"""
program = "max([21, 55, 4, 91, 62, 49])"
def check(self):
return search_ast(
self.stmt,
ast.Call(func=ast.Name(id='max')),
)
class list_insert(Step):
"""
Good find! Let's do one more. If you have a list:
nums = [1, 2, 3, 4, 5]
You could write `nums.append(9)` and `nums` would change to:
[1, 2, 3, 4, 5, 9]
But suppose you don't want the 9 to be at the end, you want it to go between the second and third elements:
[1, 2, 9, 3, 4, 5]
Call the right function/method in the shell to do that.
"""
hints = """
Use the words 'python' and 'list' in your search query.
Instead of putting the value at the beginning or end, we want to put it ____________?
'in the middle' or 'at an index' or 'at a particular position'
'python add value at index'
"""
program = "nums.insert(2, 9)"
def check(self):
return search_ast(
self.stmt,
ast.Call(func=ast.Attribute(attr='insert'),
args=[ast.Constant(value=2),
ast.Constant(value=9)]),
)
class dir_list(VerbatimStep):
"""
Perfect!
It can also be useful to Google things like "python list tutorial", e.g. if:
- Googling a specific method has failed so you want to find it manually.
- You're still confused about lists after this course.
- It's been a while since you learned about lists and you need a reminder.
- You're struggling to solve a problem with lists and you need to go back to basics and strengthen your foundations.
There are also ways to find information without any googling. Try `__program__` in the shell.
"""
program = "dir([])"
final_text = """
`dir()` returns a list of the argument's attributes, which are mostly methods. Many will start with `__` which you can ignore for now - scroll to the end of the list and you'll see some familiar methods.
Here are a few more useful functions/methods. Suppose `nums = [28, 99, 10, 81, 59, 64]`
- **`sorted`**: Takes an iterable and returns a list of the elements in order. `sorted(nums)` returns `[10, 28, 59, 64, 81, 99]`.
- **`pop`**: Removes and returns an element at a given index. `nums.pop(3)` removes `nums[3]` (`81`) from the list and returns it. Without an argument, i.e. just `nums.pop()`, it will remove and return the last element.
- **`remove`**: Removes the first occurrence of the given element. `nums.remove(10)` will leave `nums` as `[28, 99, 81, 59, 64]`. Raises an error if the value doesn't exist. Equivalent to `nums.pop(nums.index(10))`.
- **`count`**: Returns the number of times the argument appears in the list. `[1, 2, 3, 2, 7, 2, 5].count(2)` is 3.
You've already seen that `len` and subscripting work with strings, a bit as if strings are lists of characters. Strings also support some of the new methods we've learned, not just for characters but for any substring. For example:
- `'the' in 'feed the dog and the cat'` is `True`
- `'feed the dog and the cat'.count('the')` is 2
- `'feed the dog and the cat'.index('the')` is 5
Note that in most cases, methods which modify a list in place (`append`, `insert`, `remove`) merely return `None`, while the remaining functions/methods return a new useful value without changing the original argument. The only exception is the `pop` method.
Modifying a value directly is called *mutation* - types of values which can be mutated are *mutable*, while those that can't are *immutable*. Strings are immutable - they don't have any methods like `append` or even subscript assignment. You simply can't change a string - you can only create new strings and use those instead. That means that this is a useless statement on its own:
word.upper()
The string referred to by `word` isn't modified, instead `word.upper()` returned a new string which was immediately discarded. If you want to change the value that `word` refers to, you have to assign a new value to the variable:
word = word.upper()
Or you can use `word.upper()` immediately in a larger expression, e.g.
if word.lower() == 'yes':
"""
class UnderstandingProgramsWithPythonTutor(Page):
final_text = """
It's time to learn about another tool to explore programs. Put some code in the editor and then click the new "Python Tutor" button. Here's some example code if you want:
all_numbers = [2, 4, 8, 1, 9, 7]
small_numbers = []
big_numbers = []
for number in all_numbers:
if number <= 5:
small_numbers.append(number)
else:
big_numbers.append(number)
print(small_numbers)
print(big_numbers)
The button will open a new tab with a visualisation from [pythontutor.com](http://pythontutor.com).
There you can navigate through the program step by step with the "Prev" or "Next" buttons, or drag
the slider left or right. You can also see the values of variables on the right.
"""
class EqualsVsIs(Page):
title = "`==` vs `is`"
class two_separate_lists(VerbatimStep):
"""
It's time to learn some technical details that are often misunderstood and lead to errors.
Run this program:
__program_indented__
"""
def program(self):
list1 = [1, 2, 3]
list2 = [1, 2, 3]
print(list1)
print(list2)
print(list1 == list2)
print(list1 is list2)
list1.append(4)
print(list1)
print(list2)
class same_list(VerbatimStep):
"""
This program is quite straightforward and mostly consists of things you're familiar with.
We create two variables which refer to lists.
The lists have the same elements, so they are equal: `list1 == list2` is `True`.
But then there's a new comparison operator: `is`. Here `list1 is list2` is `False`.
That means that regardless of the two lists being equal,
they are still two separate, distinct, individual lists.
As a result, when you append 4 to `list1`, only `list1` changes.
Now change `list2 = [1, 2, 3]` to `list2 = list1` and see what difference it makes.
"""
program_in_text = False
def program(self):
list1 = [1, 2, 3]
list2 = list1
print(list1)
print(list2)
print(list1 == list2)
print(list1 is list2)
list1.append(4)
print(list1)
print(list2)
final_text = """
Now `list1 is list2` is `True`, because *there is only one list*, and the two variables
`list1` and `list2` both refer to that same list. `list1.append(4)` appends to the one list
and the result can be seen in both `print(list1)` and `print(list2)` because both lines
are now just different ways of printing the same list.
I recommend running both versions with Python Tutor to see how it visualises the difference.
In the second case, the two variables both have arrows pointing to a single list object.
`list2 = list1` doesn't create an eternal link between the variables. If you assign a new value
to *either* of the variables, e.g. `list1 = [7, 8, 9]`, the other variable will be unaffected
and will still point to the original list.
Basically, an assignment like:
list2 = <expression>
means 'make the variable `list2` refer to whatever `<expression>` evaluates to'.
It doesn't make a copy of that value, which is how both variables can end up pointing to the same list.
But as we've learned before, `list2` doesn't remember `<expression>`, only the value.
It doesn't know about other variables.
You can copy a list with the `copy` method:
list2 = list1.copy()
This will make the program behave like the first version again.
If you come across this kind of problem and you're still having trouble understanding this stuff, read the essay [Facts and myths about Python names and values](https://nedbatchelder.com/text/names.html).
"""
class ModifyingWhileIterating(Page):
final_text = """
    Consider this program. It loops through a list of numbers and removes the ones that are 10 or smaller. Or at least, it tries to. I recommend running it with Python Tutor.
numbers = [10, 7, 8, 3, 12, 15]
for i in range(len(numbers)):
number = numbers[i]
if number <= 10:
numbers.pop(i)
print(numbers)
(remember that `numbers.pop(i)` removes the element from `numbers` at index `i`)
As it runs, it clearly skips even looking at 7 or 3 and doesn't remove them, and at the end it fails when it tries to access an index that's too high. Can you see why this happens?
The index variable `i` runs through the usual values 0, 1, 2, ... as it's supposed to, but as the list changes those are no longer the positions we want. For example in the first iteration `i` is 0 and `number` is 10, which gets removed. This shifts the rest of the numbers left one position, so now 7 is in position 0. But then in the next iteration `i` is 1, and `numbers[i]` is 8. 7 got skipped.
We could try writing the program to use `remove` instead of `pop` so we don't have to use indices. It even looks nicer this way.
numbers = [10, 7, 8, 3, 12, 15]
for number in numbers:
if number <= 10:
numbers.remove(number)
print(numbers)
But it turns out this does the same thing, for the same reason. Iterating over a list still goes through the indices under the hood.
The lesson here is to ***never modify something while you iterate over it***. Keep mutation and looping separate.
The good news is that there are many ways to solve this. You can instead just loop over a copy, as in:
for number in numbers.copy():
    Now the list being modified and the list being iterated over are separate objects, even if they start out with equal contents.
Similarly, you could loop over the original and modify a copy:
numbers = [10, 7, 8, 3, 12, 15]
big_numbers = numbers.copy()
for number in numbers:
if number <= 10:
big_numbers.remove(number)
print(big_numbers)
Or you could build up a new list from scratch. In this case, we've already done a similar thing in an exercise:
numbers = [10, 7, 8, 3, 12, 15]
big_numbers = []
for number in numbers:
if number > 10:
big_numbers.append(number)
print(big_numbers)
"""
| 4.375 | 4 |
redisSeed.py | bigmacd/miscPython | 0 | 1060 | <filename>redisSeed.py
import time
import redis
import json
import argparse
""" Follows the StackExchange best practice for creating a work queue.
Basically push a task and publish a message that a task is there."""
def PushTask(client, queue, task, topic):
client.lpush(queue, task)
client.publish(topic, queue)
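# Illustrative counterpart (not part of the original script): a worker for this
# pattern would subscribe to the topic and then block-pop tasks from the queue,
# e.g. with redis-py's brpop. The function name below is an assumption made for
# this sketch only.
def PopTask(client, queue, timeout=0):
    # brpop blocks until an item is available (or the timeout expires) and
    # returns a (queue, task) tuple, or None on timeout.
    item = client.brpop(queue, timeout=timeout)
    return item[1] if item else None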
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--queue", help="The queue from which workers will grab tasks")
parser.add_argument("-t", "--task", help="The task data")
parser.add_argument("-o", "--topic", help="The topic to which workers are subscribed")
parser.add_argument("-s", "--server", help="redis server host or IP")
parser.add_argument("-p",
"--port",
help="redis server port (default is 6379)",
type=int,
default=6379)
args = parser.parse_args()
    if (args.queue is None
            or args.task is None
            or args.topic is None
            or args.server is None):
parser.print_help()
else:
        client = redis.StrictRedis(host=args.server, port=args.port)
PushTask(client, args.queue, args.task, args.topic)
| 3.09375 | 3 |
app/celery.py | TIHLDE/Lepton | 7 | 1061 | <reponame>TIHLDE/Lepton<filename>app/celery.py
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
app = Celery("app")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
app.conf.update(
task_serializer="json",
accept_content=["json"], # Ignore other content
result_serializer="json",
timezone="Europe/Oslo",
enable_utc=True,
)
@app.task(bind=True)
def debug_task(self):
print("Request: {0!r}".format(self.request))
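# Illustrative usage (not part of the original module): with a worker running,
# the task above can be queued asynchronously, e.g.
#   from app.celery import debug_task
#   debug_task.delay()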
| 2.03125 | 2 |
src/garage/core/__init__.py | researchai/unsupervised_meta_rl | 1 | 1062 | <reponame>researchai/unsupervised_meta_rl<filename>src/garage/core/__init__.py
from garage.core.serializable import Serializable
from garage.core.parameterized import Parameterized # noqa: I100
__all__ = ['Serializable', 'Parameterized']
| 1.164063 | 1 |
formidable/forms/boundfield.py | jayvdb/django-formidable | 0 | 1063 | <reponame>jayvdb/django-formidable
from django.forms import forms
class FormatBoundField(forms.BoundField):
"""
The format field skips the rendering with the label attribute
in the form level (i.e => form.as_p() doesn't have to generate any label
for format field).
This boundfield has this main goal.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This attribute is used to generate (or not) the final label
# with html tags. We force the label to None to avoid the label
# generation:
self.label = None
class HelpTextBoundField(FormatBoundField):
def value(self):
return self.field.text
class TitleBoundField(FormatBoundField):
def value(self):
return self.field.label
class SeparatorBoundField(FormatBoundField):
def value(self):
return None
| 2.890625 | 3 |
algorithm_training/abc87.py | hirotosuzuki/algorithm_training | 0 | 1064 | <filename>algorithm_training/abc87.py
class TaskA:
def run(self):
V, A, B, C = map(int, input().split())
pass
class TaskB:
def run(self):
A = int(input())
B = int(input())
C = int(input())
X = int(input())
counter = 0
for a in range(A+1):
for b in range(B+1):
for c in range(C+1):
total = 500 * a + 100 * b + 50 * c
if total == X:
counter += 1
print(counter)
class TaskC:
def run(self):
pass
if __name__ == "__main__":
task = TaskB()
    task.run()
| 3.53125 | 4 |
serveur/serveurDroit.py | PL4typus/SysNetProject17 | 0 | 1065 | <gh_stars>0
#!/usr/bin/python
import socket,sys,os
TCP_IP = '127.0.0.1'
TCP_PORT = 6262
BUFFER_SIZE = 1024
s= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((TCP_IP,TCP_PORT))
s.listen(5)
conn, addr = s.accept()
print('Connection entrante :', addr)
data = conn.recv(BUFFER_SIZE)
if data == "m" :
os.popen("chmod +w $PWD")
else :
os.popen("chmod -w $PWD")
while 1 :
data = conn.recv(BUFFER_SIZE)
print data
if data == "1":
break
rep = os.popen(data+" 2>&1")
conn.send("reponse : \n"+rep.read())
conn.close()
| 2.78125 | 3 |
BE/common/helpers.py | kosior/ngLearn-1 | 1 | 1066 | <gh_stars>1-10
from rest_framework_jwt.utils import jwt_decode_handler
from users.models import User
from users.serializers import UserSerializer
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token,
'user': UserSerializer(user, context={'request': request}).data
}
def get_token_from_request(request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2:
return auth[1]
return None
def get_user_from_token(token):
data = jwt_decode_handler(token)
user_id = data.get('user_id')
if user_id:
try:
return User.objects.get(id=user_id)
except User.DoesNotExist:
return None
return None
def get_user_from_request(request):
token = get_token_from_request(request)
if token:
return get_user_from_token(token)
return None
| 2.4375 | 2 |
src/finn/util/basic.py | quetric/finn-base-1 | 0 | 1067 | <reponame>quetric/finn-base-1
# Copyright (c) 2020 Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Xilinx nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import random
import string
import subprocess
import tempfile
import warnings
from finn.core.datatype import DataType
# mapping from PYNQ board names to FPGA part names
pynq_part_map = dict()
pynq_part_map["Ultra96"] = "xczu3eg-sbva484-1-e"
pynq_part_map["Pynq-Z1"] = "xc7z020clg400-1"
pynq_part_map["Pynq-Z2"] = "xc7z020clg400-1"
pynq_part_map["ZCU102"] = "xczu9eg-ffvb1156-2-e"
pynq_part_map["ZCU104"] = "xczu7ev-ffvc1156-2-e"
# native AXI HP port width (in bits) for PYNQ boards
pynq_native_port_width = dict()
pynq_native_port_width["Pynq-Z1"] = 64
pynq_native_port_width["Pynq-Z2"] = 64
pynq_native_port_width["Ultra96"] = 128
pynq_native_port_width["ZCU102"] = 128
pynq_native_port_width["ZCU104"] = 128
# Alveo device and platform mappings
alveo_part_map = dict()
alveo_part_map["U50"] = "xcu50-fsvh2104-2L-e"
alveo_part_map["U200"] = "xcu200-fsgd2104-2-e"
alveo_part_map["U250"] = "xcu250-figd2104-2L-e"
alveo_part_map["U280"] = "xcu280-fsvh2892-2L-e"
alveo_default_platform = dict()
alveo_default_platform["U50"] = "xilinx_u50_gen3x16_xdma_201920_3"
alveo_default_platform["U200"] = "xilinx_u200_xdma_201830_2"
alveo_default_platform["U250"] = "xilinx_u250_xdma_201830_2"
alveo_default_platform["U280"] = "xilinx_u280_xdma_201920_3"
def get_rtlsim_trace_depth():
"""Return the trace depth for rtlsim via PyVerilator. Controllable
via the RTLSIM_TRACE_DEPTH environment variable. If the env.var. is
undefined, the default value of 1 is returned. A trace depth of 1
will only show top-level signals and yield smaller .vcd files.
The following depth values are of interest for whole-network stitched IP
rtlsim:
- level 1 shows top-level input/output streams
- level 2 shows per-layer input/output streams
- level 3 shows per full-layer I/O including FIFO count signals
"""
try:
return int(os.environ["RTLSIM_TRACE_DEPTH"])
except KeyError:
return 1
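# Illustrative usage (not part of the original module): a caller can request
# deeper traces before invoking rtlsim, e.g.
#   os.environ["RTLSIM_TRACE_DEPTH"] = "2"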
def get_remote_vivado():
    """Return the address of the remote Vivado synthesis server as set by the
REMOTE_VIVADO environment variable, otherwise return None"""
try:
return os.environ["REMOTE_VIVADO"]
except KeyError:
return None
def get_num_default_workers():
"""Return the number of workers for parallel transformations. Controllable
via the NUM_DEFAULT_WORKERS environment variable. If the env.var. is
undefined, the default value of 1 is returned.
"""
try:
return int(os.environ["NUM_DEFAULT_WORKERS"])
except KeyError:
return 1
def get_finn_root():
"Return the root directory that FINN is cloned into."
try:
return os.environ["FINN_ROOT"]
except KeyError:
raise Exception(
"""Environment variable FINN_ROOT must be set
        correctly. Please ensure you have launched the Docker container correctly.
"""
)
def get_execution_error_thresh():
"Return the max error that is allowed for rounding in FINN execution."
try:
return float(os.environ["ERROR_THRESH"])
except KeyError:
return 1e-2
def get_sanitize_quant_tensors():
"""Return whether tensors with quantization annotations should be sanitized.
Enabled by default, disabling will yield faster ONNX execution but may give
incorrect results. Use with caution."""
try:
return int(os.environ["SANITIZE_QUANT_TENSORS"])
except KeyError:
# enabled by default
return 1
def make_build_dir(prefix=""):
"""Creates a temporary folder with given prefix to be used as a build dir.
Use this function instead of tempfile.mkdtemp to ensure any generated files
will survive on the host after the FINN Docker container exits."""
try:
inst_prefix = os.environ["FINN_INST_NAME"] + "/"
return tempfile.mkdtemp(prefix=inst_prefix + prefix)
except KeyError:
raise Exception(
"""Environment variable FINN_INST_NAME must be set
        correctly. Please ensure you have launched the Docker container correctly.
"""
)
def get_by_name(container, name, name_field="name"):
"""Return item from container by .name field if it exists, None otherwise.
Will throw an Exception if multiple items are found, since this violates the
ONNX standard."""
names = [getattr(x, name_field) for x in container]
inds = [i for i, e in enumerate(names) if e == name]
if len(inds) > 1:
raise Exception("Found multiple get_by_name matches, undefined behavior")
elif len(inds) == 0:
return None
else:
ind = inds[0]
return container[ind]
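# Illustrative usage (not part of the original module), assuming an ONNX
# ModelProto: get_by_name(onnx_model.graph.node, "MatMul_0") returns the node
# whose .name is "MatMul_0", or None if no such node exists.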
def remove_by_name(container, name, name_field="name"):
"""Remove item from container by .name field if it exists."""
item = get_by_name(container, name, name_field)
if item is not None:
container.remove(item)
def random_string(stringLength=6):
"""Randomly generate a string of letters and digits."""
lettersAndDigits = string.ascii_letters + string.digits
return "".join(random.choice(lettersAndDigits) for i in range(stringLength))
def interleave_matrix_outer_dim_from_partitions(matrix, n_partitions):
"""Interleave the outermost dimension of a matrix from given
partitions (n_partitions)."""
if type(matrix) != np.ndarray or matrix.dtype != np.float32:
# try to convert to a float numpy array (container dtype is float)
matrix = np.asarray(matrix, dtype=np.float32)
shp = matrix.shape
ndim = matrix.ndim
# ensure # partitions evenly divide the outermost dimension
    assert (
        shp[0] % n_partitions == 0
    ), """The outermost dimension is not divisible
    by the number of partitions."""
# only tested for matrices
assert (
ndim == 2
), """The dimension of the matrix is not 2. Currently this function
only works for matrices."""
# interleave rows between PEs using reshape + transpose
matrix_r = matrix.reshape(-1, n_partitions, shp[1]).transpose((1, 0, 2))
matrix_r = matrix_r.reshape(n_partitions, -1, shp[1])
return matrix_r
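# Illustrative example (not part of the original module): with a 4-row matrix
# and n_partitions=2, rows are dealt out round-robin, so the result has shape
# (2, 2, 3), with partition 0 holding rows 0 and 2 and partition 1 holding
# rows 1 and 3:
#   interleave_matrix_outer_dim_from_partitions(np.arange(12).reshape(4, 3), 2)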
def roundup_to_integer_multiple(x, factor):
"""Round up integer x to the nearest integer multiple of integer factor.
Returns x if factor is set to -1. Both x and factor must otherwise be
positive."""
# ensure integers
assert int(x) == x, "The input x is not an integer."
assert int(factor) == factor, "The input factor is not an integer."
# use -1 to indicate no padding needed
if factor == -1:
return x
# ensure positive values
assert factor > 0 and x > 0, "Factor and x are <= 0."
if x < factor:
return factor
else:
if x % factor == 0:
return x
else:
return x + (factor - (x % factor))
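# Illustrative behaviour (not part of the original module):
#   roundup_to_integer_multiple(10, 4) -> 12
#   roundup_to_integer_multiple(12, 4) -> 12
#   roundup_to_integer_multiple(3, 8) -> 8 (values below the factor round up to it)
#   roundup_to_integer_multiple(7, -1) -> 7 (factor -1 disables padding)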
def pad_tensor_to_multiple_of(ndarray, pad_to_dims, val=0, distr_pad=False):
"""Pad each dimension of given NumPy ndarray using val, so that each
dimension is a multiple of the respective value in pad_to_dims. -1 means
do not pad that particular dimension. If distr_pad is False, all padding
will be inserted after the existing values; otherwise it will be split
evenly between before and after the existing values, with one extra value
inserted after if the padding amount is not divisible by two."""
if type(ndarray) != np.ndarray or ndarray.dtype != np.float32:
# try to convert to a float numpy array (container dtype is float)
ndarray = np.asarray(ndarray, dtype=np.float32)
assert ndarray.ndim == len(
pad_to_dims
), """The dimensions of the input
array don't match the length of the pad_to_dims value."""
# compute the desired shape
desired = zip(list(ndarray.shape), list(pad_to_dims))
desired = map(lambda x: roundup_to_integer_multiple(x[0], x[1]), desired)
desired = np.asarray(list(desired), dtype=np.int32)
current = np.asarray(ndarray.shape, dtype=np.int32)
pad_amt = desired - current
# add padding to get to the desired shape
if distr_pad:
pad_before = (pad_amt // 2).astype(np.int32)
pad_after = pad_amt - pad_before
pad_amt = list(zip(pad_before, pad_after))
else:
# all padding is added after the existing values
pad_amt = list(map(lambda x: (0, x), pad_amt))
ret = np.pad(ndarray, pad_amt, mode="constant", constant_values=val)
assert (
np.asarray(ret.shape, dtype=np.int32) == desired
).all(), """The
calculated output array doesn't match the desired/expected one."""
return ret
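# Illustrative behaviour (not part of the original module): a (3, 3) array
# padded with pad_to_dims=(4, -1) keeps its second dimension and grows the
# first to 4, giving shape (4, 3); with distr_pad=False the extra row of `val`
# is appended after the existing rows.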
def calculate_matvec_accumulator_range(matrix, vec_dt):
"""Calculate the minimum and maximum possible result (accumulator) values
for a dot product x * A, given matrix A of dims (MW, MH), and vector (1, MW)
with datatype vec_dt. Returns (acc_min, acc_max).
"""
min_weight = matrix.min()
max_weight = matrix.max()
perceptive_field_elems = matrix.shape[0]
min_input = vec_dt.min()
max_input = vec_dt.max()
# calculate minimum and maximum values of accumulator
# assume inputs span the whole range of the input datatype
acc_min = perceptive_field_elems * min(
min_weight * max_input,
min_weight * min_input,
max_weight * max_input,
max_weight * min_input,
)
acc_max = perceptive_field_elems * max(
min_weight * max_input,
min_weight * min_input,
max_weight * max_input,
max_weight * min_input,
)
return (acc_min, acc_max)
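# Illustrative example (not part of the original module), assuming a 2-bit
# signed input datatype (values -2..1): a weight matrix with 3 rows, minimum
# weight -1 and maximum weight +1 gives (acc_min, acc_max) = (-6, 6).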
def gen_finn_dt_tensor(finn_dt, tensor_shape):
"""Generates random tensor in given shape and with given FINN DataType."""
if type(tensor_shape) == list:
tensor_shape = tuple(tensor_shape)
if finn_dt == DataType.BIPOLAR:
tensor_values = np.random.randint(2, size=tensor_shape)
tensor_values = 2 * tensor_values - 1
elif finn_dt == DataType.BINARY:
tensor_values = np.random.randint(2, size=tensor_shape)
elif "INT" in finn_dt.name or finn_dt == DataType.TERNARY:
tensor_values = np.random.randint(
finn_dt.min(), high=finn_dt.max() + 1, size=tensor_shape
)
else:
raise ValueError(
"Datatype {} is not supported, no tensor could be generated".format(finn_dt)
)
# always use float type as container
return tensor_values.astype(np.float32)
def calculate_signed_dot_prod_range(dt_a, dt_b, len):
"""Returns the (min,max) values a dot product between two signed vectors of
types dt_a and dt_b of len elements can take."""
assert (
dt_a.signed() and dt_b.signed()
), """The input values are not both
signed vectors."""
min_prod = 2 ** 30
max_prod = -(2 ** 30)
for a_val in [dt_a.min(), dt_a.max()]:
for b_val in [dt_b.min(), dt_b.max()]:
prod = a_val * b_val * len
if prod < min_prod:
min_prod = prod
if prod > max_prod:
max_prod = prod
return (min_prod, max_prod)
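# Illustrative example (not part of the original module): two signed 2-bit
# datatypes (each ranging over -2..1) with len=4 give min_prod = 4 * (1 * -2)
# = -8 and max_prod = 4 * (-2 * -2) = 16.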
def sanitize_quant_values(model, node_tensors, execution_context, check_values=False):
"""Sanitize given list of tensors in execution_context by rounding values
that are supposed to be integers (as indicated by their quantization
annotation). Will raise an assertion if the amount of rounding is too large.
Returns the sanitized execution context.
If check_values is specified, an extra DataType.allowed() check will be
performed on any rounded tensors.
Background:
FINN uses floating point tensors as a carrier data type to represent
integers. Floating point arithmetic can introduce rounding errors, e.g.
(int_num * float_scale) / float_scale is not always equal to int_num.
We use this function to ensure that the values that are supposed to be
integers are indeed integers.
"""
for tensor in node_tensors:
dtype = model.get_tensor_datatype(tensor)
# floats don't need sanitization, skip to next
        # (sanitizing them would only slow execution down)
if dtype == DataType.FLOAT32:
continue
current_values = execution_context[tensor]
updated_values = current_values
has_to_be_rounded = False
# TODO: vectorize with numpy
for value in np.nditer(current_values):
if not dtype.allowed(value):
has_to_be_rounded = True
break
if has_to_be_rounded:
updated_values = np.round(current_values)
warnings.warn(
"The values of tensor {} can't be represented "
"with the set FINN datatype ({}), they will be rounded to match the "
"FINN datatype.".format(tensor, dtype)
)
# check if rounded values are not too far from original values
max_error = max(np.abs(current_values - updated_values).flatten())
if max_error <= get_execution_error_thresh():
if check_values is True:
# check again if values can now be represented with set finn datatype
# TODO: vectorize with numpy
for value in np.nditer(updated_values):
if not dtype.allowed(value):
raise Exception(
"""Values can't be represented with set
finn datatype ({}) for input {}""".format(
dtype, tensor
)
)
execution_context[tensor] = updated_values
else:
raise Exception(
"""Rounding error is too high to match set FINN
datatype ({}) for input {}""".format(
dtype, tensor
)
)
return execution_context
class CppBuilder:
    """Builds the g++ compiler command to produce the executable of the c++ code
in code_gen_dir which is passed to the function build() of this class."""
def __init__(self):
self.include_paths = []
self.cpp_files = []
self.executable_path = ""
self.code_gen_dir = ""
self.compile_components = []
self.compile_script = ""
def append_includes(self, library_path):
"""Adds given library path to include_paths list."""
self.include_paths.append(library_path)
def append_sources(self, cpp_file):
"""Adds given c++ file to cpp_files list."""
self.cpp_files.append(cpp_file)
def set_executable_path(self, path):
"""Sets member variable "executable_path" to given path."""
self.executable_path = path
def build(self, code_gen_dir):
"""Builds the g++ compiler command according to entries in include_paths
and cpp_files lists. Saves it in bash script in given folder and
executes it."""
# raise error if includes are empty
self.code_gen_dir = code_gen_dir
self.compile_components.append("g++ -o " + str(self.executable_path))
for cpp_file in self.cpp_files:
self.compile_components.append(cpp_file)
for lib in self.include_paths:
self.compile_components.append(lib)
bash_compile = ""
for component in self.compile_components:
bash_compile += str(component) + " "
self.compile_script = str(self.code_gen_dir) + "/compile.sh"
with open(self.compile_script, "w") as f:
f.write("#!/bin/bash \n")
f.write(bash_compile + "\n")
bash_command = ["bash", self.compile_script]
process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
process_compile.communicate()
| 0.960938 | 1 |
mainmenu.py | jeffrypaul37/Hospital-Management-System | 0 | 1068 | <reponame>jeffrypaul37/Hospital-Management-System
from tkinter import *
from tkcalendar import Calendar
from datetime import datetime
from datetime import date
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import askyesno
import re
import sqlite3
import tkinter.messagebox
import pandas as pd
import datetime
from dateutil import rrule, parser
today = date.today()
date1 = '05-10-2021'
date2 = '12-31-2050'
datesx = pd.date_range(today, date2).tolist()
conn = sqlite3.connect('database copy.db')
c = conn.cursor()
ids = []
class Application:
def __init__(self, master):
self.master = master
self.left = Frame(master, width=1000, height=800, bg='sea green')
self.left.pack(side=LEFT)
self.right = Frame(master, width=1000, height=800, bg='steelblue')
self.right.pack(side=RIGHT)
self.heading = Label(self.left, text="Appointments", font=('arial 40 bold'), fg='black', bg='sea green')
self.heading.place(x=0, y=0)
self.name = Label(self.left, text="Patient's Name", font=('arial 18 bold'), fg='black', bg='sea green')
self.name.place(x=0, y=100)
self.age = Label(self.left, text="Age", font=('arial 18 bold'), fg='black', bg='sea green')
self.age.place(x=0, y=140)
self.gender = Label(self.left, text="Gender", font=('arial 18 bold'), fg='black', bg='sea green')
self.gender.place(x=0, y=180)
self.location = Label(self.left, text="Location", font=('arial 18 bold'), fg='black', bg='sea green')
self.location.place(x=0, y=220)
self.date = Label(self.left, text="Appointment Date", font=('arial 18 bold'), fg='black', bg='sea green')
self.date.place(x=0, y=260)
self.time = Label(self.left, text="Appointment Time", font=('arial 18 bold'), fg='black', bg='sea green')
self.time.place(x=0, y=300)
self.phone = Label(self.left, text="Phone Number", font=('arial 18 bold'), fg='black', bg='sea green')
self.phone.place(x=0, y=340)
self.allergies = Label(self.left, text="Allergies", font=('arial 18 bold'), fg='black', bg='sea green')
self.allergies.place(x=0, y=380)
self.all_ent = Entry(self.left, width=30)
self.all_ent.place(x=250, y=380)
self.all_ent.insert(0, 'NONE')
self.chronic = Label(self.left, text="Chronic Conditions", font=('arial 18 bold'), fg='black', bg='sea green')
self.chronic.place(x=0, y=420)
self.chr_ent = Entry(self.left, width=30)
self.chr_ent.place(x=250, y=420)
self.chr_ent.insert(0, 'NONE')
self.bg = Label(self.left, text="Blood Group", font=('arial 18 bold'), fg='black', bg='sea green')
self.bg.place(x=0, y=460)
self.clicked3=StringVar()
self.clicked3.set("Select Blood Group")
self.bg_ent = OptionMenu(self.left,self.clicked3,*options3)
self.bg_ent.pack()
self.bg_ent.place(x=250, y=460)
self.name_ent = Entry(self.left, width=30)
self.name_ent.place(x=250, y=100)
self.age_ent = Entry(self.left, width=30)
self.age_ent.place(x=250, y=140)
self.clicked=StringVar()
self.clicked.set("Male")
self.gender_ent = OptionMenu(self.left,self.clicked,*options)
self.gender_ent.pack()
self.gender_ent.place(x=250, y=180)
self.location_ent=Entry(self.left,width=30)
self.location_ent.place(x=250, y=220)
self.clicked1=StringVar()
self.clicked1.set("Select Date")
self.date_ent = OptionMenu(self.left,self.clicked1,*options1)
self.date_ent.pack()
self.date_ent.place(x=250, y=260)
self.clicked2=StringVar()
self.clicked2.set("Select Time")
self.time_ent = OptionMenu(self.left,self.clicked2,*options2)
self.time_ent.pack()
self.time_ent.place(x=250, y=300)
self.phone_ent = Entry(self.left, width=30)
self.phone_ent.place(x=250, y=340)
self.submit = Button(self.left, text="Add Appointment", width=20, height=2, bg='steelblue', command=self.add_appointment)
self.submit.place(x=270, y=500)
self.submit = Button(self.left, text="View Appointments", width=20, height=2, bg='steelblue', command=self.view)
self.submit.place(x=600, y=100)
self.submit = Button(self.left, text="View/Update Patient Details", width=20, height=2, bg='steelblue', command=self.update)
self.submit.place(x=600, y=200)
self.submit = Button(self.left, text="Read Names", width=20, height=2, bg='steelblue', command=self.read)
self.submit.place(x=600, y=300)
self.submit = Button(self.left, text="Exit", width=20, height=2, bg='steelblue', command=self.quit)
self.submit.place(x=600, y=400)
sql2 = "SELECT ID FROM appointments"
self.result = c.execute(sql2)
for self.row in self.result:
self.id = self.row[0]
ids.append(self.id)
self.new = sorted(ids)
self.final_id = self.new[len(ids)-1]
self.logs = Label(self.right, text="Logs", font=('arial 28 bold'), fg='white', bg='steelblue')
self.logs.place(x=0, y=0)
self.box = Text(self.right, width=62, height=45)
self.box.place(x=20, y=60)
def add_appointment(self):
self.val1 = self.name_ent.get()
self.val2 = self.age_ent.get()
self.val3 = self.clicked.get()
self.val4 = self.location_ent.get()
self.val5 = self.clicked1.get()
self.val6 = self.clicked2.get()
self.val7 = self.phone_ent.get()
self.val8 = self.all_ent.get()
self.val9 = self.chr_ent.get()
self.val10 = self.clicked3.get()
        pattern=re.compile("[7-9][0-9]{9}")
pattern2=re.compile("[1-9]([0-9])*")
pattern1=re.compile(r'([A-Z])(\s*[A-Z])*$')
pattern.match(self.val7)
if self.val1 == '' or self.val2 == '' or self.val3 == '' or self.val4 == '' or self.val5 == '' or self.val6=='' or self.val7=='' or self.val10=='Select Blood Group' or self.val5=='Select Date' or self.val6=='Select Time':
print("ty",self.val3)
tkinter.messagebox.showinfo("Warning", "Please Fill Up All Boxes")
print(self.val3)
elif not(pattern1.match(self.val1)) or len(self.val1)<2:
tkinter.messagebox.showinfo("Warning","INVALID Name")
elif not(pattern2.match(self.val2)) or len(self.val2)>=3:
tkinter.messagebox.showinfo("Warning","INVALID Age")
elif not(pattern.match(self.val7)) or len(self.val7)>10:
tkinter.messagebox.showinfo("Warning", "INVALID Phone Number")
else:
sql = "INSERT INTO 'appointments' (name, age, gender, location, scheduled_time, phone,date,Allergies,Chronic_Conditions,Blood_Group) VALUES(?, ?, ?, ?, ?, ?,?,?,?,?)"
c.execute(sql, (self.val1, self.val2, self.val3, self.val4, self.val6, self.val7,self.val5,self.val8,self.val9,self.val10))
conn.commit()
tkinter.messagebox.showinfo("Success", "Appointment for " + str(self.val1) + " has been created" )
self.box.insert(END, '\n Appointment fixed for ' + str(self.val1) + '\n at ' + str(self.val5)+','+str(self.val6))
self.name_ent.delete(0,END)
self.age_ent.delete(0,END)
self.location_ent.delete(0,END)
self.phone_ent.delete(0,END)
self.clicked1.set("Select Date")
self.clicked2.set("Select Time")
self.clicked3.set("Select Blood Group")
self.chr_ent.delete(0,END)
self.all_ent.delete(0,END)
self.all_ent.insert(0, 'NONE')
self.chr_ent.insert(0, 'NONE')
def view(self):
import view
view.call()
def update(self):
import update
update.buildupdate()
def read(self):
import read
read.buildread()
def quit(self):
answer = askyesno(title='Confirm Exit', message='Are you sure you want to exit?')
if answer:
root.destroy()
root = Tk()
root.title("Shalom Clinic")
#root.geometry("1200x720+0+0")
root.attributes('-fullscreen', True)
root.resizable(0, 0)
Top = Frame(root, bd=1, relief=RIDGE)
Top.pack(side=TOP, fill=X)
Form = Frame(root, height=1)
Form.pack(side=TOP, pady=1)
lbl_title = Label(Top, text = "Shalom Clinic", font=('arial', 15))
lbl_title.pack(fill=X)
options=["Male","Female"]
options1=datesx
options2=["10:00:00","11:00:00","13:00:00"]
options3=["O+","O-","A+","A-","B+","B-","AB+","AB-"]
b = Application(root)
root.resizable(False, False)
root.mainloop()
| 3.359375 | 3 |
chia_tea/discord/commands/test_wallets.py | Tea-n-Tech/chia-tea | 6 | 1069 | <gh_stars>1-10
import os
import tempfile
import unittest
from datetime import datetime
from google.protobuf.json_format import ParseDict
from ...monitoring.MonitoringDatabase import MonitoringDatabase
from ...protobuf.generated.computer_info_pb2 import ADD, UpdateEvent
from ...protobuf.generated.monitoring_service_pb2 import DataUpdateRequest
from ...utils.testing import async_test
from .wallets import wallets_cmd
class TestWalletsCmd(unittest.TestCase):
@async_test
async def test_no_wallets_case(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
db_filepath = os.path.join(tmpdir, "temp.db")
with MonitoringDatabase(db_filepath):
messages = await wallets_cmd(db_filepath)
self.assertEqual(len(messages), 1)
self.assertTrue(messages[0].startswith("No wallets"))
@async_test
async def test_not_running_wallet_not_displayed(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
db_filepath = os.path.join(tmpdir, "temp.db")
now_timestamp = datetime.now().timestamp()
with MonitoringDatabase(db_filepath) as db:
event = ParseDict(
js_dict=dict(
event_type=ADD,
wallet=dict(
is_running=False,
is_synced=True,
),
),
message=UpdateEvent(),
)
request = DataUpdateRequest(
machine_id=1,
machine_name="machine A",
timestamp=now_timestamp,
events=[event],
)
db.store_data_update_request(request)
messages = await wallets_cmd(db_filepath)
self.assertEqual(len(messages), 1)
self.assertTrue(messages[0].startswith("No wallets"))
@async_test
async def test_display_running_wallet(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
db_filepath = os.path.join(tmpdir, "tmp.db")
now_timestamp = datetime.now().timestamp()
with MonitoringDatabase(db_filepath) as db:
update_events = [
ParseDict(
js_dict=dict(
event_type=ADD,
wallet=dict(
is_running=True,
is_synced=True,
),
),
message=UpdateEvent(),
),
]
request = DataUpdateRequest(
machine_id=1,
machine_name="machine A",
timestamp=now_timestamp,
events=update_events,
)
db.store_data_update_request(request)
messages = await wallets_cmd(db_filepath)
print(messages)
# no failure
self.assertEqual(len(messages), 1)
msg = messages[0]
self.assertFalse(msg.startswith("Traceback"))
# display online harvester
self.assertTrue("machine A" in msg)
self.assertIn("synchronized", msg)
| 2.125 | 2 |
render/PC_Normalisation.py | sun-pyo/OcCo | 158 | 1070 | <reponame>sun-pyo/OcCo<gh_stars>100-1000
# Copyright (c) 2020. <NAME>, <EMAIL>
import os, open3d, numpy as np
File_ = open('ModelNet_flist_short.txt', 'w')
if __name__ == "__main__":
root_dir = "../data/ModelNet_subset/"
for root, dirs, files in os.walk(root_dir, topdown=False):
for file in files:
if '.ply' in file:
amesh = open3d.io.read_triangle_mesh(os.path.join(root, file))
out_file_name = os.path.join(root, file).replace('.ply', '_normalised.obj')
center = amesh.get_center()
amesh.translate(-center)
maxR = (np.asarray(amesh.vertices)**2).sum(axis=1).max()**(1/2)
# we found divided by (2*maxR) has best rendered visualisation results
amesh.scale(1/(2*maxR))
open3d.io.write_triangle_mesh(out_file_name, amesh)
File_.writelines(out_file_name.replace('.obj', '').replace(root_dir, '') + '\n')
print(out_file_name)
| 2.109375 | 2 |
pymatgen/apps/battery/insertion_battery.py | adozier/pymatgen | 18 | 1071 | <filename>pymatgen/apps/battery/insertion_battery.py
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module is used for analysis of materials with potential application as
intercalation batteries.
"""
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Jan 13, 2012"
__status__ = "Beta"
import itertools
from pymatgen.core.composition import Composition
from pymatgen.core.units import Charge, Time
from pymatgen.phasediagram.maker import PhaseDiagram
from pymatgen.phasediagram.entries import PDEntry
from pymatgen.apps.battery.battery_abc import AbstractElectrode, \
AbstractVoltagePair
from pymatgen.core.periodic_table import Element
from scipy.constants import N_A
class InsertionElectrode(AbstractElectrode):
"""
A set of topotactically related compounds, with different amounts of a
single element, e.g. TiO2 and LiTiO2, that can be used to define an
insertion battery electrode.
"""
def __init__(self, entries, working_ion_entry):
"""
Create a new InsertionElectrode.
Args:
entries: A list of ComputedStructureEntries (or subclasses)
representing the different topotactic states of the battery,
e.g. TiO2 and LiTiO2.
working_ion_entry: A single ComputedEntry or PDEntry
representing the element that carries charge across the
battery, e.g. Li.
"""
self._entries = entries
self._working_ion = working_ion_entry.composition.elements[0]
self._working_ion_entry = working_ion_entry
#Prepare to make phase diagram: determine elements and set their energy
#to be very high
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
#Set an artificial energy for each element for convex hull generation
element_energy = max([entry.energy_per_atom for entry in entries]) + 10
pdentries = []
pdentries.extend(entries)
pdentries.extend([PDEntry(Composition({el:1}), element_energy)
for el in elements])
#Make phase diagram to determine which entries are stable vs. unstable
pd = PhaseDiagram(pdentries)
lifrac = lambda e: e.composition.get_atomic_fraction(self._working_ion)
#stable entries ordered by amount of Li asc
self._stable_entries = tuple(sorted([e for e in pd.stable_entries
if e in entries], key=lifrac))
#unstable entries ordered by amount of Li asc
self._unstable_entries = tuple(sorted([e for e in pd.unstable_entries
if e in entries], key=lifrac))
#create voltage pairs
self._vpairs = tuple([InsertionVoltagePair(self._stable_entries[i],
self._stable_entries[i + 1],
working_ion_entry)
for i in range(len(self._stable_entries) - 1)])
@property
def working_ion(self):
"""
The working ion as an Element object
"""
return self._working_ion
@property
def working_ion_entry(self):
return self._working_ion_entry
@property
def voltage_pairs(self):
return self._vpairs
def get_stable_entries(self, charge_to_discharge=True):
"""
Get the stable entries.
Args:
charge_to_discharge: order from most charge to most discharged
state? Default to True.
Returns:
A list of stable entries in the electrode, ordered by amount of the
working ion.
"""
list_copy = list(self._stable_entries)
        return list_copy if charge_to_discharge else list(reversed(list_copy))
def get_unstable_entries(self, charge_to_discharge=True):
"""
Returns the unstable entries for the electrode.
Args:
charge_to_discharge: Order from most charge to most discharged
state? Defaults to True.
Returns:
A list of unstable entries in the electrode, ordered by amount of
the working ion.
"""
list_copy = list(self._unstable_entries)
        return list_copy if charge_to_discharge else list(reversed(list_copy))
def get_all_entries(self, charge_to_discharge=True):
"""
Return all entries input for the electrode.
Args:
charge_to_discharge:
order from most charge to most discharged state? Defaults to
True.
Returns:
A list of all entries in the electrode (both stable and unstable),
ordered by amount of the working ion.
"""
all_entries = list(self.get_stable_entries())
all_entries.extend(self.get_unstable_entries())
#sort all entries by amount of working ion ASC
fsrt = lambda e: e.composition.get_atomic_fraction(self.working_ion)
all_entries = sorted([e for e in all_entries],
key=fsrt)
        return all_entries if charge_to_discharge else list(reversed(all_entries))
@property
def fully_charged_entry(self):
"""
The most charged entry along the topotactic path.
"""
return self._stable_entries[0]
@property
def fully_discharged_entry(self):
"""
The most discharged entry along the topotactic path.
"""
return self._stable_entries[-1]
def get_max_instability(self, min_voltage=None, max_voltage=None):
"""
The maximum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments)
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return max(data) if len(data) > 0 else None
def get_min_instability(self, min_voltage=None, max_voltage=None):
"""
The minimum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Minimum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments)
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return min(data) if len(data) > 0 else None
def get_max_muO2(self, min_voltage=None, max_voltage=None):
"""
Maximum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum critical oxygen chemical of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments).
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.muO2_discharge is not None:
                data.append(pair.muO2_discharge)
if pair.muO2_charge is not None:
data.append(pair.muO2_charge)
return max(data) if len(data) > 0 else None
def get_min_muO2(self, min_voltage=None, max_voltage=None):
"""
Minimum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage for a given step
max_voltage: The maximum allowable voltage allowable for a given
step
Returns:
Minimum critical oxygen chemical of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments).
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
            if pair.muO2_discharge is not None:
                data.append(pair.muO2_discharge)
if pair.muO2_charge is not None:
data.append(pair.muO2_charge)
return min(data) if len(data) > 0 else None
def get_sub_electrodes(self, adjacent_only=True, include_myself=True):
"""
If this electrode contains multiple voltage steps, then it is possible
to use only a subset of the voltage steps to define other electrodes.
For example, an LiTiO2 electrode might contain three subelectrodes:
[LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2]
This method can be used to return all the subelectrodes with some
options
Args:
adjacent_only: Only return electrodes from compounds that are
adjacent on the convex hull, i.e. no electrodes returned
will have multiple voltage steps if this is set True.
include_myself: Include this identical electrode in the list of
results.
Returns:
A list of InsertionElectrode objects
"""
battery_list = []
pair_it = self._vpairs if adjacent_only \
else itertools.combinations_with_replacement(self._vpairs, 2)
ion = self._working_ion
for pair in pair_it:
entry_charge = pair.entry_charge if adjacent_only \
else pair[0].entry_charge
entry_discharge = pair.entry_discharge if adjacent_only \
else pair[1].entry_discharge
chg_frac = entry_charge.composition.get_atomic_fraction(ion)
dischg_frac = entry_discharge.composition.get_atomic_fraction(ion)
def in_range(entry):
frac = entry.composition.get_atomic_fraction(ion)
return chg_frac <= frac <= dischg_frac
if include_myself or entry_charge != self.fully_charged_entry \
or entry_discharge != self.fully_discharged_entry:
unstable_entries = filter(in_range,
self.get_unstable_entries())
stable_entries = filter(in_range, self.get_stable_entries())
all_entries = list(stable_entries)
all_entries.extend(unstable_entries)
battery_list.append(self.__class__(all_entries,
self.working_ion_entry))
return battery_list
def as_dict_summary(self, print_subelectrodes=True):
"""
Generate a summary dict.
Args:
print_subelectrodes: Also print data on all the possible
subelectrodes.
Returns:
A summary of this electrode"s properties in dict format.
"""
chg_comp = self.fully_charged_entry.composition
dischg_comp = self.fully_discharged_entry.composition
ion = self.working_ion
d = {"average_voltage": self.get_average_voltage(),
"max_voltage": self.max_voltage,
"min_voltage": self.min_voltage,
"max_delta_volume": self.max_delta_volume,
"max_voltage_step": self.max_voltage_step,
"capacity_grav": self.get_capacity_grav(),
"capacity_vol": self.get_capacity_vol(),
"energy_grav": self.get_specific_energy(),
"energy_vol": self.get_energy_density(),
"working_ion": self._working_ion.symbol,
"nsteps": self.num_steps,
"framework": self._vpairs[0].framework.to_data_dict,
"formula_charge": chg_comp.reduced_formula,
"formula_discharge": dischg_comp.reduced_formula,
"fracA_charge": chg_comp.get_atomic_fraction(ion),
"fracA_discharge": dischg_comp.get_atomic_fraction(ion),
"max_instability": self.get_max_instability(),
"min_instability": self.get_min_instability()}
if print_subelectrodes:
f_dict = lambda c: c.as_dict_summary(print_subelectrodes=False)
d["adj_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=True))
d["all_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=False))
return d
def __str__(self):
return self.__repr__()
def __repr__(self):
output = []
chg_form = self.fully_charged_entry.composition.reduced_formula
dischg_form = self.fully_discharged_entry.composition.reduced_formula
output.append("InsertionElectrode with endpoints at {} and {}".format(
chg_form, dischg_form))
output.append("Avg. volt. = {} V".format(self.get_average_voltage()))
output.append("Grav. cap. = {} mAh/g".format(self.get_capacity_grav()))
output.append("Vol. cap. = {}".format(self.get_capacity_vol()))
return "\n".join(output)
@classmethod
def from_dict(cls, d):
from monty.json import MontyDecoder
dec = MontyDecoder()
return cls(dec.process_decoded(d["entries"]),
dec.process_decoded(d["working_ion_entry"]))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": [entry.as_dict() for entry in self._entries],
"working_ion_entry": self.working_ion_entry.as_dict()}
class InsertionVoltagePair(AbstractVoltagePair):
"""
Defines an Insertion Voltage Pair.
Args:
entry1: Entry corresponding to one of the entries in the voltage step.
entry2: Entry corresponding to the other entry in the voltage step.
working_ion_entry: A single ComputedEntry or PDEntry representing
the element that carries charge across the battery, e.g. Li.
"""
def __init__(self, entry1, entry2, working_ion_entry):
#initialize some internal variables
working_element = working_ion_entry.composition.elements[0]
entry_charge = entry1
entry_discharge = entry2
if entry_charge.composition.get_atomic_fraction(working_element) \
> entry2.composition.get_atomic_fraction(working_element):
(entry_charge, entry_discharge) = (entry_discharge, entry_charge)
comp_charge = entry_charge.composition
comp_discharge = entry_discharge.composition
ion_sym = working_element.symbol
frame_charge_comp = Composition({el: comp_charge[el]
for el in comp_charge
if el.symbol != ion_sym})
frame_discharge_comp = Composition({el: comp_discharge[el]
for el in comp_discharge
if el.symbol != ion_sym})
#Data validation
#check that the ion is just a single element
if not working_ion_entry.composition.is_element:
raise ValueError("VoltagePair: The working ion specified must be "
"an element")
#check that at least one of the entries contains the working element
if not comp_charge.get_atomic_fraction(working_element) > 0 and \
not comp_discharge.get_atomic_fraction(working_element) > 0:
raise ValueError("VoltagePair: The working ion must be present in "
"one of the entries")
        #check that the entries do not contain the same amount of the working
        #element
if comp_charge.get_atomic_fraction(working_element) == \
comp_discharge.get_atomic_fraction(working_element):
raise ValueError("VoltagePair: The working ion atomic percentage "
"cannot be the same in both the entries")
#check that the frameworks of the entries are equivalent
if not frame_charge_comp.reduced_formula == \
frame_discharge_comp.reduced_formula:
raise ValueError("VoltagePair: the specified entries must have the"
" same compositional framework")
#Initialize normalization factors, charged and discharged entries
valence_list = Element(ion_sym).oxidation_states
working_ion_valence = max(valence_list)
(self.framework,
norm_charge) = frame_charge_comp.get_reduced_composition_and_factor()
norm_discharge = \
frame_discharge_comp.get_reduced_composition_and_factor()[1]
self._working_ion_entry = working_ion_entry
#Initialize normalized properties
self._vol_charge = entry_charge.structure.volume / norm_charge
self._vol_discharge = entry_discharge.structure.volume / norm_discharge
comp_charge = entry_charge.composition
comp_discharge = entry_discharge.composition
self._mass_charge = comp_charge.weight / norm_charge
self._mass_discharge = comp_discharge.weight / norm_discharge
self._num_ions_transferred = \
(comp_discharge[working_element] / norm_discharge) \
- (comp_charge[working_element] / norm_charge)
self._voltage = \
(((entry_charge.energy / norm_charge) -
(entry_discharge.energy / norm_discharge)) / \
self._num_ions_transferred + working_ion_entry.energy_per_atom) / working_ion_valence
self._mAh = self._num_ions_transferred * Charge(1, "e").to("C") * \
Time(1, "s").to("h") * N_A * 1000 * working_ion_valence
#Step 4: add (optional) hull and muO2 data
self.decomp_e_charge = \
entry_charge.data.get("decomposition_energy", None)
self.decomp_e_discharge = \
entry_discharge.data.get("decomposition_energy", None)
self.muO2_charge = entry_charge.data.get("muO2", None)
self.muO2_discharge = entry_discharge.data.get("muO2", None)
self.entry_charge = entry_charge
self.entry_discharge = entry_discharge
self.normalization_charge = norm_charge
self.normalization_discharge = norm_discharge
self._frac_charge = comp_charge.get_atomic_fraction(working_element)
self._frac_discharge = \
comp_discharge.get_atomic_fraction(working_element)
@property
def frac_charge(self):
return self._frac_charge
@property
def frac_discharge(self):
return self._frac_discharge
@property
def voltage(self):
return self._voltage
@property
def mAh(self):
return self._mAh
@property
def mass_charge(self):
return self._mass_charge
@property
def mass_discharge(self):
return self._mass_discharge
@property
def vol_charge(self):
return self._vol_charge
@property
def vol_discharge(self):
return self._vol_discharge
@property
def working_ion_entry(self):
return self._working_ion_entry
def __repr__(self):
output = ["Insertion voltage pair with working ion {}"
.format(self._working_ion_entry.composition.reduced_formula),
"V = {}, mAh = {}".format(self.voltage, self.mAh),
"mass_charge = {}, mass_discharge = {}"
.format(self.mass_charge, self.mass_discharge),
"vol_charge = {}, vol_discharge = {}"
.format(self.vol_charge, self.vol_discharge),
"frac_charge = {}, frac_discharge = {}"
.format(self.frac_charge, self.frac_discharge)]
return "\n".join(output)
def __str__(self):
return self.__repr__()
| 2.75 | 3 |
python/GafferUI/ColorSwatchPlugValueWidget.py | ddesmond/gaffer | 561 | 1072 | ##########################################################################
#
# Copyright (c) 2013, <NAME>. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import weakref
import imath
import Gaffer
import GafferUI
class ColorSwatchPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plugs, **kw ) :
self.__swatch = GafferUI.ColorSwatch()
GafferUI.PlugValueWidget.__init__( self, self.__swatch, plugs, **kw )
## \todo How do set maximum height with a public API?
self.__swatch._qtWidget().setMaximumHeight( 20 )
self._addPopupMenu( self.__swatch )
self.__swatch.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ), scoped = False )
self.__swatch.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ), scoped = False )
self.__swatch.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ), scoped = False )
self.__swatch.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ), scoped = False )
self._updateFromPlugs()
def setHighlighted( self, highlighted ) :
GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
self.__swatch.setHighlighted( highlighted )
def _updateFromPlugs( self ) :
with self.getContext() :
value = _colorFromPlugs( self.getPlugs() )
self.__swatch.setColor( value )
def __buttonPress( self, widget, event ) :
if event.buttons == event.Buttons.Left :
return True
return False
def __dragBegin( self, widget, event ) :
GafferUI.Pointer.setCurrent( "rgba" )
return self.__swatch.getColor()
def __dragEnd( self, widget, event ) :
GafferUI.Pointer.setCurrent( None )
def __buttonRelease( self, widget, event ) :
if event.button != event.Buttons.Left :
return False
if not self._editable() :
return False
_ColorPlugValueDialogue.acquire( self.getPlugs() )
return True
def _colorFromPlugs( plugs ) :
if not len( plugs ) :
return imath.Color4f( 0 )
# ColorSwatch only supports one colour, and doesn't have
# an "indeterminate" state, so when we have multiple plugs
# the best we can do is take an average.
return sum( p.getValue() for p in plugs ) / len( plugs )
## \todo Perhaps we could make this a part of the public API? Perhaps we could also make a
# PlugValueDialogue base class to share some of the work with the dialogue made by the
# SplinePlugValueWidget. Or perhaps the `acquire()` here and `NodeSetEditor.acquire()` should
# actually be functionality of CompoundEditor?
class _ColorPlugValueDialogue( GafferUI.ColorChooserDialogue ) :
def __init__( self, plugs, parentWindow ) :
GafferUI.ColorChooserDialogue.__init__(
self,
color = _colorFromPlugs( plugs )
)
# we use these to decide which actions to merge into a single undo
self.__lastChangedReason = None
self.__mergeGroupId = 0
self.__colorChangedConnection = self.colorChooser().colorChangedSignal().connect( Gaffer.WeakMethod( self.__colorChanged ), scoped = False )
self.confirmButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.cancelButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.__plugs = plugs
self.__initialValues = { p : p.getValue() for p in self.__plugs }
nodes = { p.node() for p in self.__plugs }
self.__plugSetConnections = [ n.plugSetSignal().connect( Gaffer.WeakMethod( self.__plugSet ), scoped = False ) for n in nodes ]
for node in nodes :
node.parentChangedSignal().connect( Gaffer.WeakMethod( self.__destroy ), scoped = False )
plug = next( iter( self.__plugs ) )
if len( self.__plugs ) == 1 :
self.setTitle( plug.relativeName( plug.ancestor( Gaffer.ScriptNode ) ) )
else :
self.setTitle( "{} plugs".format( len( self.__plugs ) ) )
self.__plugSet( plug )
parentWindow.addChildWindow( self, removeOnClose = True )
@classmethod
def acquire( cls, plugs ) :
plug = next( iter( plugs ) )
script = plug.node().scriptNode()
scriptWindow = GafferUI.ScriptWindow.acquire( script )
for window in scriptWindow.childWindows() :
if isinstance( window, cls ) and window.__plugs == plugs :
window.setVisible( True )
return window
window = _ColorPlugValueDialogue( plugs, scriptWindow )
window.setVisible( True )
		return window
def __plugSet( self, plug ) :
if plug in self.__plugs :
with Gaffer.BlockedConnection( self.__colorChangedConnection ) :
self.colorChooser().setColor( _colorFromPlugs( self.__plugs ) )
def __colorChanged( self, colorChooser, reason ) :
if not GafferUI.ColorChooser.changesShouldBeMerged( self.__lastChangedReason, reason ) :
self.__mergeGroupId += 1
self.__lastChangedReason = reason
with Gaffer.UndoScope(
next( iter( self.__plugs ) ).ancestor( Gaffer.ScriptNode ),
mergeGroup = "ColorPlugValueDialogue%d%d" % ( id( self, ), self.__mergeGroupId )
) :
with Gaffer.BlockedConnection( self.__plugSetConnections ) :
for plug in self.__plugs :
plug.setValue( self.colorChooser().getColor() )
def __buttonClicked( self, button ) :
if button is self.cancelButton :
with Gaffer.UndoScope( next( iter( self.__plugs ) ).ancestor( Gaffer.ScriptNode ) ) :
for p, v in self.__initialValues.items() :
p.setValue( v )
self.parent().removeChild( self )
# Workaround for https://bugreports.qt-project.org/browse/QTBUG-26761.
assert( not self.visible() )
GafferUI.WidgetAlgo.keepUntilIdle( self )
def __destroy( self, *unused ) :
self.parent().removeChild( self )
| 0.933594 | 1 |
NewsPaperD7(final)/NewsPaper/News/migrations/0001_initial.py | GregTMJ/django-files | 1 | 1073 | <reponame>GregTMJ/django-files<gh_stars>1-10
# Generated by Django 3.2 on 2021-04-15 18:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(default='select category', max_length=255, unique=True)),
('subscriber', models.ManyToManyField(related_name='subscriber', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choosing', models.BooleanField(default=False)),
('time_in', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(max_length=255, unique=True)),
('text', models.TextField(max_length=255)),
('rating', models.FloatField(default=0.0)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='News.author', verbose_name='User')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='News.category')),
],
),
]
| 1.867188 | 2 |
osh/cmd_exec_test.py | rhencke/oil | 1 | 1074 | #!/usr/bin/env python
# Copyright 2016 <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
cmd_exec_test.py: Tests for cmd_exec.py
"""
import unittest
from core import test_lib
from core.meta import syntax_asdl, Id
from osh import state
suffix_op = syntax_asdl.suffix_op
osh_word = syntax_asdl.word
word_part = syntax_asdl.word_part
def InitEvaluator():
word_ev = test_lib.MakeTestEvaluator()
state.SetLocalString(word_ev.mem, 'x', 'xxx')
state.SetLocalString(word_ev.mem, 'y', 'yyy')
return word_ev
class ExpansionTest(unittest.TestCase):
def testBraceExpand(self):
arena = test_lib.MakeArena('<cmd_exec_test.py>')
c_parser = test_lib.InitCommandParser('echo _{a,b}_', arena=arena)
node = c_parser._ParseCommandLine()
print(node)
ex = test_lib.InitExecutor(arena=arena)
#print(ex.Execute(node))
#print(ex._ExpandWords(node.words))
class VarOpTest(unittest.TestCase):
def testVarOps(self):
ev = InitEvaluator() # initializes x=xxx and y=yyy
unset_sub = word_part.BracedVarSub(syntax_asdl.token(Id.VSub_Name, 'unset'))
part_vals = []
ev._EvalWordPart(unset_sub, part_vals)
print(part_vals)
set_sub = word_part.BracedVarSub(syntax_asdl.token(Id.VSub_Name, 'x'))
part_vals = []
ev._EvalWordPart(set_sub, part_vals)
print(part_vals)
# Now add some ops
part = word_part.LiteralPart(syntax_asdl.token(Id.Lit_Chars, 'default'))
arg_word = osh_word.CompoundWord([part])
test_op = suffix_op.StringUnary(Id.VTest_ColonHyphen, arg_word)
unset_sub.suffix_op = test_op
set_sub.suffix_op = test_op
part_vals = []
ev._EvalWordPart(unset_sub, part_vals)
print(part_vals)
part_vals = []
ev._EvalWordPart(set_sub, part_vals)
print(part_vals)
if __name__ == '__main__':
unittest.main()
| 2.34375 | 2 |
blitz_api/migrations/0020_auto_20190529_1200.py | MelanieFJNR/Blitz-API | 3 | 1075 | # Generated by Django 2.0.8 on 2019-05-29 16:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blitz_api', '0019_merge_20190524_1719'),
]
operations = [
migrations.AlterField(
model_name='exportmedia',
name='file',
field=models.FileField(upload_to='export/%Y/%m/', verbose_name='file'),
),
]
| 1.375 | 1 |
archiveis/__init__.py | palewire/archiveis | 6 | 1076 | #!/usr/bin/env python
from .api import capture
__version__ = "0.0.7"
__all__ = ("capture",)
| 0.976563 | 1 |
temp/discrete_a2c_agent.py | linklab/link_rl | 0 | 1077 | <filename>temp/discrete_a2c_agent.py
import numpy as np
import torch
import torch.nn.functional as F
from codes.d_agents.a0_base_agent import float32_preprocessor
from codes.d_agents.on_policy.on_policy_agent import OnPolicyAgent
from codes.e_utils import rl_utils, replay_buffer
from codes.d_agents.actions import ProbabilityActionSelector
from codes.e_utils.names import DeepLearningModelName, AgentMode
class AgentDiscreteA2C(OnPolicyAgent):
"""
"""
def __init__(
self, worker_id, input_shape, num_outputs,
train_action_selector, test_and_play_action_selector, params, device
):
assert isinstance(train_action_selector, ProbabilityActionSelector)
assert isinstance(test_and_play_action_selector, ProbabilityActionSelector)
assert params.DEEP_LEARNING_MODEL in [
DeepLearningModelName.DISCRETE_STOCHASTIC_ACTOR_CRITIC_MLP,
DeepLearningModelName.DISCRETE_STOCHASTIC_ACTOR_CRITIC_CNN
]
super(AgentDiscreteA2C, self).__init__(train_action_selector, test_and_play_action_selector, params, device)
self.__name__ = "AgentDiscreteA2C"
self.worker_id = worker_id
self.model = rl_utils.get_rl_model(
worker_id=worker_id, input_shape=input_shape, num_outputs=num_outputs, params=params, device=self.device
)
if self.params.DEEP_LEARNING_MODEL == DeepLearningModelName.DISCRETE_STOCHASTIC_ACTOR_CRITIC_MLP:
self.actor_optimizer = rl_utils.get_optimizer(
parameters=self.model.base.actor.parameters(),
learning_rate=self.params.ACTOR_LEARNING_RATE,
params=params
)
self.critic_optimizer = rl_utils.get_optimizer(
parameters=self.model.base.critic.parameters(),
learning_rate=self.params.LEARNING_RATE,
params=params
)
elif self.params.DEEP_LEARNING_MODEL == DeepLearningModelName.DISCRETE_STOCHASTIC_ACTOR_CRITIC_CNN:
self.optimizer = rl_utils.get_optimizer(
parameters=list(self.model.base.common_conv.parameters()) + list(self.model.base.critic_fc.parameters()),
learning_rate=self.params.LEARNING_RATE,
params=params
)
else:
raise ValueError()
self.buffer = replay_buffer.ExperienceReplayBuffer(
experience_source=None, buffer_size=self.params.BATCH_SIZE
)
def __call__(self, states, critics=None):
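# Runs the actor head on the given states, converts the logits to action probabilities,
# and samples actions with the train or test/play selector depending on the agent mode.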
if not isinstance(states, torch.FloatTensor):
states = float32_preprocessor(states).to(self.device)
logits_v = self.model.base.forward_actor(states)
probs_v = F.softmax(logits_v, dim=1)
probs = probs_v.data.cpu().numpy()
if self.agent_mode == AgentMode.TRAIN:
actions = np.array(self.train_action_selector(probs))
else:
actions = np.array(self.test_and_play_action_selector(probs))
critics = torch.zeros(size=probs_v.size())
return actions, critics
def train(self, step_idx):
# When training on a batch from a lucky episode versus a batch from an unlucky episode, the NN parameters
# get swept back and forth in different directions --> the variance of the gradients is very large
batch = self.buffer.sample(batch_size=None)
# states_v.shape: (32, 3)
# actions_v.shape: (32, 1)
# target_action_values_v.shape: (32,)
states_v, actions_v, target_action_values_v = self.unpack_batch_for_actor_critic(
batch=batch, target_model=self.model, params=self.params
)
logits_v, value_v = self.model(states_v)
# Critic Optimization
self.critic_optimizer.zero_grad()
loss_critic_v = F.mse_loss(input=value_v.squeeze(-1), target=target_action_values_v)
loss_critic_v.backward(retain_graph=True)
#nn_utils.clip_grad_norm_(self.model.base.critic.parameters(), self.params.CLIP_GRAD)
self.critic_optimizer.step()
# Actor Optimization
self.actor_optimizer.zero_grad()
# advantage_v.shape: (32,)
advantage_v = target_action_values_v - value_v.squeeze(-1).detach()
log_pi_v = F.log_softmax(logits_v, dim=1)
log_pi_action_v = log_pi_v.gather(dim=1, index=actions_v.unsqueeze(-1)).squeeze(-1)
reinforced_log_pi_action_v = advantage_v * log_pi_action_v
#print(actions_v.size(), advantage_v.size(), log_pi_v.size(), log_pi_action_v.size(), reinforced_log_pi_action_v.size())
loss_actor_v = -1.0 * reinforced_log_pi_action_v.mean()
prob_v = F.softmax(logits_v, dim=1)
entropy_v = -1.0 * (prob_v * log_pi_v).sum(dim=1).mean()
loss_entropy_v = -1.0 * self.params.ENTROPY_LOSS_WEIGHT * entropy_v
# Make loss_actor_v smaller --> makes log_pi_v.mean() larger
# Make loss_entropy_v smaller --> makes entropy_v larger
loss_actor_and_entropy_v = loss_actor_v + loss_entropy_v
loss_actor_and_entropy_v.backward()
#nn_utils.clip_grad_norm_(self.model.base.actor.parameters(), self.params.CLIP_GRAD)
self.actor_optimizer.step()
gradients = self.model.get_gradients_for_current_parameters()
return gradients, loss_critic_v.item(), loss_actor_v.item() * -1.0
| 2.28125 | 2 |
edmundbotadder/cogs/webhook.py | thebeanogamer/edmund-botadder | 0 | 1078 | <filename>edmundbotadder/cogs/webhook.py
from discord.ext.commands import Bot, Cog
class Webhook(Cog):
"""
Webhook functionality
"""
def __init__(self, bot: Bot):
self.bot = bot
def setup(bot):
bot.add_cog(Webhook(bot))
| 2.3125 | 2 |
apps/core/forms.py | allexvissoci/djangoecommerce | 0 | 1079 | <reponame>allexvissoci/djangoecommerce
from django import forms
from django.core.mail import send_mail
from django.conf import settings
class ContactForm(forms.Form):
name = forms.CharField(label='Nome', required=True)
email = forms.EmailField(label='E-mail')
message = forms.CharField(label='Mensagem', widget=forms.Textarea(),
required=True)
def send_mail(self):
name = self.cleaned_data['name']
email = self.cleaned_data['email']
message = self.cleaned_data['message']
message = 'Nome: {0}\nE-mail:{1}\n{2}'.format(name, email, message)
send_mail(
'Contato Django E-commerce',
message,
settings.DEFAULT_FROM_EMAIL,
[settings.DEFAULT_FROM_EMAIL]
)
| 1.9375 | 2 |
Fchat/Gui/AddFriendWidget.py | jamesaxl/FreeSnake | 2 | 1080 | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gio, Gtk, Gdk
class AddFriendWidget(Gtk.Box):
def __init__(self, main_window, fchat_prv, friend_list):
Gtk.Box.__init__(self, spacing=7, orientation = Gtk.Orientation.VERTICAL)
self.fchat_prv = fchat_prv
self.main_window = main_window
self.friend_list = friend_list
self.fchat_prv.add_friend_gui = self
self.generate_keys_bt = Gtk.Button('Generate Key')
self.generate_keys_bt.connect('clicked', self.on_generate_keys)
self.save_bt = Gtk.Button('Save')
self.save_bt.connect('clicked', self.on_save)
self.cancel_bt = Gtk.Button('Cancel')
self.cancel_bt.connect('clicked', self.on_cancel)
self.close_bt = Gtk.Button('Close')
self.close_bt.connect('clicked', self.on_close)
self.owner_info = Gtk.Entry()
self.owner_info.set_sensitive(False)
self.copy_clipboard_bt = Gtk.Button(label='Copy to clipboard')
self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.copy_clipboard_bt.connect('clicked', self.on_copy_clipboard)
h_owner = Gtk.Box(spacing=5)
h_owner.pack_start(self.owner_info, True, True, 1)
h_owner.pack_start(self.copy_clipboard_bt, False, False, 1)
self.friend_info = Gtk.Entry()
self.friend_info.set_placeholder_text('Key of your friend')
self.spinner = Gtk.Spinner()
self.pack_start(h_owner, True, False, 7)
self.pack_start(self.friend_info, True, False, 7)
self.pack_start(self.spinner, True, False, 7)
h_bt = Gtk.Box()
h_bt.pack_start(self.generate_keys_bt, True, False, 7)
h_bt.pack_start(self.save_bt, True, False, 7)
h_bt.pack_start(self.cancel_bt, True, False, 7)
h_bt.pack_start(self.close_bt, True, False, 7)
self.pack_start(h_bt, True, False, 7)
self.job = None
def on_generate_keys(self, button):
self.pub, self.prv, self.pub_info_key, self.job = self.fchat_prv.generate_key_for_friend()
self.owner_info.set_text(self.pub_info_key)
self.on_generate_keys_start()
def on_generate_keys_start(self):
self.spinner.show()
self.spinner.start()
self.friend_info.set_sensitive(False)
self.save_bt.set_sensitive(False)
self.close_bt.set_sensitive(False)
self.generate_keys_bt.set_sensitive(False)
self.copy_clipboard_bt.set_sensitive(False)
def on_generate_keys_ok(self):
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
def on_generate_keys_faild(self, text):
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
def on_cancel(self, button):
if self.job:
self.job.remove_from_queue_when_finish()
def on_close(self, button):
self.main_window.application.back_main_window_or_friend_list()
def on_save(self, button):
if self.owner_info.get_text() == '':
self.msg_info('You should generate a key that contains your info')
return
if self.friend_info.get_text() == '':
self.msg_info('Friend info is required')
return
self.fchat_prv.add_friend(self.pub, self.prv, self.friend_info.get_text())
self.on_save_start()
def on_save_start(self):
self.spinner.show()
self.spinner.start()
self.friend_info.set_sensitive(False)
self.save_bt.set_sensitive(False)
self.close_bt.set_sensitive(False)
self.generate_keys_bt.set_sensitive(False)
self.copy_clipboard_bt.set_sensitive(False)
def on_save_start_ok(self):
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
self.friend_list.sync_friends_list()
def on_save_start_duplicate(self, text):
self.msg_info(text)
def on_save_start_faild(self):
dialog = Gtk.MessageDialog(self.main_window, 0, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK, "ERROR")
dialog.format_secondary_text("Error adding friend please try later")
dialog.run()
dialog.destroy()
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
def on_copy_clipboard(self, button):
self.clipboard.set_text(self.owner_info.get_text(), -1)
def msg_info(self, text):
dialog = Gtk.MessageDialog(self.main_window, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "Info")
dialog.format_secondary_text(text)
dialog.run()
dialog.destroy()
| 2.34375 | 2 |
python01/game.py | liyan2013/hogwarts | 0 | 1081 | <filename>python01/game.py
import random
def game():
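# Simple turn-based battle: each round both sides take a random hit of 0-50 HP until one side drops to zero or below.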
# My HP
my_hp = 1000
# Enemy HP
enemy_hp = 1000
while True:
# I take a random hit, reducing my HP
my_hp = my_hp - random.randint(0, 50)
# The enemy takes a random hit, reducing its HP
enemy_hp = enemy_hp - random.randint(0, 50)
if my_hp <= 0:
# If my HP is now <= 0, the enemy wins
print("The enemy wins")
# exit the loop
break
elif enemy_hp <= 0:
# If the enemy's HP is now <= 0, I win
print("I win")
# break out of the loop
break
if __name__ == '__main__':
game()
| 3.65625 | 4 |
petstore/api/api_response.py | andrii-grytsenko/io.swagger.petstore3.testing | 0 | 1082 | from enum import Enum
class ApiResponseType(Enum):
error = "Error"
warning = "Warning"
info = "Info"
ok = "OK"
too_busy = "Too busy"
class ApiResponse:
def __init__(self, code: int, response_type: ApiResponseType, message):
self.code = code
self.type = response_type
self.message = message
class ApiResponseError(Exception):
def __init__(self, response: ApiResponse, message="Api exception"):
self.response = response
self.message = message
super().__init__(self.message)
def __str__(self):
return f"{self.message}\n{self.response.code}: [{self.response.type}] {self.response.message}"
| 3.359375 | 3 |
test/integration/component/test_browse_templates2.py | ycyun/ablestack-cloud | 1,131 | 1083 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
import unittest
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.codes import PASS, FAILED, SUCCESS, XEN_SERVER
from marvin.sshClient import SshClient
import requests
requests.packages.urllib3.disable_warnings()
import random
import string
import telnetlib
import os
import urllib.request, urllib.parse, urllib.error
import time
import tempfile
_multiprocess_shared_ = True
class TestBrowseUploadTemplate(cloudstackTestCase):
"""
Tests for browser based upload template feature. Once all issues in test_browse_templates.py are fixed, this should be merged back
"""
@classmethod
def setUpClass(cls):
cls.testClient = super(TestBrowseUploadTemplate, cls).getClsTestClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.apiclient = cls.testClient.getApiClient()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls._cleanup = []
cls.cleanup = []
hosts = list_hosts(
cls.apiclient,
type="Routing"
)
if hosts is None:
cls.SkipTest(
"There are no hypervisor's available. Check list hosts response")
cls.uploadtemplateformat = "VHD"
cls.templatename = "test"
cls.templatehypervisor = "XenServer"
cls.templateostypeid = 142
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.domain = get_domain(cls.apiclient)
cls.pod = get_pod(cls.apiclient, cls.zone.id)
cls.account = Account.create(
cls.apiclient,
cls.testdata["account"],
domainid=cls.domain.id
)
cls._cleanup = [
cls.account
]
def waitForSystemVMAgent(self, vmname):
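# Polls the host list until the named system VM agent reports Up, raising if the timeout expires.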
timeout = self.testdata["timeout"]
while True:
list_host_response = list_hosts(
self.apiclient,
name=vmname
)
if list_host_response and list_host_response[0].state == 'Up':
break
if timeout == 0:
raise Exception("Timed out waiting for SSVM agent to be Up")
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
def destroy_ssvm(self):
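# Destroys the running secondary storage VM, waits for a replacement to come up, and sanity-checks its fields.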
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
"Check list response returns a valid list"
)
ssvm_response = list_ssvm_response[0]
old_name = ssvm_response.name
self.debug("Destroying SSVM: %s" % ssvm_response.id)
cmd = destroySystemVm.destroySystemVmCmd()
cmd.id = ssvm_response.id
self.apiclient.destroySystemVm(cmd)
timeout = self.testdata["timeout"]
while True:
list_ssvm_response = list_ssvms(
self.apiclient,
zoneid=self.zone.id,
systemvmtype='secondarystoragevm'
)
if isinstance(list_ssvm_response, list):
if list_ssvm_response[0].state == 'Running':
break
if timeout == 0:
raise Exception("List SSVM call failed!")
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
ssvm_response = list_ssvm_response[0]
# Verify Name, Public IP, Private IP and Link local IP
# for newly created SSVM
self.assertNotEqual(
ssvm_response.name,
old_name,
"Check SSVM new name with name of destroyed SSVM"
)
self.assertEqual(
hasattr(ssvm_response, 'privateip'),
True,
"Check whether SSVM has private IP field"
)
self.assertEqual(
hasattr(ssvm_response, 'linklocalip'),
True,
"Check whether SSVM has link local IP field"
)
self.assertEqual(
hasattr(ssvm_response, 'publicip'),
True,
"Check whether SSVM has public IP field"
)
# Wait for the agent to be up
self.waitForSystemVMAgent(ssvm_response.name)
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
def test_browser_upload_template_incomplete(self):
"""
Test browser based incomplete template upload, followed by SSVM destroy. Template should go to UploadAbandoned state and get cleaned up.
"""
try:
self.debug("========================= Test browser based incomplete template upload ========================")
#Only register template, without uploading
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadtemplateformat
cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
cmd.displaytext=cmd.name
cmd.hypervisor=self.templatehypervisor
cmd.ostypeid=self.templateostypeid
template_response=self.apiclient.getUploadParamsForTemplate(cmd)
#Destroy SSVM, and wait for new one to start
self.destroy_ssvm()
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
#Verify that the template is cleaned up as part of sync-up during new SSVM start
list_template_response=Template.list(
self.apiclient,
id=template_response.id,
templatefilter="all",
zoneid=self.zone.id)
self.assertEqual(list_template_response, None, "Template is not cleaned up, some issue with template sync-up")
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
@classmethod
def tearDownClass(self):
try:
self.apiclient = super(TestBrowseUploadTemplate, self).getClsTestClient().getApiClient()
cleanup_resources(self.apiclient, self._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
| 1.648438 | 2 |
tests/components/ozw/test_websocket_api.py | pcaston/core | 1 | 1084 | """Test OpenZWave Websocket API."""
from unittest.mock import patch
from openzwavemqtt.const import (
ATTR_CODE_SLOT,
ATTR_LABEL,
ATTR_OPTIONS,
ATTR_POSITION,
ATTR_VALUE,
ValueType,
)
from openpeerpower.components.ozw.const import ATTR_CONFIG_PARAMETER
from openpeerpower.components.ozw.lock import ATTR_USERCODE
from openpeerpower.components.ozw.websocket_api import (
ATTR_IS_AWAKE,
ATTR_IS_BEAMING,
ATTR_IS_FAILED,
ATTR_IS_FLIRS,
ATTR_IS_ROUTING,
ATTR_IS_SECURITYV1,
ATTR_IS_ZWAVE_PLUS,
ATTR_NEIGHBORS,
ATTR_NODE_BASIC_STRING,
ATTR_NODE_BAUD_RATE,
ATTR_NODE_GENERIC_STRING,
ATTR_NODE_QUERY_STAGE,
ATTR_NODE_SPECIFIC_STRING,
ID,
NODE_ID,
OZW_INSTANCE,
PARAMETER,
SCHEMA,
TYPE,
VALUE,
)
from openpeerpower.components.websocket_api.const import (
ERR_INVALID_FORMAT,
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
)
from .common import MQTTMessage, setup_ozw
async def test_websocket_api(opp, generic_data, opp_ws_client):
"""Test the ozw websocket api."""
await setup_ozw(opp, fixture=generic_data)
client = await opp_ws_client(opp)
# Test instance list
await client.send_json({ID: 4, TYPE: "ozw/get_instances"})
msg = await client.receive_json()
assert len(msg["result"]) == 1
result = msg["result"][0]
assert result[OZW_INSTANCE] == 1
assert result["Status"] == "driverAllNodesQueried"
assert result["OpenZWave_Version"] == "1.6.1008"
# Test network status
await client.send_json({ID: 5, TYPE: "ozw/network_status"})
msg = await client.receive_json()
result = msg["result"]
assert result["Status"] == "driverAllNodesQueried"
assert result[OZW_INSTANCE] == 1
# Test node status
await client.send_json({ID: 6, TYPE: "ozw/node_status", NODE_ID: 32})
msg = await client.receive_json()
result = msg["result"]
assert result[OZW_INSTANCE] == 1
assert result[NODE_ID] == 32
assert result[ATTR_NODE_QUERY_STAGE] == "Complete"
assert result[ATTR_IS_ZWAVE_PLUS]
assert result[ATTR_IS_AWAKE]
assert not result[ATTR_IS_FAILED]
assert result[ATTR_NODE_BAUD_RATE] == 100000
assert result[ATTR_IS_BEAMING]
assert not result[ATTR_IS_FLIRS]
assert result[ATTR_IS_ROUTING]
assert not result[ATTR_IS_SECURITYV1]
assert result[ATTR_NODE_BASIC_STRING] == "Routing Slave"
assert result[ATTR_NODE_GENERIC_STRING] == "Binary Switch"
assert result[ATTR_NODE_SPECIFIC_STRING] == "Binary Power Switch"
assert result[ATTR_NEIGHBORS] == [1, 33, 36, 37, 39]
await client.send_json({ID: 7, TYPE: "ozw/node_status", NODE_ID: 999})
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test node statistics
await client.send_json({ID: 8, TYPE: "ozw/node_statistics", NODE_ID: 39})
msg = await client.receive_json()
result = msg["result"]
assert result[OZW_INSTANCE] == 1
assert result[NODE_ID] == 39
assert result["send_count"] == 57
assert result["sent_failed"] == 0
assert result["retries"] == 1
assert result["last_request_rtt"] == 26
assert result["last_response_rtt"] == 38
assert result["average_request_rtt"] == 29
assert result["average_response_rtt"] == 37
assert result["received_packets"] == 3594
assert result["received_dup_packets"] == 12
assert result["received_unsolicited"] == 3546
# Test node metadata
await client.send_json({ID: 9, TYPE: "ozw/node_metadata", NODE_ID: 39})
msg = await client.receive_json()
result = msg["result"]
assert result["metadata"]["ProductPic"] == "images/aeotec/zwa002.png"
await client.send_json({ID: 10, TYPE: "ozw/node_metadata", NODE_ID: 999})
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test network statistics
await client.send_json({ID: 11, TYPE: "ozw/network_statistics"})
msg = await client.receive_json()
result = msg["result"]
assert result["readCnt"] == 92220
assert result[OZW_INSTANCE] == 1
assert result["node_count"] == 5
# Test get nodes
await client.send_json({ID: 12, TYPE: "ozw/get_nodes"})
msg = await client.receive_json()
result = msg["result"]
assert len(result) == 5
assert result[2][ATTR_IS_AWAKE]
assert not result[1][ATTR_IS_FAILED]
# Test get config parameters
await client.send_json({ID: 13, TYPE: "ozw/get_config_parameters", NODE_ID: 39})
msg = await client.receive_json()
result = msg["result"]
assert len(result) == 8
for config_param in result:
assert config_param["type"] in (
ValueType.LIST.value,
ValueType.BOOL.value,
ValueType.INT.value,
ValueType.BYTE.value,
ValueType.SHORT.value,
ValueType.BITSET.value,
)
# Test set config parameter
config_param = result[0]
current_val = config_param[ATTR_VALUE]
new_val = next(
option[0]
for option in config_param[SCHEMA][0][ATTR_OPTIONS]
if option[0] != current_val
)
new_label = next(
option[1]
for option in config_param[SCHEMA][0][ATTR_OPTIONS]
if option[1] != current_val and option[0] != new_val
)
await client.send_json(
{
ID: 14,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
VALUE: new_val,
}
)
msg = await client.receive_json()
assert msg["success"]
await client.send_json(
{
ID: 15,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
VALUE: new_label,
}
)
msg = await client.receive_json()
assert msg["success"]
# Test OZW Instance not found error
await client.send_json(
{ID: 16, TYPE: "ozw/get_config_parameters", OZW_INSTANCE: 999, NODE_ID: 1}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test OZW Node not found error
await client.send_json(
{
ID: 18,
TYPE: "ozw/set_config_parameter",
NODE_ID: 999,
PARAMETER: 0,
VALUE: "test",
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test parameter not found
await client.send_json(
{
ID: 19,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 45,
VALUE: "test",
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test list value not found
await client.send_json(
{
ID: 20,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
VALUE: "test",
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test value type invalid
await client.send_json(
{
ID: 21,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 3,
VALUE: 0,
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_SUPPORTED
# Test invalid bitset format
await client.send_json(
{
ID: 22,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 3,
VALUE: {ATTR_POSITION: 1, ATTR_VALUE: True, ATTR_LABEL: "test"},
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_INVALID_FORMAT
# Test valid bitset format passes validation
await client.send_json(
{
ID: 23,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 10000,
VALUE: {ATTR_POSITION: 1, ATTR_VALUE: True},
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
async def test_ws_locks(opp, lock_data, opp_ws_client):
"""Test lock websocket apis."""
await setup_ozw(opp, fixture=lock_data)
client = await opp_ws_client(opp)
await client.send_json(
{
ID: 1,
TYPE: "ozw/get_code_slots",
NODE_ID: 10,
}
)
msg = await client.receive_json()
assert msg["success"]
await client.send_json(
{
ID: 2,
TYPE: "ozw/set_usercode",
NODE_ID: 10,
ATTR_CODE_SLOT: 1,
ATTR_USERCODE: "1234",
}
)
msg = await client.receive_json()
assert msg["success"]
await client.send_json(
{
ID: 3,
TYPE: "ozw/clear_usercode",
NODE_ID: 10,
ATTR_CODE_SLOT: 1,
}
)
msg = await client.receive_json()
assert msg["success"]
async def test_refresh_node(opp, generic_data, sent_messages, opp_ws_client):
"""Test the ozw refresh node api."""
receive_message = await setup_ozw(opp, fixture=generic_data)
client = await opp_ws_client(opp)
# Send the refresh_node_info command
await client.send_json({ID: 9, TYPE: "ozw/refresh_node_info", NODE_ID: 39})
msg = await client.receive_json()
assert len(sent_messages) == 1
assert msg["success"]
# Receive a mock status update from OZW
message = MQTTMessage(
topic="OpenZWave/1/node/39/",
payload={"NodeID": 39, "NodeQueryStage": "initializing"},
)
message.encode()
receive_message(message)
# Verify we got expected data on the websocket
msg = await client.receive_json()
result = msg["event"]
assert result["type"] == "node_updated"
assert result["node_query_stage"] == "initializing"
# Send another mock status update from OZW
message = MQTTMessage(
topic="OpenZWave/1/node/39/",
payload={"NodeID": 39, "NodeQueryStage": "versions"},
)
message.encode()
receive_message(message)
# Send a mock status update for a different node
message = MQTTMessage(
topic="OpenZWave/1/node/35/",
payload={"NodeID": 35, "NodeQueryStage": "fake_shouldnt_be_received"},
)
message.encode()
receive_message(message)
# Verify we received the message for node 39 but not for node 35
msg = await client.receive_json()
result = msg["event"]
assert result["type"] == "node_updated"
assert result["node_query_stage"] == "versions"
async def test_refresh_node_unsubscribe(opp, generic_data, opp_ws_client):
"""Test unsubscribing the ozw refresh node api."""
await setup_ozw(opp, fixture=generic_data)
client = await opp_ws_client(opp)
with patch("openzwavemqtt.OZWOptions.listen") as mock_listen:
# Send the refresh_node_info command
await client.send_json({ID: 9, TYPE: "ozw/refresh_node_info", NODE_ID: 39})
await client.receive_json()
# Send the unsubscribe command
await client.send_json({ID: 10, TYPE: "unsubscribe_events", "subscription": 9})
await client.receive_json()
assert mock_listen.return_value.called
| 2.015625 | 2 |
tests/test_formatters.py | samueljacques-qc/notification-utils | 0 | 1085 | <filename>tests/test_formatters.py
import pytest
from flask import Markup
from notifications_utils.formatters import (
unlink_govuk_escaped,
notify_email_markdown,
notify_letter_preview_markdown,
notify_plain_text_email_markdown,
sms_encode,
formatted_list,
strip_dvla_markup,
strip_pipes,
escape_html,
remove_whitespace_before_punctuation,
make_quotes_smart,
replace_hyphens_with_en_dashes,
tweak_dvla_list_markup,
nl2li,
strip_whitespace,
strip_and_remove_obscure_whitespace,
remove_smart_quotes_from_email_addresses,
strip_unsupported_characters,
normalise_whitespace
)
from notifications_utils.template import (
HTMLEmailTemplate,
PlainTextEmailTemplate,
SMSMessageTemplate,
SMSPreviewTemplate
)
@pytest.mark.parametrize(
"url", [
"http://example.com",
"http://www.gov.uk/",
"https://www.gov.uk/",
"http://service.gov.uk",
"http://service.gov.uk/blah.ext?q=a%20b%20c&order=desc#fragment",
pytest.param("http://service.gov.uk/blah.ext?q=one two three", marks=pytest.mark.xfail),
]
)
def test_makes_links_out_of_URLs(url):
link = '<a style="word-wrap: break-word; color: #005ea5;" href="{}">{}</a>'.format(url, url)
assert (notify_email_markdown(url) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'{}'
'</p>'
).format(link))
@pytest.mark.parametrize('input, output', [
(
(
'this is some text with a link http://example.com in the middle'
),
(
'this is some text with a link '
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com">http://example.com</a>'
' in the middle'
),
),
(
(
'this link is in brackets (http://example.com)'
),
(
'this link is in brackets '
'(<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com">http://example.com</a>)'
),
)
])
def test_makes_links_out_of_URLs_in_context(input, output):
assert notify_email_markdown(input) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'{}'
'</p>'
).format(output)
@pytest.mark.parametrize(
"url", [
"example.com",
"www.example.com",
"ftp://example.com",
"<EMAIL>",
"mailto:<EMAIL>",
"<a href=\"https://example.com\">Example</a>",
]
)
def test_doesnt_make_links_out_of_invalid_urls(url):
assert notify_email_markdown(url) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'{}'
'</p>'
).format(url)
def test_handles_placeholders_in_urls():
assert notify_email_markdown(
"http://example.com/?token=<span class='placeholder'>((token))</span>&key=1"
) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com/?token=">'
'http://example.com/?token='
'</a>'
'<span class=\'placeholder\'>((token))</span>&key=1'
'</p>'
)
@pytest.mark.parametrize(
"url, expected_html, expected_html_in_template", [
(
"""https://example.com"onclick="alert('hi')""",
"""<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22onclick=%22alert%28%27hi">https://example.com"onclick="alert('hi</a>')""", # noqa
"""<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22onclick=%22alert%28%27hi">https://example.com"onclick="alert('hi</a>‘)""", # noqa
),
(
"""https://example.com"style='text-decoration:blink'""",
"""<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22style=%27text-decoration:blink">https://example.com"style='text-decoration:blink</a>'""", # noqa
"""<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22style=%27text-decoration:blink">https://example.com"style='text-decoration:blink</a>’""", # noqa
),
]
)
def test_URLs_get_escaped(url, expected_html, expected_html_in_template):
assert notify_email_markdown(url) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'{}'
'</p>'
).format(expected_html)
assert expected_html_in_template in str(HTMLEmailTemplate({'content': url, 'subject': ''}))
def test_HTML_template_has_URLs_replaced_with_links():
assert (
'<a style="word-wrap: break-word; color: #005ea5;" href="https://service.example.com/accept_invite/a1b2c3d4">'
'https://service.example.com/accept_invite/a1b2c3d4'
'</a>'
) in str(HTMLEmailTemplate({'content': (
'You’ve been invited to a service. Click this link:\n'
'https://service.example.com/accept_invite/a1b2c3d4\n'
'\n'
'Thanks\n'
), 'subject': ''}))
@pytest.mark.parametrize('markdown_function, expected_output', [
(notify_email_markdown, (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com">'
'https://example.com'
'</a>'
'</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'Next paragraph'
'</p>'
)),
(notify_plain_text_email_markdown, (
'\n'
'\nhttps://example.com'
'\n'
'\nNext paragraph'
)),
])
def test_preserves_whitespace_when_making_links(
markdown_function, expected_output
):
assert markdown_function(
'https://example.com\n'
'\n'
'Next paragraph'
) == expected_output
@pytest.mark.parametrize(
"template_content,expected", [
("gov.uk", u"gov.\u200Buk"),
("GOV.UK", u"GOV.\u200BUK"),
("Gov.uk", u"Gov.\u200Buk"),
("https://gov.uk", "https://gov.uk"),
("https://www.gov.uk", "https://www.gov.uk"),
("www.gov.uk", "www.gov.uk"),
("gov.uk/register-to-vote", "gov.uk/register-to-vote"),
("gov.uk?q=", "gov.uk?q=")
]
)
def test_escaping_govuk_in_email_templates(template_content, expected):
assert unlink_govuk_escaped(template_content) == expected
assert expected in str(PlainTextEmailTemplate({'content': template_content, 'subject': ''}))
assert expected in str(HTMLEmailTemplate({'content': template_content, 'subject': ''}))
@pytest.mark.parametrize(
"subject,expected", [
("bonjour | hi", "bonjour | hi"),
("bonjour .", "bonjour."),
('double -- dash', 'double \u2013 dash'),
]
)
def test_subject_is_cleaned_up(subject, expected):
assert expected == HTMLEmailTemplate({'content': '', 'subject': subject}).subject
@pytest.mark.parametrize(
"prefix, body, expected", [
("a", "b", "a: b"),
(None, "b", "b"),
]
)
def test_sms_message_adds_prefix(prefix, body, expected):
template = SMSMessageTemplate({'content': body})
template.prefix = prefix
template.sender = None
assert str(template) == expected
def test_sms_preview_adds_newlines():
template = SMSPreviewTemplate({'content': """
the
quick
brown fox
"""})
template.prefix = None
template.sender = None
assert '<br>' in str(template)
@pytest.mark.parametrize(
'markdown_function, expected',
(
[
notify_letter_preview_markdown,
'print("hello")'
],
[
notify_email_markdown,
'print("hello")'
],
[
notify_plain_text_email_markdown,
'print("hello")'
],
)
)
def test_block_code(markdown_function, expected):
assert markdown_function('```\nprint("hello")\n```') == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>inset text</p>'
)
],
[
notify_email_markdown,
(
'<blockquote '
'style="Margin: 0 0 20px 0; border-left: 10px solid #BFC1C3;'
'padding: 15px 0 0.1px 15px; font-size: 19px; line-height: 25px;'
'">'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">inset text</p>'
'</blockquote>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\ninset text'
),
],
))
def test_block_quote(markdown_function, expected):
assert markdown_function('^ inset text') == expected
@pytest.mark.parametrize('heading', (
'# heading',
'#heading',
))
@pytest.mark.parametrize(
'markdown_function, expected',
(
[
notify_letter_preview_markdown,
'<h2>heading</h2>\n'
],
[
notify_email_markdown,
(
'<h2 style="Margin: 0 0 20px 0; padding: 0; font-size: 27px; '
'line-height: 35px; font-weight: bold; color: #0B0C0C;">'
'heading'
'</h2>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\n'
'\nheading'
'\n-----------------------------------------------------------------'
),
],
)
)
def test_level_1_header(markdown_function, heading, expected):
assert markdown_function(heading) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>inset text</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">inset text</p>'
],
[
notify_plain_text_email_markdown,
(
'\n'
'\ninset text'
),
],
))
def test_level_2_header(markdown_function, expected):
assert markdown_function('## inset text') == (expected)
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>a</p>'
'<div class="page-break"> </div>'
'<p>b</p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">a</p>'
'<hr style="border: 0; height: 1px; background: #BFC1C3; Margin: 30px 0 30px 0;">'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">b</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\na'
'\n'
'\n================================================================='
'\n'
'\nb'
),
],
))
def test_hrule(markdown_function, expected):
assert markdown_function('a\n\n***\n\nb') == expected
assert markdown_function('a\n\n---\n\nb') == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<ol>\n'
'<li>one</li>\n'
'<li>two</li>\n'
'<li>three</li>\n'
'</ol>\n'
)
],
[
notify_email_markdown,
(
'<table role="presentation" style="padding: 0 0 20px 0;">'
'<tr>'
'<td style="font-family: Helvetica, Arial, sans-serif;">'
'<ol style="Margin: 0 0 0 20px; padding: 0; list-style-type: decimal;">'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">one</li>'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">two</li>'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">three</li>'
'</ol>'
'</td>'
'</tr>'
'</table>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\n1. one'
'\n2. two'
'\n3. three'
),
],
))
def test_ordered_list(markdown_function, expected):
assert markdown_function(
'1. one\n'
'2. two\n'
'3. three\n'
) == expected
assert markdown_function(
'1.one\n'
'2.two\n'
'3.three\n'
) == expected
@pytest.mark.parametrize('markdown', (
( # no space
'*one\n'
'*two\n'
'*three\n'
),
( # single space
'* one\n'
'* two\n'
'* three\n'
),
( # two spaces
'* one\n'
'* two\n'
'* three\n'
),
( # tab
'* one\n'
'* two\n'
'* three\n'
),
( # dash as bullet
'- one\n'
'- two\n'
'- three\n'
),
pytest.param(( # plus as bullet
'+ one\n'
'+ two\n'
'+ three\n'
), marks=pytest.mark.xfail(raises=AssertionError)),
( # bullet as bullet
'• one\n'
'• two\n'
'• three\n'
),
))
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<ul>\n'
'<li>one</li>\n'
'<li>two</li>\n'
'<li>three</li>\n'
'</ul>\n'
)
],
[
notify_email_markdown,
(
'<table role="presentation" style="padding: 0 0 20px 0;">'
'<tr>'
'<td style="font-family: Helvetica, Arial, sans-serif;">'
'<ul style="Margin: 0 0 0 20px; padding: 0; list-style-type: disc;">'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">one</li>'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">two</li>'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">three</li>'
'</ul>'
'</td>'
'</tr>'
'</table>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\n• one'
'\n• two'
'\n• three'
),
],
))
def test_unordered_list(markdown, markdown_function, expected):
assert markdown_function(markdown) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>+ one</p><p>+ two</p><p>+ three</p>',
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">+ one</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">+ two</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">+ three</p>'
),
],
[
notify_plain_text_email_markdown,
(
'\n\n+ one'
'\n\n+ two'
'\n\n+ three'
),
],
))
def test_pluses_dont_render_as_lists(markdown_function, expected):
assert markdown_function(
'+ one\n'
'+ two\n'
'+ three\n'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>'
'line one<br>'
'line two'
'</p>'
'<p>'
'new paragraph'
'</p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">line one<br />'
'line two</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">new paragraph</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\nline one'
'\nline two'
'\n'
'\nnew paragraph'
),
],
))
def test_paragraphs(markdown_function, expected):
assert markdown_function(
'line one\n'
'line two\n'
'\n'
'new paragraph'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>before</p>'
'<p>after</p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">before</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">after</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\nbefore'
'\n'
'\nafter'
),
],
))
def test_multiple_newlines_get_truncated(markdown_function, expected):
assert markdown_function(
'before\n\n\n\n\n\nafter'
) == expected
@pytest.mark.parametrize('markdown_function', (
notify_letter_preview_markdown, notify_email_markdown, notify_plain_text_email_markdown
))
def test_table(markdown_function):
assert markdown_function(
'col | col\n'
'----|----\n'
'val | val\n'
) == (
''
)
@pytest.mark.parametrize('markdown_function, link, expected', (
[
notify_letter_preview_markdown,
'http://example.com',
'<p><strong>example.com</strong></p>'
],
[
notify_email_markdown,
'http://example.com',
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com">http://example.com</a>'
'</p>'
)
],
[
notify_email_markdown,
"""https://example.com"onclick="alert('hi')""",
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22onclick=%22alert%28%27hi">'
'https://example.com"onclick="alert(\'hi'
'</a>\')'
'</p>'
)
],
[
notify_plain_text_email_markdown,
'http://example.com',
(
'\n'
'\nhttp://example.com'
),
],
))
def test_autolink(markdown_function, link, expected):
assert markdown_function(link) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>variable called thing</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">variable called thing</p>'
],
[
notify_plain_text_email_markdown,
'\n\nvariable called thing',
],
))
def test_codespan(markdown_function, expected):
assert markdown_function(
'variable called `thing`'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>something important</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">something **important**</p>'
],
[
notify_plain_text_email_markdown,
'\n\nsomething **important**',
],
))
def test_double_emphasis(markdown_function, expected):
assert markdown_function(
'something **important**'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>something important</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">something *important*</p>'
],
[
notify_plain_text_email_markdown,
'\n\nsomething *important*',
],
))
def test_emphasis(markdown_function, expected):
assert markdown_function(
'something *important*'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">foo ****** bar</p>'
],
[
notify_plain_text_email_markdown,
'\n\nfoo ****** bar',
],
))
def test_nested_emphasis(markdown_function, expected):
assert markdown_function(
'foo ****** bar'
) == expected
@pytest.mark.parametrize('markdown_function', (
notify_letter_preview_markdown, notify_email_markdown, notify_plain_text_email_markdown
))
def test_image(markdown_function):
assert markdown_function(
''
) == (
''
)
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>Example: <strong>example.com</strong></p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; '
'color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com">Example</a>'
'</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\nExample: http://example.com'
),
],
))
def test_link(markdown_function, expected):
assert markdown_function(
'[Example](http://example.com)'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>Example: <strong>example.com</strong></p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; '
'color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com" title="An example URL">'
'Example'
'</a>'
'</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\nExample (An example URL): http://example.com'
),
],
))
def test_link_with_title(markdown_function, expected):
assert markdown_function(
'[Example](http://example.com "An example URL")'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>Strike</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">Strike</p>'
],
[
notify_plain_text_email_markdown,
'\n\nStrike'
],
))
def test_strikethrough(markdown_function, expected):
assert markdown_function('~~Strike~~') == expected
def test_footnotes():
# Can’t work out how to test this
pass
def test_sms_encode():
assert sms_encode('aàá…') == 'aàa...'
@pytest.mark.parametrize('items, kwargs, expected_output', [
([1], {}, '‘1’'),
([1, 2], {}, '‘1’ and ‘2’'),
([1, 2, 3], {}, '‘1’, ‘2’ and ‘3’'),
([1, 2, 3], {'prefix': 'foo', 'prefix_plural': 'bar'}, 'bar ‘1’, ‘2’ and ‘3’'),
([1], {'prefix': 'foo', 'prefix_plural': 'bar'}, 'foo ‘1’'),
([1, 2, 3], {'before_each': 'a', 'after_each': 'b'}, 'a1b, a2b and a3b'),
([1, 2, 3], {'conjunction': 'foo'}, '‘1’, ‘2’ foo ‘3’'),
(['&'], {'before_each': '<i>', 'after_each': '</i>'}, '<i>&</i>'),
([1, 2, 3], {'before_each': '<i>', 'after_each': '</i>'}, '<i>1</i>, <i>2</i> and <i>3</i>'),
])
def test_formatted_list(items, kwargs, expected_output):
assert formatted_list(items, **kwargs) == expected_output
def test_formatted_list_returns_markup():
assert isinstance(formatted_list([0]), Markup)
def test_removing_dvla_markup():
assert strip_dvla_markup(
(
'some words & some more <words>'
'<cr><h1><h2><p><normal><op><np><bul><tab>'
'<CR><H1><H2><P><NORMAL><OP><NP><BUL><TAB>'
'<tAb>'
)
) == 'some words & some more <words>'
def test_removing_pipes():
assert strip_pipes('|a|b|c') == 'abc'
def test_bleach_doesnt_try_to_make_valid_html_before_cleaning():
assert escape_html(
"<to cancel daily cat facts reply 'cancel'>"
) == (
"<to cancel daily cat facts reply 'cancel'>"
)
@pytest.mark.parametrize('dirty, clean', [
(
'Hello ((name)) ,\n\nThis is a message',
'Hello ((name)),\n\nThis is a message'
),
(
'Hello Jo ,\n\nThis is a message',
'Hello Jo,\n\nThis is a message'
),
(
'\n \t , word',
'\n, word',
),
(
'bonjour | hi',
'bonjour | hi',
),
])
def test_removing_whitespace_before_punctuation(dirty, clean):
assert remove_whitespace_before_punctuation(dirty) == clean
@pytest.mark.parametrize('dirty, clean', [
(
'Hello ((name)) .\n\nThis is a message',
'Hello ((name)).\n\nThis is a message'
),
(
'Hello Jo .\n\nThis is a message',
'Hello Jo.\n\nThis is a message'
),
(
'\n \t . word',
'\n. word',
),
])
def test_removing_whitespace_before_full_stops(dirty, clean):
assert remove_whitespace_before_punctuation(dirty) == clean
@pytest.mark.parametrize('dumb, smart', [
(
"""And I said, "what about breakfast at Tiffany's"?""",
"""And I said, “what about breakfast at Tiffany’s”?""",
),
(
"""
<a href="http://example.com?q='foo'">http://example.com?q='foo'</a>
""",
"""
<a href="http://example.com?q='foo'">http://example.com?q='foo'</a>
""",
),
])
def test_smart_quotes(dumb, smart):
assert make_quotes_smart(dumb) == smart
@pytest.mark.parametrize('nasty, nice', [
(
(
'The en dash - always with spaces in running text when, as '
'discussed in this section, indicating a parenthesis or '
'pause - and the spaced em dash both have a certain '
'technical advantage over the unspaced em dash. '
),
(
'The en dash \u2013 always with spaces in running text when, as '
'discussed in this section, indicating a parenthesis or '
'pause \u2013 and the spaced em dash both have a certain '
'technical advantage over the unspaced em dash. '
),
),
(
'double -- dash',
'double \u2013 dash',
),
(
'triple --- dash',
'triple \u2013 dash',
),
(
'quadruple ---- dash',
'quadruple ---- dash',
),
(
'em — dash',
'em – dash',
),
(
'already\u0020–\u0020correct', # \u0020 is a normal space character
'already\u0020–\u0020correct',
),
(
'2004-2008',
'2004-2008', # no replacement
),
(
'bonjour | hi',
'bonjour | hi',
),
])
def test_en_dashes(nasty, nice):
assert replace_hyphens_with_en_dashes(nasty) == nice
def test_unicode_dash_lookup():
en_dash_replacement_sequence = '\u0020\u2013'
hyphen = '-'
en_dash = '–'
space = ' '
non_breaking_space = ' '
assert en_dash_replacement_sequence == space + en_dash
assert non_breaking_space not in en_dash_replacement_sequence
assert hyphen not in en_dash_replacement_sequence
@pytest.mark.parametrize('markup, expected_fixed', [
(
'a',
'a',
),
(
'before<p><cr><p><cr>after',
'before<p><cr>after',
),
(
'before<cr><cr><np>after',
'before<cr><np>after',
),
(
'before{}<np>after'.format('<cr>' * 4),
'before{}<np>after'.format('<cr>' * 3),
),
])
def test_tweaking_dvla_list_markup(markup, expected_fixed):
assert tweak_dvla_list_markup(markup) == expected_fixed
def test_make_list_from_linebreaks():
assert nl2li(
'a\n'
'b\n'
'c\n'
) == (
'<ul>'
'<li>a</li>'
'<li>b</li>'
'<li>c</li>'
'</ul>'
)
@pytest.mark.parametrize('value', [
'bar',
' bar ',
"""
\t bar
""",
' \u180E\u200B \u200C bar \u200D \u2060\uFEFF ',
])
def test_strip_whitespace(value):
assert strip_whitespace(value) == 'bar'
@pytest.mark.parametrize('value', [
'notifications-email',
' \tnotifications-email \x0c ',
'\rn\u200Coti\u200Dfi\u200Bcati\u2060ons-\u180Eemai\uFEFFl\uFEFF',
])
def test_strip_and_remove_obscure_whitespace(value):
assert strip_and_remove_obscure_whitespace(value) == 'notifications-email'
def test_strip_and_remove_obscure_whitespace_only_removes_normal_whitespace_from_ends():
sentence = ' words \n over multiple lines with \ttabs\t '
assert strip_and_remove_obscure_whitespace(sentence) == 'words \n over multiple lines with \ttabs'
def test_remove_smart_quotes_from_email_addresses():
assert remove_smart_quotes_from_email_addresses("""
line one’s quote
first.o’<EMAIL> is someone’s email address
line ‘three’
""") == ("""
line one’s quote
first.o'<EMAIL> is someone’s email address
line ‘three’
""")
def test_strip_unsupported_characters():
assert strip_unsupported_characters("line one\u2028line two") == ("line oneline two")
def test_normalise_whitespace():
assert normalise_whitespace('\u200C Your tax is\ndue\n\n') == 'Your tax is due'
| 2.453125 | 2 |
ProgramFlow/functions/banner.py | kumarvgit/python3 | 0 | 1086 | <reponame>kumarvgit/python3
def banner_text(text):
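# Prints the text centered in an 80-character banner bordered by "**"; passing "*" prints a full line of asterisks.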
screen_width = 80
if len(text) > screen_width - 4:
print("EEK!!")
print("THE TEXT IS TOO LONG TO FIT IN THE SPECIFIED WIDTH")
if text == "*":
print("*" * screen_width)
else:
centred_text = text.center(screen_width - 4)
output_string = "**{0}**".format(centred_text)
print(output_string)
banner_text("*")
banner_text("Always look on the bright side of life...")
banner_text("If life seems jolly rotten,")
banner_text("There's something you've forgotten!")
banner_text("And that's to laugh and smile and dance and sing,")
banner_text(" ")
banner_text("When you're feeling in the dumps,")
banner_text("Don't be silly chumps,")
banner_text("Just purse your lips and whistle - that's the thing!")
banner_text("And... always look on the bright side of life...")
banner_text("*")
result = banner_text("Nothing is returned")
print(result)
numbers = [4, 2, 7, 5, 8, 3, 9, 6, 1]
print(numbers.sort())
| 3.6875 | 4 |
Adafruit_BluefruitLE/interfaces/__init__.py | acoomans/Adafruit_Python_BluefruitLE | 415 | 1087 | <reponame>acoomans/Adafruit_Python_BluefruitLE
from .provider import Provider
from .adapter import Adapter
from .device import Device
from .gatt import GattService, GattCharacteristic, GattDescriptor
| 1.234375 | 1 |
axju/generic/__init__.py | axju/axju | 0 | 1088 | from axju.generic.basic import BasicWorker
from axju.generic.execution import ExecutionWorker
from axju.generic.template import TemplateWorker
| 1.132813 | 1 |
objectstoreSiteMover.py | nikmagini/pilot | 13 | 1089 | <filename>objectstoreSiteMover.py<gh_stars>10-100
#!/usr/bin/env python
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <<EMAIL>>, 2014
# objectstoreSiteMover.py
import os
from configSiteMover import config_sm
import SiteMover
from xrootdObjectstoreSiteMover import xrootdObjectstoreSiteMover
from S3ObjectstoreSiteMover import S3ObjectstoreSiteMover
class objectstoreSiteMover(SiteMover.SiteMover):
"""
ObjectstoreSiteMover
It uses the url to decide which ObjectstoreSiteMover implementation to be used.
"""
copyCommand = "objectstore"
checksum_command = "adler32"
def __init__(self, setup_path='', useTimerCommand=True, *args, **kwrds):
self._setup = setup_path
self._useTimerCommand = useTimerCommand
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
gpfn = gpfn.replace("s3+rucio", "s3")
if gpfn.startswith("root:"):
sitemover = xrootdObjectstoreSiteMover(self.getSetup())
return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict)
if gpfn.startswith("s3:"):
sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand)
return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict)
return -1, "No objectstore sitemover found for this scheme(%s)" % gpfn
def put_data(self, source, destination, fsize=0, fchecksum=0, **pdict):
# Get input parameters from pdict
lfn = pdict.get('lfn', '')
logPath = pdict.get('logPath', '')
if logPath != "":
surl = logPath
else:
surl = os.path.join(destination, lfn)
surl = surl.replace("s3+rucio", "s3")
if surl.startswith("root:"):
sitemover = xrootdObjectstoreSiteMover(self.getSetup())
return sitemover. put_data(source, destination, fsize, fchecksum, **pdict)
if surl.startswith("s3:"):
sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand)
return sitemover. put_data(source, surl, fsize, fchecksum, **pdict)
return -1, "No objectstore sitemover found for this scheme(%s)" % destination, destination, fsize, fchecksum, config_sm.ARCH_DEFAULT
if __name__ == '__main__':
os.environ['PilotHomeDir'] = os.getcwd()
from SiteInformation import SiteInformation
s1 = SiteInformation()
#s1.getObjectstoresField("os_access_key", "eventservice", queuename='BNL_EC2W2_MCORE')
f = objectstoreSiteMover()
gpfn = "nonsens_gpfn"
lfn = "AOD.310713._000004.pool.root.1"
path = os.getcwd()
fsize = "4261010441"
fchecksum = "9145af38"
dsname = "data11_7TeV.00177986.physics_Egamma.merge.AOD.r2276_p516_p523_tid310713_00"
report = {}
#print f.getGlobalFilePaths(dsname)
#print f.findGlobalFilePath(lfn, dsname)
#print f.getLocalROOTSetup()
#path = "root://atlas-objectstore.cern.ch//atlas/eventservice/2181626927" # + your .root filename"
"""
source = "/bin/hostname"
dest = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
localSize = 17848
localChecksum = "89b93830"
print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')
gpfn = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/NTUP_PHOTON.01255150._000001.root.1"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
tmpDir = "/tmp/"
localSize = 17848
localChecksum = "89b93830"
print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')
"""
# test S3 object store
source = "/bin/hostname"
#dest = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
dest = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
localSize = None
localChecksum = None
print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='')
gpfn = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
gpfn = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/NTUP_PHOTON.01255150._000001.root.1"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
tmpDir = "/tmp/"
localSize = None
localChecksum = None
print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='<KEY>')
| 1.984375 | 2 |
codigos_videos/Exemplo_2.py | Miguel-mmf/Biblioteca_Dash_em-Python | 1 | 1090 | <filename>codigos_videos/Exemplo_2.py
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This file has some modifications compared to the file presented in the YouTube video
# Be sure to watch the video and study the official Dash documentation
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# importing the required libraries
import dash
import dash_core_components as dcc
import dash_html_components as html
# importing the functions that support the callbacks from the dependencies subpackage of the dash package
from dash.dependencies import Input, Output
# importing the graph_objects module from the plotly library
import plotly.graph_objects as go
# adding an external stylesheet through the link below
# this link is the one recommended by the Dash documentation; if you open it in your browser,
# you will notice that it has the structure of a CSS file
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# creating the application with the Dash function from the dash package and assigning it to the app variable
app = dash.Dash(
__name__,
external_stylesheets=external_stylesheets
)
# creating a function to generate a chart with the plotly.graph_objects library
def gera_grafico(tipo):
    # creating a figure
    # if you print(fig), a dictionary is shown, since figures can be represented this way, and plotly modules are needed to work with that information
fig = go.Figure()
# https://plotly.com/python/creating-and-updating-figures/
    # adding a trace to the figure
fig.add_trace(
go.Scatter(
x=[0,1,2,3,4,5,6],
y=[0,1,2,3,4,5,6],
mode=tipo,
name='Reta',
)
)
fig.add_trace(
go.Scatter(
x=[0,1,2,3,4,5,6],
y=[0,1,4,9,16,25,36],
mode=tipo,
name='Parábola',
)
)
    # adding a title to the chart
fig.update_layout(title='Gráfico Exemplo')
    # value returned by the gera_grafico(tipo) function
return fig
# creating a layout for the app variable
# adding to the layout an html.Div component that will hold the other components that give it shape
app.layout = html.Div([
    # inserting a component from the dash HTML components library as the title/header of the layout
html.H2(
['Painel de Visualização de Gráficos'],
        # the style parameter defines CSS styles for the component
style={
            'textAlign':'center', # centered text
            'font-weight':'bold' # bold text
}
),
    # adding a horizontal rule to the layout
html.Hr(),
    # creating parent tabs inside the layout
dcc.Tabs(
        # identity/name of the component
id='tabs',
        # creating the child tabs inside the children parameter of the Tabs() function
children=[
dcc.Tab(label='Gráfico de linha',value='tab-1'),
dcc.Tab(label='Gráfico de Barra',value='tab-2'),
dcc.Tab(label='Gráfico de Linha e Pontos',value='tab-3')
]
),
    # where the content of the tabs will be displayed once the callback is triggered
html.Div(id='tabs-content'),
html.Hr(),
])
# Callback
# structuring the callback with the inputs and outputs
@app.callback(
# Output(component_id,component_property)
Output('tabs-content','children'),
[
# Input(component_id,component_property)
Input('tabs','value')
]
)
# function that will be called by the callback
def update_tab(tab):
    # when the tab with value 'tab-1' is selected, the children property of the 'tabs-content' component
    # receives the line chart returned below by the gera_grafico(tipo='lines') function
if tab == 'tab-1':
return html.Div([
dcc.Graph(figure = gera_grafico('lines'))
])
    # when the tab with value 'tab-2' is selected, the children property of the 'tabs-content' component
    # receives the bar chart built and returned below
elif tab == 'tab-2':
fig_bar = go.Figure()
fig_bar.add_trace(
go.Bar(
x=[0,1,2,3,4,5,6],
y=[0,1,2,3,4,5,6],
)
)
fig_bar.add_trace(
go.Bar(
x=[0,1,2,3,4,5,6],
y=[0,1,4,9,16,25,36],
)
)
fig_bar.update_layout(title='Gráfico em Barras Exemplo')
return html.Div([
dcc.Graph(figure = fig_bar)
])
    # when the tab with value 'tab-3' is selected, the children property of the 'tabs-content' component
    # receives the line-and-markers chart returned below by the gera_grafico(tipo='lines+markers') function
elif tab == 'tab-3':
return html.Div([
dcc.Graph(figure = gera_grafico('lines+markers'))
])
    # if none of the conditions above is met, it means there is an error, so we return the error message
else:
return html.Div(['Erro!'])
# serving the dash application as a test/development version
if __name__ == "__main__":
app.run_server(debug=True) | 2.359375 | 2 |
Lab 2/javaccflab/formatter.py | tochanenko/MetaProgramming | 0 | 1091 | import re
import datetime
from javaccflab.lexer import parse
from javaccflab.java_token import TokenType, Token, update_token_value
class Formatter:
def __init__(self, files):
self.__files = files
self.__file = None
self.__tokens = []
self.__to_fix = dict()
def process(self):
tokens = []
for file in self.__files:
tokens.append(parse(open(file, 'r').read()))
i = 0
while i < len(tokens):
self.__tokens = tokens[i]
self.__file = self.__files[i]
self.__find_to_fix()
tokens[i] = self.__tokens
i += 1
i = 0
while i < len(tokens):
self.__tokens = tokens[i]
self.__file = self.__files[i]
self.__fix()
self.__fix_comments()
tokens[i] = self.__tokens
i += 1
return tokens
def __find_to_fix(self):
i = 0
while i < len(self.__tokens):
token = self.__tokens[i]
if token.get_value() == 'package':
i = self.__fix_package(i)
elif token.get_value() in ('class', 'interface') and self.__tokens[i - 1].get_value() != '.':
i = self.__skip_ws_tokens(i + 1)
if not Formatter.is_camel_upper_case(self.__tokens[i].get_value()):
self.__to_fix[self.__tokens[i].get_value()] = Formatter.to_camel_upper_case(
self.__tokens[i].get_value())
i = self.__fix_class_body(i, self.__tokens[i].get_value())
i += 1
def __fix_package(self, pos):
pos = self.__skip_ws_tokens(pos)
while self.__tokens[pos].get_value() != ';':
if self.__tokens[pos].get_type() == TokenType.IDENTIFIER and not Formatter.is_lower_case(
self.__tokens[pos].get_value()):
self.__to_fix[self.__tokens[pos].get_value()] = Formatter.to_lower_case(
(self.__tokens[pos].get_value()))
pos += 1
return pos
def __fix_class_body(self, pos, class_name):
while self.__tokens[pos].get_value() != '{':
pos += 1
count = 1
pos += 1
while count != 0:
if self.__tokens[pos].get_value() == '{':
count += 1
elif self.__tokens[pos].get_value() == '}':
count -= 1
elif self.__tokens[pos].get_value() == 'static':
i = self.__skip_ws_tokens(pos + 1)
if self.__tokens[i].get_value() == '{':
pos = i + 1
count += 1
continue
elif self.__tokens[pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD):
if self.__is_parameter(pos):
parameters, i = self.__get_field_names(pos)
if self.__is_final(pos):
for parameter in parameters:
if not Formatter.is_snake_upper_case(parameter):
self.__to_fix[parameter] = Formatter.to_snake_upper_case(parameter)
else:
for parameter in parameters:
if not Formatter.is_camel_lower_case(parameter):
self.__to_fix[parameter] = Formatter.to_camel_lower_case(parameter)
pos = i
else:
self.__fix_method_name(pos, class_name)
parameters = self.__get_method_parameters(pos)
pos = self.__fix_method_body(pos, parameters)
pos += 1
return pos
def __fix_method_name(self, i, class_name):
while self.__tokens[i].get_value() not in ('(', ';'):
i += 1
i -= 1
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i -= 1
if self.__tokens[i].get_value() != class_name and not Formatter.is_snake_lower_case(
self.__tokens[i].get_value()):
self.__to_fix[self.__tokens[i].get_value()] = Formatter.to_snake_lower_case(self.__tokens[i].get_value())
def __get_method_parameters(self, i):
parameters = dict()
while self.__tokens[i].get_value() != '(':
i += 1
while self.__tokens[i].get_value() != ')':
if self.__tokens[i + 1].get_value() in (')', ','):
pos = i
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
if not Formatter.is_camel_lower_case(self.__tokens[pos].get_value()):
fixed_value = Formatter.to_camel_lower_case(self.__tokens[pos].get_value())
parameters[self.__tokens[pos].get_value()] = fixed_value
update_token_value(self.__file, self.__tokens[pos], fixed_value)
i += 1
return parameters
def __fix_method_body(self, i, method_parameters):
params = dict()
while self.__tokens[i].get_value() not in ('{', ';'):
if self.__tokens[i].get_value() in method_parameters.keys():
update_token_value(self.__file, self.__tokens[i], method_parameters[self.__tokens[i].get_value()])
i += 1
if self.__tokens[i].get_value() == ';':
return i + 1
brace_count = 1
i += 1
while brace_count != 0:
if self.__tokens[i].get_value() == '{':
brace_count += 1
elif self.__tokens[i].get_value() == '}':
brace_count -= 1
elif self.__tokens[i].get_value() in ('=', ';'):
naming_pos = i - 1
while self.__tokens[naming_pos].get_type() == TokenType.WHITESPACE:
naming_pos -= 1
if self.__tokens[naming_pos].get_type() == TokenType.IDENTIFIER:
type_pos = naming_pos - 1
while self.__tokens[type_pos].get_type() == TokenType.WHITESPACE:
type_pos -= 1
if (self.__tokens[type_pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD) and \
self.__tokens[type_pos].get_value() not in ('class', 'identifier')) or self.__tokens[
type_pos].get_value() == ',':
if not Formatter.is_camel_lower_case(self.__tokens[naming_pos].get_value()):
fixed_value = Formatter.to_camel_lower_case(self.__tokens[naming_pos].get_value())
params[self.__tokens[naming_pos].get_value()] = fixed_value
update_token_value(self.__file, self.__tokens[naming_pos], fixed_value)
elif self.__tokens[i].get_type() == TokenType.IDENTIFIER and self.__tokens[
i].get_value() in params.keys():
update_token_value(self.__file, self.__tokens[i], params[self.__tokens[i].get_value()])
elif self.__tokens[i].get_type() == TokenType.IDENTIFIER and self.__tokens[
i].get_value() in method_parameters.keys():
update_token_value(self.__file, self.__tokens[i], method_parameters[self.__tokens[i].get_value()])
i += 1
return i
def __get_field_names(self, i):
params = []
while self.__tokens[i].get_value() != ';':
if self.__tokens[i + 1].get_value() in (';', '=', ','):
pos = i
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
field_name = self.__tokens[pos].get_value()
is_value = False
if self.__tokens[i + 1].get_value() in (';', ','):
while pos > 0 and self.__tokens[pos].get_value() not in (';', '}'):
if self.__tokens[pos].get_value() == '=':
is_value = True
pos -= 1
if not is_value:
params.append(field_name)
i += 1
end = i
return params, end
def __is_final(self, i):
while self.__tokens[i].get_value() not in (';', '=', '('):
if self.__tokens[i].get_value() == 'final':
return True
i += 1
return False
def __is_parameter(self, pos):
while self.__tokens[pos].get_value() != ';' and pos < len(self.__tokens):
if self.__tokens[pos].get_value() == '=':
return True
elif self.__tokens[pos].get_value() in ('class', 'interface', '(', ')'):
return False
pos += 1
return True
def __fix(self):
for token in self.__tokens:
if token.get_value() in self.__to_fix and not token.is_fixed():
update_token_value(self.__file, token, self.__to_fix[token.get_value()])
def __fix_comments(self):
self.__add_start_comment()
i = 0
while i < len(self.__tokens):
if self.__tokens[i].get_value() in ('class', 'interface'):
i = self.__fix_class_comments(i)
i += 1
i += 1
# Fix start comment
def __add_start_comment(self):
if not self.__is_start_comment_exists():
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'/*\n' \
f' * {self.__find_class_name()}\n' \
f' *\n' \
f' * {datetime.date.today().strftime("%B %d, %Y")}\n' \
f' */'
update_token_value(self.__file, comment_token, comment_string)
self.__tokens.insert(0, comment_token)
self.__tokens.insert(1, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(1, Token('\n', TokenType.WHITESPACE))
def __is_start_comment_exists(self):
i = self.__skip_ws_tokens(0)
return self.__tokens[i].get_type() == TokenType.COMMENT
def __find_class_name(self, i=0):
while self.__tokens[i].get_value() not in ('class', 'interface') and self.__tokens[i - 1].get_value() != '.':
i += 1
i = self.__skip_ws_tokens(i + 1)
return self.__tokens[i].get_value()
# Fix class comment
def __fix_class_comments(self, pos):
comment_token = self.__find_doc_comment_before(pos)
if comment_token is None:
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'/**\n' \
f' * Implementation of {self.__find_class_name(pos)}\n' \
f' */'
update_token_value(self.__file, comment_token, comment_string)
insert_pos = self.__find_token_before(pos, '\n')
self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(insert_pos + 1, comment_token)
else:
self.__fix_comment_links(comment_token)
return self.__fix_class_body_comments(pos)
# Fix comments for methods and fields
def __fix_class_body_comments(self, pos):
while self.__tokens[pos].get_value() != '{':
pos += 1
count = 1
pos += 1
while count != 0:
if self.__tokens[pos].get_value() == '{':
count += 1
elif self.__tokens[pos].get_value() == '}':
count -= 1
elif self.__tokens[pos].get_value() == 'static':
i = self.__skip_ws_tokens(pos + 1)
if self.__tokens[i].get_value() == '{':
pos = i + 1
count += 1
continue
elif self.__tokens[pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD) and self.__tokens[
pos + 1].get_value() != '.' and self.__tokens[pos].get_value() not in ('class', 'interface'):
if self.__is_parameter(pos):
pos = self.__fix_field_comment(pos)
else:
pos = self.__fix_method_comment(pos)
pos += 1
return pos
def __fix_field_comment(self, pos):
comment_token = self.__find_doc_comment_before(pos)
indent = self.__get_indent(pos)
if comment_token is None:
field_names = ', '.join(self.__get_field_names(pos)[0])
visibility = self.__find_visibility(pos)
comment_token = Token(None, TokenType.COMMENT)
comment_string = comment_string = f'{indent}/**\n' \
f'{indent} * The {visibility} {field_names} {"constant" if self.__is_final(pos) else "variable"}{"s" if len(field_names) > 0 else ""}\n' \
f'{indent} */'
update_token_value(self.__file, comment_token, comment_string)
insert_pos = self.__find_token_before(pos, '\n')
self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(insert_pos + 1, comment_token)
else:
self.__fix_comment_links(comment_token)
return self.__find_token_after(pos, ';')
def __find_visibility(self, pos):
pos = self.__find_token_before(pos, '\n')
while self.__tokens[pos].get_value() not in ('=', ';', '('):
if self.__tokens[pos].get_value() in ('private', 'public', 'protected'):
return self.__tokens[pos].get_value()
pos += 1
return 'package-private'
def __fix_method_comment(self, pos):
comment_token = self.__find_doc_comment_before(pos)
indent = self.__get_indent(pos)
all_params = []
if comment_token is None:
params = self.__get_parameter_list(pos)
params.extend(self.__get_type_parameter_list(pos))
if len(params) > 0:
all_params.append("\n".join([f"{indent} * @param {param}" for param in params]))
throws = self.__get_throws(pos)
if len(throws) > 0:
all_params.append("\n".join([f"{indent} * @throws {param}" for param in throws]))
return_type = self.__get_return_type(pos)
if len(return_type) > 0:
all_params.append(f"{indent} * @return {self.__get_return_type(pos)}")
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'{indent}/**\n' + \
'\n'.join(all_params) + \
('' if len(params) <= 0 else ' ') + \
f'\n{indent} */'
update_token_value(self.__file, comment_token, comment_string)
insert_pos = self.__find_token_before(pos, '\n')
self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(insert_pos + 1, comment_token)
else:
self.__fix_comment_links(comment_token)
params_list = self.__get_parameter_list(pos)
params_list.extend(self.__get_type_parameter_list(pos))
throws_list = self.__get_throws(pos)
return_type_value = self.__get_return_type(pos)
params, throws, return_type = self.__fix_comment_params(comment_token)
comment_string = comment_token.get_value()
append_string = ''
i = 0
if len(params) < len(params_list):
append_string += "\n" + "\n".join(
[f"{indent} * @param {param}" for param in Formatter.get_missing(params, params_list)])
i = comment_string.rfind('@param')
if i != -1:
i = comment_string.find('\n', i) if comment_string.find('\n',
i) != -1 else comment_string.find('*',
i) - 1
comment_string = comment_string[:i] + append_string + comment_string[i:]
append_string = ''
if len(throws) < len(throws_list):
append_string += "\n" + "\n".join(
[f"{indent} * @throws {param}" for param in Formatter.get_missing(throws, throws_list)])
i = comment_string.rfind('@throws')
if i != -1:
i = comment_string.find('\n', i) if comment_string.find('\n',
i) != -1 else comment_string.find('*',
i) - 1
comment_string = comment_string[:i] + append_string + comment_string[i:]
append_string = ''
i = comment_string.find('\n', i)
            if return_type == '':
append_string += "\n" + f"\n{indent} * @return {return_type_value}"
else:
i = comment_string.rfind('@return')
while comment_string[i] != '\n':
i -= 1
comment_string = comment_string[:i] + append_string + comment_string[i:]
if comment_string != comment_token.get_value():
update_token_value(self.__file, comment_token, comment_string)
return self.__skip_method(pos)
@staticmethod
def get_missing(before, after):
missing_params = []
for value in after:
if value not in before:
missing_params.append(value)
return missing_params
def __get_parameter_list(self, pos):
parameters = []
while self.__tokens[pos].get_value() != '(':
pos += 1
while self.__tokens[pos].get_value() != ')':
if self.__tokens[pos + 1].get_value() in (')', ','):
i = pos
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i -= 1
parameters.append(self.__tokens[i].get_value())
pos += 1
return parameters
def __get_type_parameter_list(self, pos):
parameters = []
while self.__tokens[pos].get_value() != '<':
if self.__tokens[pos].get_value() == '(':
return parameters
pos += 1
i = pos - 1
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i -= 1
if self.__tokens[i].get_type() != TokenType.KEYWORD or self.__tokens[i].get_value() not in ('}', ';'):
return parameters
while self.__tokens[pos].get_value() != '>':
if self.__tokens[pos - 1].get_value() in ('<', ','):
i = pos
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i += 1
parameters.append(self.__tokens[i].get_value())
pos += 1
return parameters
def __get_throws(self, pos):
throws = []
is_throws = False
while self.__tokens[pos].get_value() not in ('{', ';'):
if self.__tokens[pos].get_value() == 'throws':
is_throws = True
elif is_throws and self.__tokens[pos].get_type() == TokenType.IDENTIFIER:
throws.append(self.__tokens[pos].get_value())
pos += 1
return throws
def __get_return_type(self, pos):
return_type = []
while self.__tokens[pos].get_value() != '(':
pos += 1
pos -= 1
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
while self.__tokens[pos].get_type() != TokenType.WHITESPACE:
pos -= 1
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
if self.__tokens[pos].get_value() == '>':
while self.__tokens[pos].get_value() != '<':
return_type.append(self.__tokens[pos].get_value())
pos -= 1
return_type.append(self.__tokens[pos].get_value())
pos -= 1
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
return_type.append(self.__tokens[pos].get_value())
pos -= 1
return_type.append(self.__tokens[pos].get_value())
return_type.reverse()
return ''.join(return_type)
def __fix_comment_params(self, comment_token):
i = 0
params = []
throws = []
return_type = ''
comment_string = comment_token.get_value()
while i < len(comment_string):
if comment_string[i] == '@':
start = comment_string.find(' ', i)
macro = comment_string[i:start]
end = min(comment_string.find(' ', start + 1), comment_string.find('\n', start + 1))
end = end if end >= 0 else max(comment_string.find(' ', start + 1),
comment_string.find('\n', start + 1))
if end > 0:
value = comment_string[start + 1:end]
new_value = self.__fix_link(value)
if value != new_value:
comment_string = comment_string.replace(value, new_value)
update_token_value(self.__file, comment_token, comment_string)
value = new_value
if macro == '@param':
params.append(value)
elif macro == '@throws':
throws.append(value)
elif macro == '@return':
return_type = value
i += 1
return params, throws, return_type
def __skip_method(self, pos):
while self.__tokens[pos].get_value() != '{':
if self.__tokens[pos].get_value() == ';':
return pos + 1
pos += 1
count = 1
pos += 1
while count != 0:
if self.__tokens[pos].get_value() == '{':
count += 1
elif self.__tokens[pos].get_value() == '}':
count -= 1
pos += 1
return pos
def __find_doc_comment_before(self, pos):
while self.__tokens[pos].get_value() != '\n':
pos -= 1
while pos > 0 and self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
if self.__tokens[pos].get_type() == TokenType.COMMENT and self.__tokens[pos].get_value().startswith('/**'):
return self.__tokens[pos]
return None
def __find_token_before(self, pos, value):
while pos > 0 and self.__tokens[pos].get_value() != value:
pos -= 1
return pos
def __find_token_after(self, pos, value):
while pos < len(self.__tokens) and self.__tokens[pos].get_value() != value:
pos += 1
return pos
def __fix_comment_links(self, comment_token):
i = 0
link = None
comment_string = comment_token.get_value()
while i < len(comment_string):
if comment_string[i] == '@':
start = comment_string.find(' ', i)
if comment_string[i:start] != '@see':
i += 1
continue
end = comment_string.find('\n', i)
link = comment_string[start:end]
elif comment_string[i] == '{':
start = comment_string.find(' ', i)
end = comment_string.find('}', i)
link = comment_string[start:end]
if link is not None:
new_link = self.__fix_link(link)
comment_string = comment_string.replace(link, new_link)
link = None
i += 1
if comment_string != comment_token.get_value():
update_token_value(self.__file, comment_token, comment_string)
def __fix_link(self, link):
for name in self.__to_fix.keys():
pos = link.find(name)
if pos != -1 and not (link[pos - 1].isalpha() or link[
pos - 1].isdigit() or link[pos - 1] == '_'):
link = link.replace(name, self.__to_fix[name])
return link
def __get_indent(self, pos):
pos = self.__find_token_before(pos, '\n')
count = 0
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
if self.__tokens[pos].get_value() == ' ':
count += 1
pos += 1
return ' ' * count
def __skip_ws_tokens(self, pos):
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos += 1
return pos
@staticmethod
def is_lower_case(naming):
return naming.find('_') == -1 and naming.islower()
@staticmethod
def to_lower_case(naming):
return ''.join([component.lower() for component in naming.split('_')])
@staticmethod
def is_camel_lower_case(naming):
return naming.find('_') == -1 and not naming.isupper() and not naming[0].isupper()
@staticmethod
def to_camel_lower_case(naming):
naming = Formatter.remove_underscores_around(naming)
components = [
component[0] + component[1:].lower() if component.isupper() else component[0].upper() + component[1:] for
component in naming.split('_')]
return components[0][0].lower() + components[0][1:] + ''.join(components[1:])
@staticmethod
def is_camel_upper_case(naming):
return naming.find('_') == -1 and not naming.isupper() and naming[0].isupper()
@staticmethod
def to_camel_upper_case(naming):
lower = Formatter.to_camel_lower_case(naming)
return lower[0].upper() + lower[1:]
@staticmethod
def is_snake_lower_case(naming):
return naming.islower()
@staticmethod
def to_snake_lower_case(naming):
naming = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', naming)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', naming).lower()
@staticmethod
def is_snake_upper_case(naming):
return naming.isupper()
@staticmethod
def to_snake_upper_case(naming):
return Formatter.to_snake_lower_case(naming).upper()
@staticmethod
def remove_underscores_around(naming):
i = 0
while naming[i] == '_':
i += 1
naming = naming[i:]
        j = len(naming) - 1
        while naming[j] == '_':
            j -= 1
        naming = naming[:j + 1]
return naming
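

# Hedged usage sketch (not part of the original module): the naming helpers above convert
# identifiers between the Java naming conventions that the formatter enforces.
if __name__ == "__main__":
    print(Formatter.to_snake_lower_case("myFieldName"))    # -> my_field_name
    print(Formatter.to_snake_upper_case("myFieldName"))    # -> MY_FIELD_NAME
    print(Formatter.to_camel_upper_case("my_class_name"))  # -> MyClassName
    print(Formatter.to_camel_lower_case("MY_CONSTANT"))    # -> myConstant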
| 2.734375 | 3 |
src/secml/adv/attacks/evasion/c_attack_evasion_pgd_exp.py | zangobot/secml | 63 | 1092 | <filename>src/secml/adv/attacks/evasion/c_attack_evasion_pgd_exp.py
"""
.. module:: CAttackEvasionPGDExp
:synopsis: Evasion attack using Projected Gradient Descent.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from secml.adv.attacks.evasion import CAttackEvasionPGDLS
class CAttackEvasionPGDExp(CAttackEvasionPGDLS):
"""Evasion attacks using Projected Gradient Descent with Exponential line search.
This class implements the maximum-confidence evasion attacks proposed in:
- https://arxiv.org/abs/1910.00470, EURASIP JIS, 2020.
- https://arxiv.org/abs/1708.06939, ICCV W. ViPAR, 2017.
It is the multi-class extension of our original work in:
- https://arxiv.org/abs/1708.06131, ECML 2013,
implemented using a standard projected gradient solver.
This attack uses a faster line search than PGD-LS.
In all our attacks, we use a smart double initialization to avoid using the
mimicry term from our ECML 2013 paper, as described in:
- https://pralab.diee.unica.it/sites/default/files/zhang15-tcyb.pdf, IEEE TCYB, 2015
If the attack is not successful when starting from x0,
we initialize the optimization by projecting a point from another
class onto the feasible domain and try again.
Parameters
----------
classifier : CClassifier
Target classifier.
double_init_ds : CDataset or None, optional
Dataset used to initialize an alternative init point (double init).
double_init : bool, optional
If True (default), use double initialization point.
Needs double_init_ds not to be None.
distance : {'l1' or 'l2'}, optional
Norm to use for computing the distance of the adversarial example
from the original sample. Default 'l2'.
dmax : scalar, optional
Maximum value of the perturbation. Default 1.
lb, ub : int or CArray, optional
Lower/Upper bounds. If int, the same bound will be applied to all
the features. If CArray, a different bound can be specified for each
feature. Default `lb = 0`, `ub = 1`.
y_target : int or None, optional
If None an error-generic attack will be performed, else a
error-specific attack to have the samples misclassified as
belonging to the `y_target` class.
attack_classes : 'all' or CArray, optional
Array with the classes that can be manipulated by the attacker or
'all' (default) if all classes can be manipulated.
solver_params : dict or None, optional
Parameters for the solver.
Default None, meaning that default parameters will be used.
See :class:`COptimizerPGDExp` for more information.
Attributes
----------
class_type : 'e-pgd-exp'
"""
__class_type = 'e-pgd-exp'
def __init__(self, classifier,
double_init_ds=None,
double_init=True,
distance='l1',
dmax=0,
lb=0,
ub=1,
y_target=None,
attack_classes='all',
solver_params=None):
# INTERNALS
self._x0 = None
self._y0 = None
# this is an alternative init point. This could be a single point
# (targeted evasion) or an array of multiple points, one for each
# class (indiscriminate evasion). See _get_point_with_min_f_obj()
self._xk = None
super(CAttackEvasionPGDExp, self).__init__(
classifier=classifier,
double_init_ds=double_init_ds,
double_init=double_init,
distance=distance,
dmax=dmax,
lb=lb,
ub=ub,
y_target=y_target,
attack_classes=attack_classes,
solver_params=solver_params)
self.solver_type = 'pgd-exp'
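

# Hedged usage sketch (illustrative only, not part of the original module): the constructor
# arguments below mirror this class' __init__ signature; `clf`, `tr_set`, `x0` and `y0` are
# assumed to be a trained CClassifier, a surrogate dataset and an input sample defined
# elsewhere, and the final `run` call follows secml's generic attack interface.
#
#   attack = CAttackEvasionPGDExp(classifier=clf,
#                                 double_init_ds=tr_set,
#                                 distance='l2',
#                                 dmax=0.5,
#                                 lb=0.0, ub=1.0,
#                                 y_target=None)
#   y_pred, scores, adv_ds, f_obj = attack.run(x0, y0)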
| 2.59375 | 3 |
mail_log_parser/data_manager.py | kinteriq/mail-log-parser | 0 | 1093 | import sqlite3
class ManageData:
def __init__(self, queue_tracker_db, email_tracker_db, delivery_tracker_db):
self.queue_tracker_db = queue_tracker_db
self.email_tracker_db = email_tracker_db
self.delivery_tracker_db = delivery_tracker_db
def manage_queue_tracker(self, fields):
"""
Receive one of the following located groups as <fields>:
[('ID', <id>), ('client_email', <email>)];
[('ID', <id>), ('receivers', <email>), ('status', <status>)];
[('ID', <id>)];
and manage the <queue_tracker_db> accordingly.
"""
if len(fields) == 1:
ID = fields[0][1]
self.manage_email_tracker(ID)
self.manage_delivery_tracker(ID)
del self.queue_tracker_db[ID]
elif len(fields) == 2:
ID, client_email = (f[1] for f in fields)
self.queue_tracker_db[ID]['client_email'] = client_email
elif len(fields) == 3:
ID, receiver, status = (f[1] for f in fields)
if status == 'sent':
code = 1
else:
code = 0
self.queue_tracker_db[ID]['receivers'][receiver] = code
def manage_email_tracker(self, ID):
"""
Retrieve client's email from the <queue_tracker_db> by <ID>
with the amount of 'receivers' whose 'status' == 1
and store it in the <email_tracker_db>.
"""
client_email = self.queue_tracker_db[ID]['client_email']
receivers = self.queue_tracker_db[ID]['receivers']
delivered_mail = [r for r in receivers if receivers[r] == 1]
if client_email in self.email_tracker_db:
self.email_tracker_db[client_email] += len(delivered_mail)
else:
self.email_tracker_db[client_email] = len(delivered_mail)
def manage_delivery_tracker(self, ID):
"""
Go through all receivers of <ID> queue of <queue_tracker_db>,
and add their delivery statuses to the <delivery_tracker_db> counter
"""
receivers = self.queue_tracker_db[ID]['receivers']
for receiver in receivers:
if receivers[receiver] == 1:
self.delivery_tracker_db['delivered'] += 1
else:
self.delivery_tracker_db['undelivered'] += 1
class ManageDatabase(ManageData):
def __init__(self, path, *args, **kwargs):
self.path = path
super().__init__(*args, **kwargs)
def _execute_command(self, *command):
con = sqlite3.connect(self.path)
cursor = con.cursor()
result = cursor.execute(*command)
if result:
result = result.fetchall()
con.commit()
con.close()
return result
def create_db(self):
self._execute_command('''CREATE TABLE IF NOT EXISTS email_tracker
(client_email TEXT PRIMARY KEY, num_of_letters_sent INTEGER)''')
def transfer_data(self):
for email, num_of_letters in self.email_tracker_db.items():
self._execute_command('''INSERT INTO email_tracker VALUES
(?, ?)''', (email, num_of_letters))
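

# Hedged usage sketch (not part of the original module): feed the three kinds of located
# groups described in manage_queue_tracker's docstring and inspect the aggregated counters;
# the addresses and the sqlite file name below are made up for illustration.
if __name__ == "__main__":
    manager = ManageData(
        queue_tracker_db={"A1": {"client_email": "", "receivers": {}}},
        email_tracker_db={},
        delivery_tracker_db={"delivered": 0, "undelivered": 0},
    )
    manager.manage_queue_tracker([("ID", "A1"), ("client_email", "client@example.com")])
    manager.manage_queue_tracker([("ID", "A1"), ("receivers", "to@example.com"), ("status", "sent")])
    manager.manage_queue_tracker([("ID", "A1")])
    print(manager.email_tracker_db)     # -> {'client@example.com': 1}
    print(manager.delivery_tracker_db)  # -> {'delivered': 1, 'undelivered': 0}

    # persisting the per-client counts through the sqlite-backed subclass
    db = ManageDatabase("mail_stats.sqlite3", {}, manager.email_tracker_db,
                        {"delivered": 0, "undelivered": 0})
    db.create_db()
    db.transfer_data()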
| 3.25 | 3 |
Util/training_util.py | lychenyoko/content-aware-gan-compression | 47 | 1094 | import math
import torch
from torch import autograd
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
noise = torch.randn_like(fake_img) / math.sqrt(
fake_img.shape[2] * fake_img.shape[3]
)
grad, = autograd.grad(
outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
)
path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
path_penalty = (path_lengths - path_mean).pow(2).mean()
return path_penalty, path_mean.detach(), path_lengths
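

# Hedged usage sketch (not part of the original module): the fake image must be a
# differentiable function of the latents for autograd.grad to work, so a toy "generator"
# (a mean + broadcast) stands in here for the real StyleGAN generator used by this repo.
if __name__ == "__main__":
    latents = torch.randn(2, 1, 8, requires_grad=True)  # (batch, n_latent, latent_dim)
    fake_img = latents.mean(dim=2).view(2, 1, 1, 1) * torch.ones(2, 3, 4, 4)
    penalty, mean_path, lengths = g_path_regularize(fake_img, latents, mean_path_length=0.0)
    print(penalty.item(), mean_path.item(), lengths.shape)  # lengths has one entry per batch sample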
| 2.234375 | 2 |
app/configs/development_settings.py | DIS-SIN/FlaskShell | 0 | 1095 | <gh_stars>0
######################################################## FLASK SETTINGS ##############################################################
#Variable used to securly sign cookies
##THIS IS SET IN DEV ENVIRONMENT FOR CONVENIENCE BUT SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PROD
SECRET_KEY = "dev"
######################################################## DATABSE SETTINGS ####################################################
#Neo4j Database URI used by the Neomodel OGM
## THIS SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PRODUCTION ##
DATABASE_URI = "bolt://test:test@localhost:7687"
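
# Hedged usage sketch (assumption, not part of this settings file): with a standard Flask
# app factory these values would typically be loaded via config.from_object; the module
# path below is inferred from this file's location and may differ in the real project.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('app.configs.development_settings')
#   assert app.config['SECRET_KEY'] == 'dev'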
| 1.460938 | 1 |
autoarray/structures/grids/two_d/grid_2d_util.py | caoxiaoyue/PyAutoArray | 0 | 1096 | import numpy as np
from typing import Tuple, Union, Optional
from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.geometry import geometry_util
from autoarray import numba_util
from autoarray.mask import mask_2d_util
@numba_util.jit()
def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]:
"""
Returns the centre of a grid from a 1D grid.
Parameters
----------
grid_2d_slim
The 1D grid of values which are mapped to a 2D array.
Returns
-------
(float, float)
The (y,x) central coordinates of the grid.
"""
centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0
centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0
return centre_y, centre_x
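

# Hedged usage sketch (not part of the original autoarray module): the centre is the
# midpoint of the extreme (y,x) values, e.g. for a 2x2 grid spanning (0,0) to (1,1).
if __name__ == "__main__":
    _example_grid = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    print(grid_2d_centre_from(_example_grid))  # -> (0.5, 0.5)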
@numba_util.jit()
def grid_2d_slim_via_mask_from(
mask_2d: np.ndarray,
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into
a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates a the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned on an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are
stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore
removed and not included in the slimmed grid.
Grid2D are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
mask_2d
A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
sub-grid.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin : (float, flloat)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A slimmed sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples
--------
mask = np.array([[True, False, True],
[False, False, False]
[True, False, True]])
grid_slim = grid_2d_slim_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
"""
total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size)
grid_slim = np.zeros(shape=(total_sub_pixels, 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin
)
sub_index = 0
y_sub_half = pixel_scales[0] / 2
y_sub_step = pixel_scales[0] / (sub_size)
x_sub_half = pixel_scales[1] / 2
x_sub_step = pixel_scales[1] / (sub_size)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
y_scaled = (y - centres_scaled[0]) * pixel_scales[0]
x_scaled = (x - centres_scaled[1]) * pixel_scales[1]
for y1 in range(sub_size):
for x1 in range(sub_size):
grid_slim[sub_index, 0] = -(
y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0)
)
grid_slim[sub_index, 1] = (
x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0)
)
sub_index += 1
return grid_slim
def grid_2d_via_mask_from(
mask_2d: np.ndarray,
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are
given values (0.0, 0.0).
Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
mask_2d
A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
sub-grid.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin : (float, flloat)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
mask = np.array([[True, False, True],
[False, False, False]
[True, False, True]])
grid_2d = grid_2d_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
"""
grid_2d_slim = grid_2d_slim_via_mask_from(
mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin
)
return grid_2d_native_from(
grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size
)
def grid_2d_slim_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are
stored in the 0 index of the second dimension, x coordinates in the 1 index.
Grid2D are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples
--------
mask = np.array([[True, False, True],
[False, False, False]
[True, False, True]])
grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_slim_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
def grid_2d_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided
into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes
the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index.
Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin : (float, flloat)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
@numba_util.jit()
def grid_scaled_2d_slim_radial_projected_from(
extent: np.ndarray,
centre: Tuple[float, float],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
shape_slim: Optional[int] = 0,
) -> np.ndarray:
"""
Determine a projected radial grid of points from a 2D region of coordinates defined by an
extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This functions operates as follows:
1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of
the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes).
2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the
pixel_scale in the x dimension is used).
3) Determine the number of pixels between the centre and the edge of the region using the longest path between the
two chosen above.
4) Create a (y,x) grid of radial points where all points are at the centre's y value = 0.0 and the x values iterate
from the centre in increasing steps of the pixel-scale.
5) Rotate these radial coordinates by the input `angle` clockwise.
A schematric is shown below:
-------------------
| |
|<- - - - ->x | x = centre
| | <-> = longest radial path from centre to extent edge
| |
-------------------
Using the centre x above, this function finds the longest radial path to the edge of the extent window.
The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre.
This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data
structure so that it can be used in functions which require that a 2D grid structure is input.
Parameters
----------
extent
The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax]
centre : (float, flloat)
The (y,x) central coordinate which the radial grid is traced outwards from.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
shape_slim
Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is
used (due to numba None cannot be used as a default value).
Returns
-------
ndarray
A radial set of points sampling the longest distance from the centre to the edge of the extent in along the
positive x-axis.
"""
distance_to_positive_x = extent[1] - centre[1]
distance_to_positive_y = extent[3] - centre[0]
distance_to_negative_x = centre[1] - extent[0]
distance_to_negative_y = centre[0] - extent[2]
scaled_distance = max(
[
distance_to_positive_x,
distance_to_positive_y,
distance_to_negative_x,
distance_to_negative_y,
]
)
if (scaled_distance == distance_to_positive_y) or (
scaled_distance == distance_to_negative_y
):
pixel_scale = pixel_scales[0]
else:
pixel_scale = pixel_scales[1]
if shape_slim == 0:
shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1
grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2))
grid_scaled_2d_slim_radii[:, 0] += centre[0]
radii = centre[1]
for slim_index in range(shape_slim):
grid_scaled_2d_slim_radii[slim_index, 1] = radii
radii += pixel_scale / sub_size
return grid_scaled_2d_slim_radii
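

# Hedged usage sketch (not part of the original autoarray module): project a radial grid
# from the centre of a 4x4 scaled-unit window; the longest path from the centre to the
# extent edge sets the number of radial points.
if __name__ == "__main__":
    _radial = grid_scaled_2d_slim_radial_projected_from(
        np.array([-2.0, 2.0, -2.0, 2.0]), (0.0, 0.0), (1.0, 1.0), 1
    )
    print(_radial.shape)  # -> (3, 2): radii at x = 0.0, 1.0, 2.0 with y fixed at 0.0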
@numba_util.jit()
def grid_pixels_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2d (y,x) scaled coordinates to a slimmed grid of 2d (y,x) pixel coordinate values. Pixel
coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner
relative to the input scaled coordinate.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird.
The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their
1D grid pixel coordinate values.
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, flloat)
The (y,x) origin of the grid, which the scaled grid is shifted to.
Returns
-------
ndarray
A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_pixels_2d_slim[slim_index, 0] = (
(-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
+ centres_scaled[0]
+ 0.5
)
grid_pixels_2d_slim[slim_index, 1] = (
(grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
+ centres_scaled[1]
+ 0.5
)
return grid_pixels_2d_slim
@numba_util.jit()
def grid_pixel_centres_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates
are returned as integers such that they map directly to the pixel they are contained within.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, flloat)
The (y,x) origin of the grid, which the scaled grid is shifted
Returns
-------
ndarray
A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_pixels_2d_slim[slim_index, 0] = int(
(-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
+ centres_scaled[0]
+ 0.5
)
grid_pixels_2d_slim[slim_index, 1] = int(
(grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
+ centres_scaled[1]
+ 0.5
)
return grid_pixels_2d_slim
@numba_util.jit()
def grid_pixel_indexes_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are
returned as integers such that they are the pixel from the top-left of the 2D grid going rights and then downwards.
The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,).
For example:
The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0.
The fifth pixel on the top row, whose native index is [0,5], corresponds to slimmed pixel index 4.
The first pixel on the second row, whose native index is [0,1], has slimmed pixel index 10 if a row has 10 pixels.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
The input and output grids are both of shape (total_pixels, 2).
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, flloat)
The (y,x) origin of the grid, which the scaled grid is shifted.
Returns
-------
ndarray
A grid of slimmed pixel indexes with dimensions (total_pixels,).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from(
grid_scaled_2d_slim=grid_scaled_2d_slim,
shape_native=shape_native,
pixel_scales=pixel_scales,
origin=origin,
)
grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0])
for slim_index in range(grid_pixels_2d_slim.shape[0]):
grid_pixel_indexes_2d_slim[slim_index] = int(
grid_pixels_2d_slim[slim_index, 0] * shape_native[1]
+ grid_pixels_2d_slim[slim_index, 1]
)
return grid_pixel_indexes_2d_slim
@numba_util.jit()
def grid_scaled_2d_slim_from(
grid_pixels_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird.
The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this
origin after computing their values from the 1D grid pixel indexes.
Parameters
----------
grid_pixels_2d_slim: np.ndarray
The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, flloat)
The (y,x) origin of the grid, which the scaled grid is shifted.
Returns
-------
ndarray
A slimmed grid of 2d scaled coordinates with dimensions (total_pixels, 2).
Examples
--------
grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1])
grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_scaled_2d_slim = np.zeros((grid_pixels_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_scaled_2d_slim[slim_index, 0] = (
-(grid_pixels_2d_slim[slim_index, 0] - centres_scaled[0] - 0.5)
* pixel_scales[0]
)
grid_scaled_2d_slim[slim_index, 1] = (
grid_pixels_2d_slim[slim_index, 1] - centres_scaled[1] - 0.5
) * pixel_scales[1]
return grid_scaled_2d_slim
@numba_util.jit()
def grid_pixel_centres_2d_from(
grid_scaled_2d: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a native grid of 2D (y,x) scaled coordinates to a native grid of 2D (y,x) pixel values. Pixel coordinates
are returned as integers such that they map directly to the pixel they are contained within.
The input and output grids are both native resolution and therefore have shape (y_pixels, x_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_2d: np.ndarray
The native grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, flloat)
The (y,x) origin of the grid, which the scaled grid is shifted
Returns
-------
ndarray
A native grid of 2D (y,x) pixel indexes with dimensions (y_pixels, x_pixels, 2).
Examples
--------
grid_scaled_2d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
grid_pixel_centres_2d = grid_pixel_centres_2d_from(grid_scaled_2d=grid_scaled_2d, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d = np.zeros((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for y in range(grid_scaled_2d.shape[0]):
for x in range(grid_scaled_2d.shape[1]):
grid_pixels_2d[y, x, 0] = int(
(-grid_scaled_2d[y, x, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5
)
grid_pixels_2d[y, x, 1] = int(
(grid_scaled_2d[y, x, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5
)
return grid_pixels_2d
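# A minimal illustrative sketch (not part of the original module): mapping a 1x2
# native grid of scaled coordinates onto a 2x2 grid with a 0.5" pixel scale. With
# a (0.0, 0.0) origin, positive y / negative x falls in the top-left pixel [0,0]
# and negative y / positive x in the bottom-right pixel [1,1].
def _example_grid_pixel_centres_2d_from():
    grid_scaled_2d = np.array([[[0.1, -0.1], [-0.1, 0.1]]])  # shape (1, 2, 2)
    grid_pixels_2d = grid_pixel_centres_2d_from(
        grid_scaled_2d=grid_scaled_2d,
        shape_native=(2, 2),
        pixel_scales=(0.5, 0.5),
        origin=(0.0, 0.0),
    )
    # Expected pixel indexes: [[[0, 0], [1, 1]]]
    return grid_pixels_2d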
@numba_util.jit()
def relocated_grid_via_jit_from(grid, border_grid):
"""
Relocate the coordinates of a grid to its border if they are outside the border, where the border is
defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*).
This is performed as follows:
1: Use the mean value of the grid's y and x coordinates to determine the origin of the grid.
2: Compute the radial distance of every grid coordinate from the origin.
3: For every coordinate, find its nearest pixel in the border.
4: Determine if it is outside the border, by comparing its radial distance from the origin to its paired
border pixel's radial distance.
5: If its radial distance is larger, use the ratio of radial distances to move the coordinate to the
    border (if it is inside the border, do nothing).
The method can be used on uniform or irregular grids, however for irregular grids the border of the
'image-plane' mask is used to define border pixels.
Parameters
----------
grid : Grid2D
The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it.
border_grid : Grid2D
The grid of border (y,x) coordinates.
"""
grid_relocated = np.zeros(grid.shape)
grid_relocated[:, :] = grid[:, :]
border_origin = np.zeros(2)
border_origin[0] = np.mean(border_grid[:, 0])
border_origin[1] = np.mean(border_grid[:, 1])
border_grid_radii = np.sqrt(
np.add(
np.square(np.subtract(border_grid[:, 0], border_origin[0])),
np.square(np.subtract(border_grid[:, 1], border_origin[1])),
)
)
border_min_radii = np.min(border_grid_radii)
grid_radii = np.sqrt(
np.add(
np.square(np.subtract(grid[:, 0], border_origin[0])),
np.square(np.subtract(grid[:, 1], border_origin[1])),
)
)
for pixel_index in range(grid.shape[0]):
if grid_radii[pixel_index] > border_min_radii:
closest_pixel_index = np.argmin(
np.square(grid[pixel_index, 0] - border_grid[:, 0])
+ np.square(grid[pixel_index, 1] - border_grid[:, 1])
)
move_factor = (
border_grid_radii[closest_pixel_index] / grid_radii[pixel_index]
)
if move_factor < 1.0:
grid_relocated[pixel_index, :] = (
move_factor * (grid[pixel_index, :] - border_origin[:])
+ border_origin[:]
)
return grid_relocated
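# A minimal illustrative sketch (not part of the original module): with a border
# of points on a unit circle, a coordinate at radius 2.0 gets move_factor = 0.5
# and is pulled back onto the circle, while a coordinate inside the border is
# left unchanged.
def _example_relocated_grid_via_jit_from():
    theta = np.linspace(0.0, 2.0 * np.pi, 16, endpoint=False)
    border_grid = np.stack((np.sin(theta), np.cos(theta)), axis=-1)
    grid = np.array([[0.0, 2.0], [0.1, 0.1]])  # first outside, second inside
    relocated = relocated_grid_via_jit_from(grid=grid, border_grid=border_grid)
    # Expected: the first coordinate moves to ~[0.0, 1.0]; the second is unchanged.
    return relocated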
@numba_util.jit()
def furthest_grid_2d_slim_index_from(
grid_2d_slim: np.ndarray, slim_indexes: np.ndarray, coordinate: Tuple[float, float]
) -> int:
    # Returns the slim index of the grid coordinate furthest from the input (y,x) coordinate.
    distance_to_centre = 0.0
for slim_index in slim_indexes:
y = grid_2d_slim[slim_index, 0]
x = grid_2d_slim[slim_index, 1]
distance_to_centre_new = (x - coordinate[1]) ** 2 + (y - coordinate[0]) ** 2
if distance_to_centre_new >= distance_to_centre:
distance_to_centre = distance_to_centre_new
furthest_grid_2d_slim_index = slim_index
return furthest_grid_2d_slim_index
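# A minimal illustrative sketch (not part of the original module): of the three
# coordinates below, the one at index 2 lies furthest from (0.0, 0.0).
def _example_furthest_grid_2d_slim_index_from():
    grid_2d_slim = np.array([[0.0, 1.0], [0.0, 2.0], [0.0, -5.0]])
    slim_indexes = np.array([0, 1, 2])
    return furthest_grid_2d_slim_index_from(
        grid_2d_slim=grid_2d_slim, slim_indexes=slim_indexes, coordinate=(0.0, 0.0)
    )  # Expected: 2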
def grid_2d_slim_from(
grid_2d_native: np.ndarray, mask: np.ndarray, sub_size: int
) -> np.ndarray:
"""
For a native 2D grid and mask of shape [total_y_pixels, total_x_pixels, 2], map the values of all unmasked
pixels to a slimmed grid of shape [total_unmasked_pixels, 2].
The pixel coordinate origin is at the top left corner of the native grid and goes right-wards and downwards, such
    that for a grid of shape (3,3) where all pixels are unmasked:
- pixel [0,0] of the 2D grid will correspond to index 0 of the 1D grid.
- pixel [0,1] of the 2D grid will correspond to index 1 of the 1D grid.
    - pixel [1,0] of the 2D grid will correspond to index 3 of the 1D grid.
Parameters
----------
grid_2d_native : ndarray
The native grid of (y,x) values which are mapped to the slimmed grid.
    mask
A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.
Returns
-------
ndarray
        A slimmed grid of (y,x) values mapped from the native grid with dimensions (total_unmasked_pixels, 2).
"""
grid_1d_slim_y = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 0], mask_2d=mask, sub_size=sub_size
)
grid_1d_slim_x = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 1], mask_2d=mask, sub_size=sub_size
)
return np.stack((grid_1d_slim_y, grid_1d_slim_x), axis=-1)
def grid_2d_native_from(
grid_2d_slim: np.ndarray, mask_2d: np.ndarray, sub_size: int
) -> np.ndarray:
"""
For a slimmed 2D grid of shape [total_unmasked_pixels, 2], that was computed by extracting the unmasked values
from a native 2D grid of shape [total_y_pixels, total_x_pixels, 2], map the slimmed grid's coordinates back to the
native 2D grid where masked values are set to zero.
This uses a 1D array 'slim_to_native' where each index gives the 2D pixel indexes of the grid's native unmasked
pixels, for example:
- If slim_to_native[0] = [0,0], the first value of the 1D array maps to the pixels [0,0,:] of the native 2D grid.
- If slim_to_native[1] = [0,1], the second value of the 1D array maps to the pixels [0,1,:] of the native 2D grid.
- If slim_to_native[4] = [1,1], the fifth value of the 1D array maps to the pixels [1,1,:] of the native 2D grid.
Parameters
----------
grid_2d_slim
The (y,x) values of the slimmed 2D grid which are mapped to the native 2D grid.
mask_2d
A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.
Returns
-------
ndarray
        A NumPy array of shape [total_y_pixels, total_x_pixels, 2] corresponding to the (y,x) values of the native 2D
        grid mapped from the slimmed grid.
"""
grid_2d_native_y = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 0], mask_2d=mask_2d, sub_size=sub_size
)
grid_2d_native_x = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 1], mask_2d=mask_2d, sub_size=sub_size
)
return np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1)
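# A minimal illustrative sketch (not part of the original module): a slim -> native
# round trip through the two functions above, over a 2x2 mask whose bottom-right
# pixel is masked, using sub_size=1. Masked entries of the native grid come back
# as zeros, as described above.
def _example_grid_2d_slim_native_round_trip():
    mask = np.array([[False, False], [False, True]])
    grid_2d_native = np.array(
        [[[0.25, -0.25], [0.25, 0.25]], [[-0.25, -0.25], [0.0, 0.0]]]
    )
    grid_2d_slim = grid_2d_slim_from(grid_2d_native=grid_2d_native, mask=mask, sub_size=1)
    grid_2d_back = grid_2d_native_from(grid_2d_slim=grid_2d_slim, mask_2d=mask, sub_size=1)
    # grid_2d_slim has shape (3, 2); grid_2d_back equals grid_2d_native with the
    # masked pixel set to (0.0, 0.0).
    return grid_2d_slim, grid_2d_back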
@numba_util.jit()
def grid_2d_slim_upscaled_from(
grid_slim: np.ndarray,
upscale_factor: int,
pixel_scales: Union[float, Tuple[float, float]],
) -> np.ndarray:
"""
From an input slimmed 2D grid, return an upscaled slimmed 2D grid where (y,x) coordinates are added at an
upscaled resolution to each grid coordinate, analogous to a sub-grid.
Parameters
----------
grid_slim
The slimmed grid of (y,x) coordinates over which a square uniform grid is overlaid.
upscale_factor
The upscaled resolution at which the new grid coordinates are computed.
pixel_scales
        The pixel scale of the uniform grid that is laid over the irregular grid of (y,x) coordinates.
"""
grid_2d_slim_upscaled = np.zeros(
shape=(grid_slim.shape[0] * upscale_factor ** 2, 2)
)
upscale_index = 0
y_upscale_half = pixel_scales[0] / 2
y_upscale_step = pixel_scales[0] / upscale_factor
x_upscale_half = pixel_scales[1] / 2
x_upscale_step = pixel_scales[1] / upscale_factor
for slim_index in range(grid_slim.shape[0]):
y_grid = grid_slim[slim_index, 0]
x_grid = grid_slim[slim_index, 1]
for y in range(upscale_factor):
for x in range(upscale_factor):
grid_2d_slim_upscaled[upscale_index, 0] = (
y_grid
+ y_upscale_half
- y * y_upscale_step
- (y_upscale_step / 2.0)
)
grid_2d_slim_upscaled[upscale_index, 1] = (
x_grid
- x_upscale_half
+ x * x_upscale_step
+ (x_upscale_step / 2.0)
)
upscale_index += 1
return grid_2d_slim_upscaled
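# A minimal illustrative sketch (not part of the original module): upscaling a
# single (0.0, 0.0) coordinate by a factor of 2 on a 1.0" pixel scale gives a 2x2
# sub-grid of coordinates centred on the original point.
def _example_grid_2d_slim_upscaled_from():
    grid_slim = np.array([[0.0, 0.0]])
    upscaled = grid_2d_slim_upscaled_from(
        grid_slim=grid_slim, upscale_factor=2, pixel_scales=(1.0, 1.0)
    )
    # Expected: [[0.25, -0.25], [0.25, 0.25], [-0.25, -0.25], [-0.25, 0.25]]
    return upscaled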
def grid_2d_of_points_within_radius(
radius: float, centre: Tuple[float, float], grid_2d: np.ndarray
):
y_inside = []
x_inside = []
    # Keep only the (y,x) points whose squared distance from the centre is within the radius.
    for i in range(len(grid_2d[:, 0])):
        if (grid_2d[i, 0] - centre[0]) ** 2 + (
            grid_2d[i, 1] - centre[1]
        ) ** 2 < radius ** 2:
            y_inside.append(grid_2d[i, 0])
            x_inside.append(grid_2d[i, 1])
    return np.stack((np.asarray(y_inside), np.asarray(x_inside)), axis=-1)
def compute_polygon_area(points):
    # Shoelace formula for the area of a polygon from its ordered (y,x) vertices.
    x = points[:, 1]
y = points[:, 0]
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
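# A minimal illustrative sketch (not part of the original module): the shoelace
# formula applied to the (y,x) vertices of a unit square returns an area of 1.0.
def _example_compute_polygon_area():
    unit_square = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])
    return compute_polygon_area(points=unit_square)  # Expected: 1.0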
| 3.046875 | 3 |
Proxies/Proxies.py | crown-prince/proxies | 2 | 1097 | # coding: utf-8
import requests, math
import gevent
from gevent.queue import Queue
from gevent import monkey; monkey.patch_all()
from pyquery import PyQuery
class Proxies():
def __init__(self):
self.domestic_gn_url = 'http://www.kuaidaili.com/free/inha/{0}/'
self.domestic_pt_url = 'http://www.kuaidaili.com/free/intr/{0}/'
self.abroad_gn_url = 'http://www.kuaidaili.com/free/outha/{0}/'
self.abroad_pt_url = 'http://www.kuaidaili.com/free/outtr/{0}/'
self.result_arr = []
self.s = requests.Session()
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
'Referer': 'http://www.kuaidaili.com/'
}
def fetch_urls(self, queue, quantity):
while not queue.empty():
url = queue.get()
html = self.s.get(url, headers=self.headers).text
pq = PyQuery(html)
size = pq.find('tbody tr').size()
for index in range(size):
item = pq.find('tbody tr').eq(index)
ip = item.find('td').eq(0).text()
port = item.find('td').eq(1).text()
_type = item.find('td').eq(3).text()
self.result_arr.append({
str(_type).lower(): '{0}://{1}:{2}'.format(str(_type).lower(), ip, port)
})
                if len(self.result_arr) >= quantity:
                    # Stop this worker entirely once enough proxies have been collected.
                    return
def get_proxies(self, quantity, type):
'''
        quantity: number of proxies to fetch
        type: proxy category
            1. domestic high-anonymity proxies
            2. domestic regular proxies
            3. foreign high-anonymity proxies
            4. foreign regular proxies
'''
url_queue = Queue()
need_pages = int(math.ceil(quantity/15))
        # Determine which proxy category to crawl
        if type == 1:
            # domestic high-anonymity proxies
            base_url = self.domestic_gn_url
        elif type == 2:
            # domestic regular proxies
            base_url = self.domestic_pt_url
        elif type == 3:
            # foreign high-anonymity proxies
            base_url = self.abroad_gn_url
        elif type == 4:
            # foreign regular proxies
            base_url = self.abroad_pt_url
        # Build the page URLs that need to be fetched
for index in range(need_pages):
url = base_url.format(index+1)
url_queue.put(url)
        # Process all URLs using 2 gevent greenlets
gevent_list = []
for index in range(2):
gevent_list.append(
gevent.spawn(self.fetch_urls, url_queue, quantity)
)
gevent.joinall(gevent_list)
def get_result(self):
return self.result_arr
if __name__ == '__main__':
p = Proxies()
p.get_proxies(20, 1)
result = p.get_result()
print(result)
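    # A minimal illustrative sketch (not part of the original script): each entry in
    # `result` is a dict such as {"http": "http://1.2.3.4:8080"}, which can be passed
    # directly to the `proxies` argument of requests. The check URL is a placeholder.
    if result:
        try:
            resp = requests.get('http://httpbin.org/ip', proxies=result[0], timeout=5)
            print(resp.text)
        except requests.RequestException as err:
            print('proxy check failed: {0}'.format(err))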
| 2.609375 | 3 |
parallelformers/policies/base/auto.py | Oaklight/parallelformers | 454 | 1098 | <filename>parallelformers/policies/base/auto.py
# Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import suppress
from typing import List, Union
from torch import nn
from parallelformers.policies.base import Policy
class AutoPolicy:
"""Class for finds automatically appropriate policies for the current model"""
def __init__(self):
self.builtin_policies = {}
with suppress(Exception):
from transformers.models.gpt_neo.modeling_gpt_neo import (
GPTNeoPreTrainedModel,
)
from parallelformers.policies.gpt_neo import GPTNeoPolicy
self.builtin_policies[GPTNeoPreTrainedModel] = [
GPTNeoPolicy,
]
with suppress(Exception):
from transformers.models.bert.modeling_bert import (
BertPreTrainedModel,
)
from parallelformers.policies.bert import BertPolicy
self.builtin_policies[BertPreTrainedModel] = [
BertPolicy,
]
with suppress(Exception):
from transformers.models.bart.modeling_bart import (
BartPretrainedModel,
)
from parallelformers.policies.bart import (
BartDecoderPolicy,
BartEncoderPolicy,
)
self.builtin_policies[BartPretrainedModel] = [
BartEncoderPolicy,
BartDecoderPolicy,
]
with suppress(Exception):
from transformers.models.blenderbot.modeling_blenderbot import (
BlenderbotPreTrainedModel,
)
from parallelformers.policies.blenderbot import (
BlenderbotDecoderPolicy,
BlenderbotEncoderPolicy,
)
self.builtin_policies[BlenderbotPreTrainedModel] = [
BlenderbotEncoderPolicy,
BlenderbotDecoderPolicy,
]
with suppress(Exception):
from transformers.models.deberta.modeling_deberta import (
DebertaPreTrainedModel,
)
from parallelformers.policies.deberta import DebertaPolicy
self.builtin_policies[DebertaPreTrainedModel] = [
DebertaPolicy,
]
with suppress(Exception):
from transformers.models.transfo_xl.modeling_transfo_xl import (
TransfoXLPreTrainedModel,
)
from parallelformers.policies.transfo_xl import TransfoXLPolicy
self.builtin_policies[TransfoXLPreTrainedModel] = [
TransfoXLPolicy,
]
with suppress(Exception):
from transformers.models.roberta.modeling_roberta import (
RobertaPreTrainedModel,
)
from parallelformers.policies.roberta import RobertaPolicy
self.builtin_policies[RobertaPreTrainedModel] = [
RobertaPolicy,
]
with suppress(Exception):
from transformers.models.albert.modeling_albert import (
AlbertPreTrainedModel,
)
from parallelformers.policies.albert import AlbertPolicy
self.builtin_policies[AlbertPreTrainedModel] = [
AlbertPolicy,
]
with suppress(Exception):
from transformers.models.gpt2.modeling_gpt2 import (
GPT2PreTrainedModel,
)
from parallelformers.policies.gpt2 import GPT2Policy
self.builtin_policies[GPT2PreTrainedModel] = [
GPT2Policy,
]
with suppress(Exception):
from transformers.models.ctrl.modeling_ctrl import (
CTRLPreTrainedModel,
)
from parallelformers.policies.ctrl import CTRLPolicy
self.builtin_policies[CTRLPreTrainedModel] = [
CTRLPolicy,
]
with suppress(Exception):
from transformers.models.deberta_v2.modeling_deberta_v2 import (
DebertaV2PreTrainedModel,
)
from parallelformers.policies.deberta_v2 import DebertaV2Policy
self.builtin_policies[DebertaV2PreTrainedModel] = [
DebertaV2Policy,
]
with suppress(Exception):
from transformers.models.openai.modeling_openai import (
OpenAIGPTPreTrainedModel,
)
from parallelformers.policies.openai import OpenAIGPTPolicy
self.builtin_policies[OpenAIGPTPreTrainedModel] = [
OpenAIGPTPolicy,
]
with suppress(Exception):
from transformers.models.electra.modeling_electra import (
ElectraPreTrainedModel,
)
from parallelformers.policies.electra import ElectraPolicy
self.builtin_policies[ElectraPreTrainedModel] = [
ElectraPolicy,
]
with suppress(Exception):
from transformers.models.blenderbot_small.modeling_blenderbot_small import (
BlenderbotSmallPreTrainedModel,
)
from parallelformers.policies.blenderbot_small import (
BlenderbotSmallDecoderPolicy,
BlenderbotSmallEncoderPolicy,
)
self.builtin_policies[BlenderbotSmallPreTrainedModel] = [
BlenderbotSmallEncoderPolicy,
BlenderbotSmallDecoderPolicy,
]
with suppress(Exception):
from transformers.models.distilbert.modeling_distilbert import (
DistilBertPreTrainedModel,
)
from parallelformers.policies.distil_bert import DistilBertPolicy
self.builtin_policies[DistilBertPreTrainedModel] = [
DistilBertPolicy,
]
with suppress(Exception):
from transformers.models.convbert.modeling_convbert import (
ConvBertPreTrainedModel,
)
from parallelformers.policies.convbert import ConvBertPolicy
self.builtin_policies[ConvBertPreTrainedModel] = [
ConvBertPolicy,
]
with suppress(Exception):
from transformers.models.bert_generation.modeling_bert_generation import (
BertGenerationPreTrainedModel,
)
from parallelformers.policies.bert import BertPolicy
self.builtin_policies[BertGenerationPreTrainedModel] = [
BertPolicy,
]
with suppress(Exception):
from transformers.models.big_bird.modeling_big_bird import (
BigBirdPreTrainedModel,
)
from parallelformers.policies.bigbird import BigBirdPolicy
self.builtin_policies[BigBirdPreTrainedModel] = [
BigBirdPolicy,
]
with suppress(Exception):
from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import (
BigBirdPegasusPreTrainedModel,
)
from parallelformers.policies.bigbird_pegasus import (
BigBirdPegasusDecoderPolicy,
BigBirdPegasusEncoderPolicy,
)
self.builtin_policies[BigBirdPegasusPreTrainedModel] = [
BigBirdPegasusEncoderPolicy,
BigBirdPegasusDecoderPolicy,
]
with suppress(Exception):
from transformers.models.vit.modeling_vit import ViTPreTrainedModel
from parallelformers.policies.vit import ViTPolicy
self.builtin_policies[ViTPreTrainedModel] = [
ViTPolicy,
]
with suppress(Exception):
from transformers.models.deit.modeling_deit import (
DeiTPreTrainedModel,
)
from parallelformers.policies.deit import DeiTPolicy
self.builtin_policies[DeiTPreTrainedModel] = [DeiTPolicy]
with suppress(Exception):
from transformers.models.mbart.modeling_mbart import (
MBartPreTrainedModel,
)
from parallelformers.policies.mbart import (
MBartDecoderPolicy,
MBartEncoderPolicy,
)
self.builtin_policies[MBartPreTrainedModel] = [
MBartEncoderPolicy,
MBartDecoderPolicy,
]
with suppress(Exception):
from transformers.models.t5.modeling_t5 import T5PreTrainedModel
from parallelformers.policies.t5 import T5Policy
self.builtin_policies[T5PreTrainedModel] = [
T5Policy,
]
with suppress(Exception):
from transformers.models.pegasus.modeling_pegasus import (
PegasusPreTrainedModel,
)
from parallelformers.policies.pegasus import (
PegasusDecoderPolicy,
PegasusEncoderPolicy,
)
self.builtin_policies[PegasusPreTrainedModel] = [
PegasusEncoderPolicy,
PegasusDecoderPolicy,
]
with suppress(Exception):
from transformers.models.fsmt.modeling_fsmt import (
PretrainedFSMTModel,
)
from parallelformers.policies.fsmt import (
FSMTDecoderPolicy,
FSMTEncoderPolicy,
)
self.builtin_policies[PretrainedFSMTModel] = [
FSMTEncoderPolicy,
FSMTDecoderPolicy,
]
with suppress(Exception):
from transformers.models.xlm.modeling_xlm import XLMPreTrainedModel
from parallelformers.policies.xlm import (
XLMAttentionPolicy,
XLMMLPPolicy,
)
self.builtin_policies[XLMPreTrainedModel] = [
XLMAttentionPolicy,
XLMMLPPolicy,
]
with suppress(Exception):
from transformers.models.m2m_100.modeling_m2m_100 import (
M2M100PreTrainedModel,
)
from parallelformers.policies.m2m_100 import (
M2M100DecoderPolicy,
M2M100EncoderPolicy,
)
self.builtin_policies[M2M100PreTrainedModel] = [
M2M100EncoderPolicy,
M2M100DecoderPolicy,
]
with suppress(Exception):
from transformers.models.marian.modeling_marian import (
MarianPreTrainedModel,
)
from parallelformers.policies.marian import (
MarianDecoderPolicy,
MarianEncoderPolicy,
)
self.builtin_policies[MarianPreTrainedModel] = [
MarianEncoderPolicy,
MarianDecoderPolicy,
]
with suppress(Exception):
from transformers.models.mobilebert.modeling_mobilebert import (
MobileBertPreTrainedModel,
)
from parallelformers.policies.mobilebert import MobileBertPolicy
self.builtin_policies[MobileBertPreTrainedModel] = [
MobileBertPolicy,
]
with suppress(Exception):
from transformers.models.mpnet.modeling_mpnet import (
MPNetPreTrainedModel,
)
from parallelformers.policies.mpnet import (
MPNetEncoderPolicy,
MPNetLayerPolicy,
)
self.builtin_policies[MPNetPreTrainedModel] = [
MPNetEncoderPolicy,
MPNetLayerPolicy,
]
with suppress(Exception):
from transformers.models.luke.modeling_luke import (
LukePreTrainedModel,
)
from parallelformers.policies.luke import LukePolicy
self.builtin_policies[LukePreTrainedModel] = [
LukePolicy,
]
with suppress(Exception):
from transformers.models.dpr.modeling_dpr import (
DPRPretrainedContextEncoder,
DPRPretrainedQuestionEncoder,
DPRPretrainedReader,
)
self.builtin_policies[DPRPretrainedReader] = [
BertPolicy,
]
self.builtin_policies[DPRPretrainedQuestionEncoder] = [
BertPolicy,
]
self.builtin_policies[DPRPretrainedContextEncoder] = [
BertPolicy,
]
with suppress(Exception):
from transformers.models.lxmert.modeling_lxmert import (
LxmertPreTrainedModel,
)
from parallelformers.policies.lxmert import LxmertPolicy
self.builtin_policies[LxmertPreTrainedModel] = [
LxmertPolicy,
]
with suppress(Exception):
from transformers.models.hubert.modeling_hubert import (
HubertPreTrainedModel,
)
from parallelformers.policies.hubert import HubertPolicy
self.builtin_policies[HubertPreTrainedModel] = [
HubertPolicy,
]
with suppress(Exception):
from transformers.models.wav2vec2.modeling_wav2vec2 import (
Wav2Vec2PreTrainedModel,
)
from parallelformers.policies.wav2vec import Wav2VecPolicy
self.builtin_policies[Wav2Vec2PreTrainedModel] = [
Wav2VecPolicy,
]
with suppress(Exception):
from transformers.models.xlnet.modeling_xlnet import (
XLNetPreTrainedModel,
)
from parallelformers.policies.xlnet import XLNetPolicy
self.builtin_policies[XLNetPreTrainedModel] = [
XLNetPolicy,
]
with suppress(Exception):
from transformers.models.retribert.modeling_retribert import (
RetriBertPreTrainedModel,
)
self.builtin_policies[RetriBertPreTrainedModel] = [
BertPolicy,
]
with suppress(Exception):
from transformers.models.clip.modeling_clip import (
CLIPPreTrainedModel,
)
from parallelformers.policies.clip import (
CLIPLayerPolicy,
CLIPTextPolicy,
CLIPVisionPolicy,
)
self.builtin_policies[CLIPPreTrainedModel] = [
CLIPLayerPolicy,
CLIPTextPolicy,
CLIPVisionPolicy,
]
with suppress(Exception):
from transformers.models.detr.modeling_detr import (
DetrPreTrainedModel,
)
from parallelformers.policies.detr import (
DetrDecoderPolicy,
DetrEncoderPolicy,
)
self.builtin_policies[DetrPreTrainedModel] = [
DetrEncoderPolicy,
DetrDecoderPolicy,
]
with suppress(Exception):
from transformers.models.reformer.modeling_reformer import (
ReformerPreTrainedModel,
)
from parallelformers.policies.reformer import ReformerPolicy
self.builtin_policies[ReformerPreTrainedModel] = [
ReformerPolicy,
]
with suppress(Exception):
from transformers.models.longformer.modeling_longformer import (
LongformerPreTrainedModel,
)
from parallelformers.policies.longformer import LongformerPolicy
self.builtin_policies[LongformerPreTrainedModel] = [
LongformerPolicy,
]
with suppress(Exception):
from transformers.models.roformer.modeling_roformer import (
RoFormerPreTrainedModel,
)
from parallelformers.policies.roformer import RoformerPolicy
self.builtin_policies[RoFormerPreTrainedModel] = [
RoformerPolicy,
]
with suppress(Exception):
from transformers.models.ibert.modeling_ibert import (
IBertPreTrainedModel,
)
from parallelformers.policies.ibert import IBertPolicy
self.builtin_policies[IBertPreTrainedModel] = [
IBertPolicy,
]
with suppress(Exception):
from transformers.models.tapas.modeling_tapas import (
TapasPreTrainedModel,
)
from parallelformers.policies.tapas import TapasPolicy
self.builtin_policies[TapasPreTrainedModel] = [
TapasPolicy,
]
with suppress(Exception):
from transformers.models.funnel.modeling_funnel import (
FunnelPreTrainedModel,
)
from parallelformers.policies.funnel import FunnelPolicy
self.builtin_policies[FunnelPreTrainedModel] = [
FunnelPolicy,
]
with suppress(Exception):
from transformers.models.layoutlm.modeling_layoutlm import (
LayoutLMPreTrainedModel,
)
from parallelformers.policies.layoutlm import LayoutLMPolicy
self.builtin_policies[LayoutLMPreTrainedModel] = [
LayoutLMPolicy,
]
with suppress(Exception):
from transformers.models.led.modeling_led import LEDPreTrainedModel
from parallelformers.policies.led import (
LEDDecoderPolicy,
LEDEncoderPolicy,
)
self.builtin_policies[LEDPreTrainedModel] = [
LEDEncoderPolicy,
LEDDecoderPolicy,
]
with suppress(Exception):
from transformers.models.prophetnet.modeling_prophetnet import (
ProphetNetPreTrainedModel,
)
from parallelformers.policies.prophetnet import (
ProphetNetDecoderPolicy,
ProphetNetEncoderPolicy,
)
self.builtin_policies[ProphetNetPreTrainedModel] = [
ProphetNetEncoderPolicy,
ProphetNetDecoderPolicy,
]
with suppress(Exception):
from transformers.models.visual_bert.modeling_visual_bert import (
VisualBertPreTrainedModel,
)
from parallelformers.policies.visual_bert import VisualBertPolicy
self.builtin_policies[VisualBertPreTrainedModel] = [
VisualBertPolicy,
]
with suppress(Exception):
from transformers.models.speech_to_text.modeling_speech_to_text import (
Speech2TextPreTrainedModel,
)
from parallelformers.policies.speech_to_text import (
Speech2TextDecoderPolicy,
Speech2TextEncoderPolicy,
)
self.builtin_policies[Speech2TextPreTrainedModel] = [
Speech2TextEncoderPolicy,
Speech2TextDecoderPolicy,
]
with suppress(Exception):
from transformers.models.gptj.modeling_gptj import (
GPTJPreTrainedModel,
)
from parallelformers.policies.gptj import GPTJPolicy
self.builtin_policies[GPTJPreTrainedModel] = [
GPTJPolicy,
]
with suppress(Exception):
from transformers.models.megatron_bert import (
MegatronBertPreTrainedModel,
)
from parallelformers.policies.megtron_bert import (
MegatronBertPolicy,
)
self.builtin_policies[MegatronBertPreTrainedModel] = [
MegatronBertPolicy,
]
def get_policy(self, model: nn.Module) -> Union[List[Policy], None]:
"""
Find appropriate policies for the current model
Args:
model (nn.Module): model to parallelize
Returns:
Union[List[Policy], None]: appropriate policies or none
"""
for k, v in self.available().items():
if isinstance(model, k):
return v
return None
def available(self):
"""Dictionary of available models and policies"""
return self.builtin_policies
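# A minimal illustrative sketch (not part of the original module): resolving the
# policies registered for a Hugging Face model. Which list is returned depends on
# which transformers model classes are importable in the current environment.
def _example_auto_policy():
    from transformers import BertConfig, BertModel
    model = BertModel(BertConfig())  # a randomly-initialised BERT model
    policies = AutoPolicy().get_policy(model=model)
    return policies  # e.g. [BertPolicy] if the BERT policy could be imported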
| 1.617188 | 2 |
main/upper_air_humidity.py | RyosukeDTomita/gcmPlot | 0 | 1099 | # coding: utf-8
"""
Name: upper_air_humidity.py
Make an upper-level weather chart.
Usage: python3 upper_air_humidity.py --file <ncfile>
Author: <NAME>
Date: 2022/01/07
"""
import argparse
from ncmagics import fetchtime, japanmap, meteotool
def parse_args() -> dict:
"""parse_args.
    Set the input ncfile path from the command line.
    Args:
    Returns:
        dict: the parsed arguments, i.e. {"file": <path to ncfile>}.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="set ncfile.", type=str)
p = parser.parse_args()
args = {"file": p.file}
return args
def output_name(ncfile: str, isobaric_surface: int) -> str:
"""output_name.
Args:
        ncfile (str): path to the input netCDF file.
        isobaric_surface (int): isobaric surface in hPa.
    Returns:
        str: output name combining the file's date-time and the isobaric surface.
"""
date_time = fetchtime.fetch_time(ncfile)
outname = (date_time + "_" + str(isobaric_surface))
return outname
def main():
"""main.
"""
args = parse_args()
meteo_tool = meteotool.MeteoTools(args["file"])
lat, lon = meteo_tool.get_lat_lon()
isobaric_surface = (850, 500, 300)
#label_upper = (30, 0)
#lebel_min = (-30, -60)
for i, pressure in enumerate(isobaric_surface):
# get parameter
temp_c = meteo_tool.get_parameter('t', isobaric_surface=pressure) - 273.15
rh = meteo_tool.get_parameter('r', isobaric_surface=pressure)
height_gpm = meteo_tool.get_parameter('gh', isobaric_surface=pressure)
u_wind = meteo_tool.get_parameter('u', isobaric_surface=pressure)
v_wind = meteo_tool.get_parameter('v', isobaric_surface=pressure)
jp_map = japanmap.JpMap()
jp_map.contour_plot(lon, lat, height_gpm)
#jp_map.shade_plot(lon, lat, temp_c,
# label="2m temperature ($^\circ$C)",
# color_bar_label_max=label_upper[i],
# color_bar_label_min=lebel_min[i],
# color_map_type="temperature",
# double_color_bar=True,)
jp_map.shade_plot(lon, lat, rh,
label="relative humidity (%)",
color_bar_label_max=100,
color_bar_label_min=0,
color_map_type="gray",
double_color_bar=False,)
jp_map.vector_plot(lon, lat, u_wind, v_wind,
vector_interval=5, vector_scale=10, mode="wind")
#jp_map.gray_shade(lon, lat, rh,
# label="relative humidity (%)",
# color_bar_label_max=100,
# color_bar_label_min=0,
# )
if pressure == 850:
jp_map.color_line(lon, lat, temp_c, line_value=-6, color='#0000ff')
if pressure == 500:
jp_map.color_line(lon, lat, temp_c, line_value=-36, color='#b22222')
outname = output_name(args["file"], pressure)
print(outname)
jp_map.save_fig(outname, str(pressure) + "hPa")
if __name__ == "__main__":
main()
| 3.203125 | 3 |