max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
blargh/engine/storage/pg/pg_storage.py | johny-b/blargh | 0 | 12799451 | <reponame>johny-b/blargh
'''
CURRENT ASSUMPTIONS:
* all tables are in a single schema
* tables have the same names as resources
'''
from ..base_storage import BaseStorage
from blargh.engine import dm
from .query import Query
from .... import exceptions
import psycopg2
def capture_psycopg_error(f):
def wrapped(self, *args, **kwargs):
def diag_2_msg(diag):
# print(diag.message_primary)
return diag.message_primary
# return "{}\n{}".format(diag.message_primary, diag.message_detail)
try:
return f(self, *args, **kwargs)
except psycopg2.extensions.TransactionRollbackError as e:
# Either transaction was not serializable, or some deadlock was detected.
# Whatever happened, it makes sense to run this operation again.
raise exceptions.TransactionConflictRetriable()
except psycopg2.InterfaceError as e:
raise exceptions.e500(diag_2_msg(e.diag))
except psycopg2.Error as e:
raise exceptions.e400(diag_2_msg(e.diag))
return wrapped
class PGStorage(BaseStorage):
def __init__(self, conn, schema, query_cls=Query):
# This construction is purely to avoid wrapping __init__
self._true_init(conn, schema, query_cls)
@capture_psycopg_error
def _true_init(self, conn, schema, query_cls):
# Modify connection
if conn.status is not psycopg2.extensions.STATUS_READY:
conn.commit()
conn.set_session(isolation_level='SERIALIZABLE', autocommit=False)
conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''')
self._conn = conn
self._schema = schema
# To initialize the query instance we need the list of all primary keys. Those are available
# in the data model, but it seems a good idea to avoid passing the data model directly to the storage.
# Instead, self._query is built lazily, when needed - and that should be after the engine
# was set up, so the data model is available via the dm() function.
self._query_cls = query_cls
self._query = None
def _q(self):
if self._query is None:
self._query = self._query_cls(self._conn, self._schema,
{o.name: o.pkey_field().name for o in dm().objects().values()})
return self._query
# PUBLIC INTERFACE
@capture_psycopg_error
def save(self, instance):
# If we got here, instance.changed() is True, but all changes could be made
# on "virtual" columns (rel fields stored on the other side). In such case,
# nothing is saved, because no database columns changed.
table_columns = self._q().table_columns(instance.model.name)
changed_columns = [f.name for f in instance.changed_fields if f.name in table_columns]
if not changed_columns:
return
# Create representation
data = self._write_repr(instance)
# Save new value
name = instance.model.name
self._q().upsert(name, data)
def _write_repr(self, instance):
'''
Returns INSTANCE representation including all columns that will be written to the database.
This means
* all values other than None
* None values, if they were explicitly set (by INSTANCE.update() -> they are in INSTANCE.changed_fields)
'''
# 1. Create dictionary with all columns that should be written to the database
data = {}
for field, val in instance.field_values():
if not field.stored():
continue
# If val.stored() is None it should be written only if field changed.
# This way we distinguish None fields that were never set before (and might be set to
# a different value by database default) from updated fields set to None.
if val.stored() is None and field not in instance.changed_fields:
continue
data[field.name] = val.stored()
# 2. Add primary key value (if this is a fresh instance, it is already in data)
pkey_name = instance.model.pkey_field().name
data[pkey_name] = instance.id()
# 3. Remove keys not matching database columns
clean_data = self._remove_virtual_columns(instance.model.name, data)
return clean_data
@capture_psycopg_error
def load(self, name, id_):
return self.load_many(name, [id_])[0]
@capture_psycopg_error
def load_many(self, name, ids):
if not ids:
return []
# Determine column name
pkey_name = dm().object(name).pkey_field().name
stored_data = self._select_objects(name, {pkey_name: ids})
if len(stored_data) != len(ids):
got_ids = [d[pkey_name] for d in stored_data]
missing_ids = [id_ for id_ in ids if id_ not in got_ids]
raise exceptions.e404(object_name=name, object_id=missing_ids[0])
full_data = self._add_virtual_columns(name, stored_data)
return full_data
@capture_psycopg_error
def begin(self):
# All necessary things were set in __init__ (autocommit, deferrable constraints),
# so begin does nothing
pass
@capture_psycopg_error
def commit(self):
self._conn.commit()
@capture_psycopg_error
def rollback(self):
self._conn.rollback()
@capture_psycopg_error
def delete(self, name, id_):
self._q().delete(name, id_)
@capture_psycopg_error
def selected_ids(self, this_name, wr, sort, limit):
'''
Return IDs from table THIS_NAME matching WR.
SORT and LIMIT are ignored (storages are allowed to ignore those parameters; they are applied
later in Engine.get).
HOW IT SHOULD BE DONE
1. WR is interpreted as WHERE
2. SORT becomes ORDER BY
3. LIMIT becomes LIMIT
and everything is processed in a single query.
That would be easy if we assumed that all REL fields have information stored in THIS_NAME table
but unfortunately REL field could be stored on any table, so instead
of WHEREs we might get some JOINS and this becomes more complicated.
HOW IT IS CURRENTLY DONE
1. WR is split into two parts:
* one select for THIS_NAME table with all possible WHEREs
* one select for each joined table with REL field stored on the other side
2. Intersection of IDs from all selects is returned
3. SORT and LIMIT are ignored. SORT is ignored because there is no way of implementing it
that differs from both:
* HOW IT SHOULD BE DONE above
* sorting in Engine.get
and LIMIT is ignored because SORTing first is necessary.
'''
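        # A hypothetical example (reusing the parent/child relation described in the docstrings
        # below): with this_name == 'parent' and wr == {'name': 'foo', 'children': [1, 2]},
        # 'name' is a real column of the parent table and goes to this_table_wr, while
        # 'children' is stored on the child side, so it becomes a select on the child table
        # restricted to pkeys [1, 2]; the parent ids are then read back from the child's
        # foreign-key column and intersected with the ids from the first select.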
model = dm().object(this_name)
# First, split to parts
this_table_wr = {}
other_selects = []
for key, val in wr.items():
if key in self._q().table_columns(this_name):
this_table_wr[key] = val
else:
field = model.field(key)
other_name = field.stores.name
other_field_name = field.other.name
other_pkey_name = dm().object(other_name).pkey_field().name
other_selects.append((other_name, other_field_name, {other_pkey_name: val}))
# List of sets of ids, to be intersected later
sets_of_ids = []
# This table ids
this_table_objects = self._select_objects(this_name, this_table_wr)
this_pkey_name = model.pkey_field().name
sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects]))
# Other tables ids
for other_name, other_fk_name, other_table_wr in other_selects:
other_table_objects = self._select_objects(other_name, other_table_wr)
sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects]))
# Final ids
return sorted(set.intersection(*sets_of_ids))
@capture_psycopg_error
def _select_objects(self, name, wr):
'''
WR contains key-val pairs matching columns in table NAME.
List of dictionaries from table NAME is returned.
'''
return self._q().select(name, wr)
@capture_psycopg_error
def next_id(self, name):
'''
If the NAME table's primary key column has a default expression, its next value is returned.
This works well with
* nextval(sequence)
* any similar user-defined function
If there is no default, an exception is raised. This might change, and one
day we'll look for the biggest current ID and add 1.
NOTE: Any value returned by the generator might already be taken, if the client set it
explicitly (probably via PUT). The generator is called repeatedly until we
find a non-duplicated value. This might take long if there were many PUTs,
but next time it will probably be fast (if nextval(sequence) is used).
Also:
* if the generator returns the same value twice, an exception is raised
* maybe this could be done better? Note - we also want to handle defaults other than
nextval(), e.g. ones dependent on now().
'''
pkey_name = dm().object(name).pkey_field().name
default_expr = self._q().default_pkey_expr(name, pkey_name)
if default_expr is None:
raise exceptions.ProgrammingError("Unknown default pkey value for {}".format(name))
old_val = None
while True:
cursor = self._conn.cursor()
cursor.execute("SELECT {}".format(default_expr))
val = cursor.fetchone()[0]
if self._select_objects(name, {pkey_name: val}):
if old_val == val:
raise exceptions.ProgrammingError('Pkey value generator returned twice the same value. \
Table: {}, val: {}'.format(name, val))
else:
old_val = val
else:
return val
@capture_psycopg_error
def data(self):
d = {}
for name, obj in dm().objects().items():
d[name] = self._q().dump_table(name, obj.pkey_field().name)
return d
# PRIVATE METHODS
def _remove_virtual_columns(self, name, data):
'''
DATA contains all "possible" column values.
Some of those need to be written to the database, but other are
redundat (i.e. if we have relation parent-child, probably child table has
something like 'parent_id', but parent table has no 'children' column, so
it might not be written) and they need to be removed now.
This operation should reverse _add_virtual_columns.
'''
clean_data = {}
for key, val in data.items():
if key in self._q().table_columns(name):
clean_data[key] = val
return clean_data
def _add_virtual_columns(self, this_name, data):
'''
DATA contains only the values stored in the THIS_NAME table.
We need to fill relationship fields based on other tables.
E.g. in a parent-child relationship the child table probably has
'parent_id' while the parent table has no 'children' column,
so if THIS_NAME == 'parent' we need to add a 'children' key to the data,
based on the relationship fields.
This operation should reverse _remove_virtual_columns.
'''
# Determine IDs
pkey_name = dm().object(this_name).pkey_field().name
ids = [d[pkey_name] for d in data]
for field in dm().object(this_name).fields():
name = field.name
if field.rel and name not in data[0]:
other_name = field.stores.name
other_field_name = field.other.name
all_related = self._select_objects(other_name, {other_field_name: ids})
related_pkey_name = dm().object(other_name).pkey_field().name
for el in data:
this_related = [x for x in all_related if x[other_field_name] == el[pkey_name]]
related_ids = [x[related_pkey_name] for x in this_related]
if field.multi:
el[name] = related_ids
else:
el[name] = related_ids[0] if related_ids else None
return data
| 1.921875 | 2 |
python-algorithm/leetcode/problem_981.py | isudox/nerd-algorithm | 5 | 12799452 | """981. Time Based Key-Value Store
https://leetcode.com/problems/time-based-key-value-store/
Create a time-based key-value store class TimeMap that supports two operations.
1. set(string key, string value, int timestamp)
Stores the key and value, along with the given timestamp.
2. get(string key, int timestamp)
Returns a value such that set(key, value, timestamp_prev) was called previously,
with timestamp_prev <= timestamp.
If there are multiple such values, it returns the one with the largest
timestamp_prev.
If there are no values, it returns the empty string ("").
Example 1:
Input: inputs = ["TimeMap","set","get","get","set","get","get"], inputs = [[],["foo","bar",1],["foo",1],["foo",3],["foo","bar2",4],["foo",4],["foo",5]]
Output: [null,null,"bar","bar",null,"bar2","bar2"]
Explanation:
TimeMap kv;
kv.set("foo", "bar", 1); // store the key "foo" and value "bar" along with timestamp = 1
kv.get("foo", 1); // output "bar"
kv.get("foo", 3); // output "bar" since there is no value corresponding to foo at timestamp 3 and timestamp 2, then the only value is at timestamp 1 ie "bar"
kv.set("foo", "bar2", 4);
kv.get("foo", 4); // output "bar2"
kv.get("foo", 5); //output "bar2"
Example 2:
Input: inputs = ["TimeMap","set","set","get","get","get","get","get"], inputs = [[],["love","high",10],["love","low",20],["love",5],["love",10],["love",15],["love",20],["love",25]]
Output: [null,null,null,"","high","high","low","low"]
Note:
All key/value strings are lowercase.
All key/value strings have length in the range [1, 100]
The timestamps for all TimeMap.set operations are strictly increasing.
1 <= timestamp <= 10^7
TimeMap.set and TimeMap.get functions will be called a total of 120000 times (combined) per test case.
"""
class TimeMap:
def __init__(self):
"""
Initialize your data structure here.
"""
self.dic = {}
def set(self, key: 'str', value: 'str', timestamp: 'int') -> 'None':
if key in self.dic:
self.dic[key].append({'v': value, 't': timestamp})
else:
self.dic[key] = [{'v': value, 't': timestamp}]
def get(self, key: 'str', timestamp: 'int') -> 'str':
if key in self.dic:
for kv in reversed(self.dic[key]):
if timestamp >= kv['t']:
return kv['v']
return ""
else:
return ""
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
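# A minimal, hedged usage sketch (not part of the original submission) that replays
# Example 1 from the problem statement above:
if __name__ == "__main__":
    kv = TimeMap()
    kv.set("foo", "bar", 1)
    assert kv.get("foo", 1) == "bar"
    assert kv.get("foo", 3) == "bar"    # nothing stored at t=3 or t=2, so the t=1 value is returned
    kv.set("foo", "bar2", 4)
    assert kv.get("foo", 4) == "bar2"
    assert kv.get("foo", 5) == "bar2"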
| 3.6875 | 4 |
chronologer/vega.py | dandavison/chronologer | 165 | 12799453 | import json
import os
from jinja2 import Template
from chronologer.config import config
def write_html():
html_file = os.path.join(os.path.dirname(__file__), "templates", "index.html")
with open(html_file) as fp:
html_template = Template(fp.read())
if not config.dry_run:
boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2)
with open(config.html_output_file, "w") as fp:
fp.write(html_template.render(boxplot_spec=boxplot_spec))
def _get_boxplot_spec():
with open(config.combined_benchmark_file) as fp:
values = json.load(fp)
return {
"$schema": "https://vega.github.io/schema/vega-lite/v3.json",
"data": {"values": values},
"mark": {"type": "boxplot", "extent": "min-max", "size": 5},
"width": 1400,
"height": 500,
"encoding": {
"y": {"field": "time", "type": "quantitative", "axis": {"title": "Time"}},
"x": {
"field": "commit",
"type": "ordinal",
"axis": {"title": "Commit", "labels": False, "ticks": False},
},
"tooltip": {"field": "message", "type": "ordinal", "aggregate": "min"},
},
}
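# A hedged sketch of the record shape the spec above expects to find in
# config.combined_benchmark_file (field names are taken from the encoding block;
# the concrete values are illustrative only):
#
#   [
#     {"commit": "a1b2c3d", "time": 0.42, "message": "speed up parser"},
#     {"commit": "a1b2c3d", "time": 0.44, "message": "speed up parser"},
#     {"commit": "d4e5f6a", "time": 0.39, "message": "cache results"}
#   ]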
| 2.375 | 2 |
pyaaf/__init__.py | markreidvfx/pyaaf_old | 2 | 12799454 | from core import *
import core
def Initialize():
"""
find libcom-api and initialize
"""
import os
import sys
dirname = os.path.dirname(__file__)
ext = '.so'
if sys.platform == 'darwin':
ext = '.dylib'
elif sys.platform.startswith("win"):
ext = '.dll'
AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext)
os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI)
return core.AxInit()
_AxInit = Initialize()
from util import __AxWrap
__AxWrap(globals())
from open import open
from util import Ax | 2.25 | 2 |
utils/yaml_utils.py | TestOpsFeng/selenium_framework | 7 | 12799455 | import yaml
import os
fileNamePath = os.path.split(os.path.realpath(__file__))[0]
dir = os.path.join(fileNamePath,'../conf')
def get(file_name,*keys,file_path=dir):
yamlPath = os.path.join(file_path, file_name)
file = open(yamlPath, 'r', encoding='utf-8')
config = yaml.load(file)
for key in keys:
config = config[key]
return config
if __name__ == '__main__':
# wait_time = yaml_utils.get("constant.yaml", "wait_elements_time")
# driver = get("host","url_regerister")
# driver2 = get_url("constant.yaml","host")
driver2 = get("constant.yaml","test1","test2","test33")
print(driver2)
# a = (1,2)
# print(type(a)) | 2.625 | 3 |
checkov/terraform/checks/resource/oci/IAMPasswordLength.py | jamesholland-uk/checkov | 1 | 12799456 | <reponame>jamesholland-uk/checkov
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class IAMPasswordLength(BaseResourceCheck):
def __init__(self):
name = "OCI IAM password policy for local (non-federated) users has a minimum length of 14 characters"
id = "CKV_OCI_18"
supported_resources = ['oci_identity_authentication_policy']
categories = [CheckCategories.IAM]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
if 'password_policy' in conf.keys():
self.evaluated_keys = ["password_policy"]
rules = conf.get("password_policy")[0]
if 'minimum_password_length' in rules:
passwordlength = rules.get("minimum_password_length")
if isinstance(passwordlength[0], int) and passwordlength[0] < 14:
self.evaluated_keys = ["password_policy/minimum_password_length"]
return CheckResult.FAILED
return CheckResult.PASSED
return CheckResult.FAILED
return CheckResult.FAILED
check = IAMPasswordLength()
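# A hedged sketch of exercising the check directly. The nested list-wrapping of values
# mirrors what scan_resource_conf above unpacks; the exact shape produced by Checkov's
# HCL parser is an assumption here.
if __name__ == '__main__':
    failing_conf = {'password_policy': [{'minimum_password_length': [12]}]}
    passing_conf = {'password_policy': [{'minimum_password_length': [14]}]}
    print(check.scan_resource_conf(failing_conf))   # CheckResult.FAILED
    print(check.scan_resource_conf(passing_conf))   # CheckResult.PASSED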
| 2.25 | 2 |
run.py | ilyavinn/geppetto | 4 | 12799457 | <reponame>ilyavinn/geppetto
"""
The MIT License (MIT)
Copyright (c) <NAME>, Inc. 2015.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import argparse
import textwrap
import traceback
from common.common import report, capture_exception_and_abort
from common.geppetto import Geppetto
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--test_file', required=True, help="Test file.")
parser.add_argument('-c', '--config', required=True, help="Configuration file.")
parser.add_argument('-e', '--email', help="Email to send results to.")
return parser.parse_args()
def do_welcome():
title = """
_____ _ _
/ ____| | | | |
| | __ ___ _ __ _ __ ___| |_| |_ ____
| | |_ |/ _ \ '_ \| '_ \ / _ \ __| __/ _ |
| |__| | __/ |_) | |_) | __/ |_| || (_) |
\_____|\___| .__/| .__/ \___|\__|\__\___/
| | | |
|_| |_| The Cloud Maestro
"""
license = """THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
license = '%s\n%s\n%s' % ('*' * 70, textwrap.fill(license, 70), '*' * 70,)
usage = """ """
print(title)
print(license)
print(usage)
def main():
args = parse_args()
# Import the test file.
try:
test_file_name = args.test_file
test_file = test_file_name[:-3].replace('/', '.')
mod = __import__(test_file, fromlist=['TestRun'])
TestRun = getattr(mod, 'TestRun')
except:
report('Unable to load TestRun() from file: %s' % args.test_file, 'critical', no_date=True)
print(traceback.print_exc())
sys.exit(1)
# Import the config file.
try:
config_file_name = args.config
config_file = config_file_name[:-3].replace('/', '.')
mod = __import__(config_file, fromlist=['CONFIG_DICT'])
config_dict = getattr(mod, 'CONFIG_DICT')
except:
report("Unable to import the config file: %s" % args.config, 'critical', no_date=True)
print(traceback.print_exc())
sys.exit(1)
do_welcome()
class GeppettoExecutableTest(TestRun):
def __init__(self):
Geppetto.__init__(self)
TestRun.set_init_params(self, config_dict, args, test_file_name, config_file_name)
@capture_exception_and_abort
def run(self):
TestRun.run(self)
g = GeppettoExecutableTest()
g.run()
if __name__ == '__main__':
main()
| 2.015625 | 2 |
test/test_expansion.py | zachjweiner/pystella | 14 | 12799458 | __copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pystella as ps
import pytest
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("Stepper", [ps.RungeKutta4, ps.LowStorageRK54])
def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False):
if proc_shape != (1, 1, 1):
pytest.skip("test expansion only on one rank")
def sol(w, t):
x = (1 + 3*w)
return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x)
from pystella.step import LowStorageRKStepper
is_low_storage = LowStorageRKStepper in Stepper.__bases__
for w in [0, 1/3, 1/2, 1, -1/4]:
def energy(a):
return a**(-3-3*w)
def pressure(a):
return w * energy(a)
t = 0
dt = .005
expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi))
while t <= 10. - dt:
for s in range(expand.stepper.num_stages):
slc = (0) if is_low_storage else (0 if s == 0 else 1)
expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt)
t += dt
slc = () if is_low_storage else (0)
order = expand.stepper.expected_order
rtol = dt**order
print(order,
w,
expand.a[slc]/sol(w, t) - 1,
expand.constraint(energy(expand.a[slc])))
assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \
f"FLRW solution inaccurate for {w=}"
assert expand.constraint(energy(expand.a[slc])) < rtol, \
f"FLRW solution disobeying constraint for {w=}"
if __name__ == "__main__":
from common import parser
args = parser.parse_args()
from pystella.step import all_steppers
for stepper in all_steppers[-5:]:
test_expansion(
None, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing,
Stepper=stepper,
)
| 1.726563 | 2 |
mergify_engine/tests/unit/test_subscription.py | Divine-D/mergify-engine | 1 | 12799459 | import pytest
from mergify_engine import subscription
def test_init():
subscription.Subscription(
123, True, "friend", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY})
)
def test_dict():
owner_id = 1234
sub = subscription.Subscription(
owner_id,
True,
"friend",
{},
frozenset({subscription.Features.PRIVATE_REPOSITORY}),
)
assert sub.from_dict(owner_id, sub.to_dict()) == sub
@pytest.mark.parametrize(
"features",
(
{},
{subscription.Features.PRIVATE_REPOSITORY},
{
subscription.Features.PRIVATE_REPOSITORY,
subscription.Features.PRIORITY_QUEUES,
},
),
)
@pytest.mark.asyncio
async def test_save_sub(features):
owner_id = 1234
sub = subscription.Subscription(owner_id, True, "friend", {}, frozenset(features))
await sub.save_subscription_to_cache()
rsub = await subscription.Subscription._retrieve_subscription_from_cache(owner_id)
assert rsub == sub
@pytest.mark.asyncio
async def test_unknown_sub():
sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189)
assert sub is None
def test_from_dict_unknown_features():
assert subscription.Subscription.from_dict(
123,
{
"subscription_active": True,
"subscription_reason": "friend",
"tokens": {},
"features": ["unknown feature"],
},
) == subscription.Subscription(
123,
True,
"friend",
{},
frozenset(),
)
def test_active_feature():
sub = subscription.Subscription(
123,
True,
"friend",
{},
frozenset(),
)
assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False
sub = subscription.Subscription(
123,
False,
"friend",
{},
frozenset([subscription.Features.PRIORITY_QUEUES]),
)
assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False
sub = subscription.Subscription(
123,
True,
"friend",
{},
frozenset([subscription.Features.PRIORITY_QUEUES]),
)
assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is True
| 2.046875 | 2 |
src/ingest-pipeline/airflow/plugins/hubmap_api/__init__.py | AustinHartman/ingest-pipeline | 6 | 12799460 | from airflow.plugins_manager import AirflowPlugin
from hubmap_api.manager import aav1 as hubmap_api_admin_v1
from hubmap_api.manager import aav2 as hubmap_api_admin_v2
from hubmap_api.manager import aav3 as hubmap_api_admin_v3
from hubmap_api.manager import aav4 as hubmap_api_admin_v4
from hubmap_api.manager import aav5 as hubmap_api_admin_v5
from hubmap_api.manager import aav6 as hubmap_api_admin_v6
from hubmap_api.manager import blueprint as hubmap_api_blueprint
class AirflowHuBMAPPlugin(AirflowPlugin):
name = "hubmap_api"
operators = []
sensors = []
hooks = []
executors = []
macros = []
admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3,
hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6]
flask_blueprints = [hubmap_api_blueprint]
menu_links = []
appbuilder_views = []
appbuilder_menu_items = []
global_operator_extra_links = []
| 1.460938 | 1 |
giggleliu/tba/hgen/setup.py | Lynn-015/Test_01 | 2 | 12799461 | '''
Setup file for Operator and Hamiltonian Generators.
'''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config=Configuration('hgen',parent_package,top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| 1.40625 | 1 |
hoodapp/migrations/0014_auto_20220110_1119.py | lizgi/Hood-Watch | 0 | 12799462 | # Generated by Django 3.2.9 on 2022-01-10 08:19
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hoodapp', '0013_auto_20220110_1102'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={'ordering': ['-pk']},
),
migrations.RemoveField(
model_name='post',
name='image',
),
migrations.AddField(
model_name='business',
name='image',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image'),
),
]
| 1.789063 | 2 |
src/googleapis/codegen/filesys/package_writer_foundry.py | aiuto/google-apis-client-generator | 178 | 12799463 | <reponame>aiuto/google-apis-client-generator
#!/usr/bin/python2.7
"""Foundary for getting a package writer."""
from googleapis.codegen.filesys import filesystem_library_package
from googleapis.codegen.filesys import single_file_library_package
from googleapis.codegen.filesys import tar_library_package
from googleapis.codegen.filesys import zip_library_package
def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'):
"""Get an output writer for a package."""
if not (output_dir or output_file):
raise ValueError(
'GetPackageWriter requires either output_dir or output_file')
if output_dir and output_file:
raise ValueError(
'GetPackageWriter requires only one of output_dir or output_file')
if output_dir:
package_writer = filesystem_library_package.FilesystemLibraryPackage(
output_dir)
else:
out = open(output_file, 'w')
if output_format == 'tgz':
package_writer = tar_library_package.TarLibraryPackage(out)
elif output_format == 'tar':
package_writer = tar_library_package.TarLibraryPackage(out,
compress=False)
elif output_format == 'txt':
package_writer = single_file_library_package.SingleFileLibraryPackage(out)
else:
package_writer = zip_library_package.ZipLibraryPackage(out)
return package_writer
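# A hedged usage sketch (not part of the original module); the output paths are hypothetical:
#
#   writer = GetPackageWriter(output_dir='/tmp/generated_library')                  # writes files to a directory
#   writer = GetPackageWriter(output_file='/tmp/library.tgz', output_format='tgz')  # writes a tarball
#
# Exactly one of output_dir / output_file must be given, otherwise a ValueError is raised.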
| 2.296875 | 2 |
PYTHON/generateQRcode.py | kunalmpandey/open-source-contribution | 2 | 12799464 | <gh_stars>1-10
# Import QRCode from pyqrcode
import pyqrcode
import png
from pyqrcode import QRCode
print("WELCOME TO THE QR CODE GENERATION")
# Take input
name = input("Enter Name : ")
stream = input("Enter Stream : ")
collegename = input("Enter Name of College : ")
# String which represents the QR code
s = "Name : "+ name + "\nStream : " + stream +"\nCollege Name : " + collegename
# Generate QR code
url = pyqrcode.create(s)
# Create and save the png file naming "myqr.png"
url.png('myqr.png', scale = 6)
| 3.296875 | 3 |
utils/files_chain.py | syth0le/mat_mod_labs | 0 | 12799465 | from abc import ABCMeta, abstractmethod
from typing import Optional
import json
def error_catcher(method):
def wrapper(*args, **kwargs):
try:
return method(*args, **kwargs)
except (AttributeError, ValueError):
return "File error: указан неверный тип файла."
return wrapper
class AbstractHandler(metaclass=ABCMeta):
"""The Interface for handling requests."""
@abstractmethod
def set_successor(self, successor):
"""Set the next handler in the chain"""
pass
@abstractmethod
def handle(self, file) -> Optional[str]:
"""Handle the event"""
pass
class JSON(AbstractHandler):
def __init__(self):
self._successor = None
self._temp: list = list()
def set_successor(self, successor):
self._successor = successor
return successor
@error_catcher
def handle(self, FILE):
"""Handle the *.json file event"""
file_name, file_ext = str(FILE).split(".")
if file_ext == self.__class__.__name__.lower():
with open(FILE, "r") as f:
self.deserialization(json.load(f))
return self.getter()
else:
return self._successor.handle(FILE)
def deserialization(self, data):
length = len(data['x'])
for i in range(length):
x_temp = list(map(float, data['x'][i]))
y_temp = list(map(float, data['y'][i]))
temp = [x_temp, y_temp]
self._temp.append(temp)
def __repr__(self):
return f"{self.__class__.__name__}"
def getter(self):
return self._temp
class TXT(AbstractHandler):
def __init__(self):
self._successor = None
self._temp: list = list()
def set_successor(self, successor):
self._successor = successor
return successor
@error_catcher
def handle(self, FILE):
"""Handle the *.txt file event"""
file_name, file_ext = str(FILE).split(".")
if file_ext == self.__class__.__name__.lower():
with open(FILE, "r") as f:
for line in f.read().split('\n'):
reformat_line = line[1:-1].split('];[')
a = [list(map(float, elem.split(','))) for elem in reformat_line]
self._temp.append(a)
return self.getter()
else:
return self._successor.handle(FILE)
def __repr__(self):
return f"{self.__class__.__name__}"
def getter(self):
return self._temp
class CSV(AbstractHandler):
def __init__(self):
self._successor = None
self._temp: list = list()
def set_successor(self, successor):
self._successor = successor
return successor
@error_catcher
def handle(self, FILE):
"""Handle the *.csv file event"""
file_name, file_ext = str(FILE).split(".")
if file_ext == self.__class__.__name__.lower():
with open(FILE, "r") as f:
for line in f.read().split(',\n'):
reformat_line = line[1:-1].split('","')
a = [list(map(float, elem.split(','))) for elem in reformat_line]
self._temp.append(a)
return self.getter()
else:
return self._successor.handle(FILE)
def __repr__(self):
return f"{self.__class__.__name__}"
def getter(self):
return self._temp
class FilesChain:
def __init__(self):
self.chain1 = JSON()
self.chain2 = TXT()
self.chain3 = CSV()
# set the chain of responsibility
# The Client may compose chains once or
# the hadler can set them dynamically at
# handle time
self.chain1.set_successor(self.chain2).set_successor(self.chain3)
def client_code(self):
FILE = str(input("Input file name: "))
return self.chain1.handle(FILE)
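# A minimal, hedged usage sketch (not part of the original module): write a tiny JSON
# payload in the format deserialization() expects, then let the chain dispatch on the
# file extension. 'sample_points.json' is a throwaway file created only for this demo.
if __name__ == '__main__':
    sample = {'x': [[0, 1, 2]], 'y': [[0.0, 1.0, 4.0]]}
    with open('sample_points.json', 'w') as fh:
        json.dump(sample, fh)
    chain = FilesChain()
    print(chain.chain1.handle('sample_points.json'))
    # -> [[[0.0, 1.0, 2.0], [0.0, 1.0, 4.0]]]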
| 3.25 | 3 |
benchmarks/toolkit/methods/utils.py | SergioRAgostinho/cvxpnpl | 51 | 12799466 | <filename>benchmarks/toolkit/methods/utils.py
from importlib import import_module
import numpy as np
# Dynamically import matlab
_matlab = None
_matlab_engine = None
try:
_matlab = import_module("matlab")
_matlab.engine = import_module("matlab.engine")
except ModuleNotFoundError:
pass
def init_matlab():
global _matlab_engine
if _matlab is None:
return None
if _matlab_engine is not None:
return _matlab_engine
# start the engine
print("Launching MATLAB Engine: ", end="", flush=True)
_matlab_engine = _matlab.engine.start_matlab()
print("DONE", flush=True)
return _matlab_engine
class VakhitovHelper:
"""Utility functions to prepare inputs for what is requested
by functions in Vakhitov's pnpl toolbox. We adopt the same naming
convention the author used.
"""
def lines(line_2d, line_3d, K):
# set up bearing vectors
bear = np.linalg.solve(
K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 * len(line_2d)))))
).T[:, :-1]
bear = bear.reshape((-1, 2, 2))
# Split points into start and end points
xs = _matlab.double(bear[:, 0, :].T.tolist())
xe = _matlab.double(bear[:, 1, :].T.tolist())
Xs = _matlab.double(line_3d[:, 0, :].T.tolist())
Xe = _matlab.double((line_3d[:, 1, :]).T.tolist())
return xs, xe, Xs, Xe
def points(pts_2d, pts_3d, K):
# set up bearing vectors
bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d))))))
# Rename vars to PnPL convention
xxn = _matlab.double(bear[:-1].tolist())
XXw = _matlab.double(pts_3d.T.tolist())
return xxn, XXw
| 2.390625 | 2 |
ex5.py | ppedraum/infosatc-lp-avaliativo-02 | 0 | 12799467 | <reponame>ppedraum/infosatc-lp-avaliativo-02
#5
lista = ["laranja", "banana", "maçã", "goiaba", "romã"]
if "laranja" in lista:
print("Laranja está na lista.")
else:
print("Laranja não está na lista")
| 3.6875 | 4 |
examples/amac/__init__.py | acracker/ruia | 0 | 12799468 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-01-17 13:48
# @Author : pang
# @File : __init__.py.py
# @Software: PyCharm
| 1.164063 | 1 |
garage/envs/dm_control/dm_control_viewer.py | shadiakiki1986/garage | 3 | 12799469 | <reponame>shadiakiki1986/garage<gh_stars>1-10
import numpy as np
import pygame
CAPTION = "dm_control viewer"
class DmControlViewer:
def __init__(self):
pygame.init()
pygame.display.set_caption(CAPTION)
self.screen = None
def loop_once(self, image):
image = np.swapaxes(image, 0, 1)
if not self.screen:
self.screen = pygame.display.set_mode((image.shape[0],
image.shape[1]))
pygame.surfarray.blit_array(self.screen, image)
pygame.display.flip()
def finish(self):
pygame.quit()
| 2.84375 | 3 |
topboard_sdk/api/topboard/update_comment_pb2.py | easyopsapis/easyops-api-python | 5 | 12799470 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update_comment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2
from topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2
from topboard_sdk.model.cmdb import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='update_comment.proto',
package='topboard',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x14update_comment.proto\x12\x08topboard\x1a)topboard_sdk/model/topboard/comment.proto\x1a-topboard_sdk/model/topboard/issue_basic.proto\x1a\"topboard_sdk/model/cmdb/user.proto\"M\n\x14UpdateCommentRequest\x12\x11\n\tcommentID\x18\x01 \x01(\t\x12\"\n\x07\x63omment\x18\x02 \x01(\x0b\x32\x11.topboard.Comment\"q\n\x1cUpdateCommentResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x1f\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x11.topboard.Commentb\x06proto3')
,
dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,])
_UPDATECOMMENTREQUEST = _descriptor.Descriptor(
name='UpdateCommentRequest',
full_name='topboard.UpdateCommentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='commentID', full_name='topboard.UpdateCommentRequest.commentID', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=160,
serialized_end=237,
)
_UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor(
name='UpdateCommentResponseWrapper',
full_name='topboard.UpdateCommentResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='topboard.UpdateCommentResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='topboard.UpdateCommentResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='topboard.UpdateCommentResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=239,
serialized_end=352,
)
_UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT
_UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT
DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST
DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATECOMMENTREQUEST,
'__module__' : 'update_comment_pb2'
# @@protoc_insertion_point(class_scope:topboard.UpdateCommentRequest)
})
_sym_db.RegisterMessage(UpdateCommentRequest)
UpdateCommentResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateCommentResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _UPDATECOMMENTRESPONSEWRAPPER,
'__module__' : 'update_comment_pb2'
# @@protoc_insertion_point(class_scope:topboard.UpdateCommentResponseWrapper)
})
_sym_db.RegisterMessage(UpdateCommentResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 1.21875 | 1 |
setup.py | AZdet/causal-infogan | 0 | 12799471 | from setuptools import setup
import numpy
setup(
name='CIGAN',
version='0.2dev',
packages=['vpa'],
license='MIT License',
include_dirs=[numpy.get_include(),],
) | 1.132813 | 1 |
apps/CUP3D_LES_HIT/computeTrainedSpectraErrors.py | slitvinov/smarties | 0 | 12799472 | #!/usr/bin/env python3
import re, argparse, numpy as np, glob, os
#from sklearn.neighbors.kde import KernelDensity
import matplotlib.pyplot as plt
from extractTargetFilesNonDim import epsNuFromRe
from extractTargetFilesNonDim import getAllData
from computeSpectraNonDim import readAllSpectra
colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99']
colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999']
#colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928']
#colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da']
def findDirectory(runspath, re, token):
retoken = 'RE%03d' % re
alldirs = glob.glob(runspath + '/*')
for dirn in alldirs:
if retoken not in dirn: continue
if token not in dirn: continue
return dirn
assert False, 're-token combo not found'  # asserting a non-empty tuple would always pass
def main_integral(runspath, target, REs, tokens, labels):
nBins = 2 * 16//2 - 1
modes = np.arange(1, nBins+1, dtype=np.float64) # assumes box is 2 pi
plt.figure()
#REs = findAllParams(path)
nRes = len(REs)
axes, lines = [], []
for j in range(nRes):
axes += [ plt.subplot(1, nRes, j+1) ]
for j in range(nRes):
RE = REs[j]
# read target file
logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE])
for i in range(len(tokens)):
eps, nu = epsNuFromRe(RE)
dirn = findDirectory(runspath, RE, tokens[i])
runData = getAllData(dirn, eps, nu, nBins, fSkip=1)
logE = np.log(runData['spectra'])
avgLogSpec = np.mean(logE, axis=0)
assert(avgLogSpec.size == nBins)
LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel()
print(LL.shape)
p = axes[j].plot(LL, modes, label=labels[i], color=colors[i])
#p = axes[j].plot(LL, modes, color=colors[i])
if j == 0: lines += [p]
#stdLogSpec = np.std(logE, axis=0)
#covLogSpec = np.cov(logE, rowvar=False)
#print(covLogSpec.shape)
axes[0].set_ylabel(r'$k$')
for j in range(nRes):
axes[j].set_title(r'$Re_\lambda$ = %d' % REs[j])
#axes[j].set_xscale("log")
axes[j].set_ylim([1, 15])
axes[j].grid()
axes[j].set_xlabel(r'$\frac{\log E(k) - \mu_{\log E(k)}}{\sigma_{\log E(k)}}$')
for j in range(1,nRes): axes[j].set_yticklabels([])
#axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0)
assert(len(lines) == len(labels))
#axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5))
axes[0].legend(bbox_to_anchor=(0.5, 0.5))
plt.tight_layout()
plt.show()
#axes[0].legend(loc='lower left')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description = "Compute a target file for RL agent from DNS data.")
parser.add_argument('--target', help="Path to target files directory")
parser.add_argument('--tokens', nargs='+', help="Text token distinguishing each series of runs")
parser.add_argument('--res', nargs='+', type=int, help="Reynolds numbers")
parser.add_argument('--labels', nargs='+', help="Plot labels to associate with tokens")
parser.add_argument('--runspath', help="Path to the directory containing the runs")
args = parser.parse_args()
assert(len(args.tokens) == len(args.labels))
main_integral(args.runspath, args.target, args.res, args.tokens, args.labels)
| 1.953125 | 2 |
python/lastfactorialdigit.py | twirrim/kattis | 0 | 12799473 | #!/usr/bin/env python3
""" An attempt to solve the Last Factorial Digit """
import sys
# This is totally wrong, but given N maxes out at 10, and anything after 5 the last digit is 0,
# this is likely cheaper and faster
result_dict = {1: 1,
2: 2,
3: 6,
4: 4}
dont_care = sys.stdin.readline()
for line in sys.stdin.readlines():
number = int(line.rstrip())
if number >= 5:
print(0)
else:
print(result_dict[number])
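# A hedged cross-check of the lookup-table shortcut above (kept as comments so it does not
# interfere with the stdin-driven solution): computing N! directly confirms that the last
# digit is 0 for every N >= 5.
#
#   from math import factorial
#   for n in range(1, 11):
#       print(n, factorial(n) % 10)   # 1:1, 2:2, 3:6, 4:4, 5..10: 0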
| 3.4375 | 3 |
ptptest/pytz.py | chrisy/ptptest | 3 | 12799474 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# Copyright (c) 2014 <NAME> <<EMAIL>>
#
"""Pytz dummy module"""
# This empty module with the name pytz.py fools
# bson.py into loading; we then provide the only
# pytz-reference used by bson - 'utc'
from datetime import timedelta
from datetime import tzinfo

# zero UTC offset used by the dummy tzinfo below
ZERO = timedelta(0)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
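# A quick sanity-check sketch (not part of the original module):
#
#   from datetime import datetime
#   datetime(2014, 1, 1, tzinfo=utc).utcoffset()   # -> timedelta(0)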
| 2.359375 | 2 |
apps/execution_node_editor/main.py | beyse/NodeEditor | 3 | 12799475 | import os, sys
from PyQt5 import QtCore, QtGui
from qtpy.QtWidgets import QApplication
import ctypes
from sys import platform
sys.path.insert(0, os.path.join( os.path.dirname(__file__), "..", ".." ))
from window import ExecutionNodeEditorWindow
if __name__ == '__main__':
app = QApplication(sys.argv)
exe_path = os.path.dirname(os.path.realpath(sys.argv[0]))
assets_dir = os.path.join(exe_path, 'assets')
for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')):
        for f in filenames:
            # addApplicationFont needs the full path and returns -1 on failure
            font_id = QtGui.QFontDatabase.addApplicationFont(os.path.join(dirpath, f))
            if font_id == -1:
                print("Could not load font")
                sys.exit(-1)
# print(QStyleFactory.keys())
app.setStyle('Fusion')
app_icon = QtGui.QIcon()
app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16))
app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24))
app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32))
app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48))
app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64))
app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128))
app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256))
app.setWindowIcon(app_icon)
if platform == "win32":
# Windows...
#This will make sure that the app icon is set in the taskbar on windows
# See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105
myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
wnd = ExecutionNodeEditorWindow()
wnd.setWindowIcon(app_icon)
wnd.show()
wnd.actNew.trigger()
if len(sys.argv) == 2:
wnd.openFile(sys.argv[1])
sys.exit(app.exec_())
| 2.1875 | 2 |
pyunsplash/tests/test_users.py | mmangione/pyunsplash | 40 | 12799476 | <filename>pyunsplash/tests/test_users.py
###############################################################################
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# File: test_users.py
#
# Author: <NAME> <<EMAIL>>
# Date: 14 Dec 2016
# Purpose: users unit tests
#
# Revision: 1
# Comment: What's new in revision 1
# use local resources
#
###############################################################################
import responses
import json
import os
from pyunsplash import PyUnsplash
from pyunsplash.src.settings import API_ROOT
api_key = os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID'
class TestUsers:
# TODO: avoid code duplication
# Need to work out how to combine responses.activate so as to avoid
# code duplication, as the testcases are pretty much the same for all
# TOXINIDIR comes from tox.ini
root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None)
store_mapping = {
'salvoventura':
os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']),
'salvoventura_statistics':
os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json'])
}
@responses.activate
def test_stats_total(self):
type = 'salvoventura'
resource_filepath = self.store_mapping[type]
stored_response = json.loads(open(resource_filepath).read())
responses.add(
responses.GET,
'{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url, because the class always inits without query params
json=stored_response.get('body'),
status=stored_response.get('status_code'),
content_type='application/json',
adding_headers=stored_response.get('headers')
)
pu_obj = PyUnsplash(api_key=api_key)
this_user = pu_obj.user(source=type)
print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos)
# TODO: collections, photos and users from the user object
@responses.activate
def test_user_stats(self):
username = 'salvoventura'
# Add the user api response
type = 'salvoventura'
resource_filepath = self.store_mapping[type]
stored_response = json.loads(open(resource_filepath).read())
responses.add(
responses.GET,
'{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url, because the class always inits without query params
json=stored_response.get('body'),
status=stored_response.get('status_code'),
content_type='application/json',
adding_headers=stored_response.get('headers')
)
# Add the user statistics api response
type = 'salvoventura_statistics'
resource_filepath = self.store_mapping[type]
stored_response = json.loads(open(resource_filepath).read())
responses.add(
responses.GET,
'{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url, because the class always inits without query params
json=stored_response.get('body'),
status=stored_response.get('status_code'),
content_type='application/json',
adding_headers=stored_response.get('headers')
)
pu_obj = PyUnsplash(api_key=api_key)
this_user = pu_obj.user(source=username) # create a User object
this_user_stats = this_user.statistics() # fetch a UserStatistics object
print(this_user_stats.downloads.get('total'), this_user_stats.views.get('total'), this_user_stats.likes.get('total'))
| 2.15625 | 2 |
ex097.py | honeyhugh/PythonCurso | 0 | 12799477 | <gh_stars>0
from math import trunc
def escreva(msg):
c = trunc(len(msg)/2)
print(f'{"-=":^}' * (c + 2))
print(f' {msg}')
print(f'{"-=":^}' * (c + 2))
# Main program
n = input('Enter a message: ')
escreva(n)
| 3.171875 | 3 |
app.py | yaniv-aknin/multiviz | 0 | 12799478 | <gh_stars>0
#!/usr/bin/env python
import subprocess
from flask import Flask, request
app = Flask(__name__)
class OptionGraph:
def __init__(self):
self.header = []
self.footer = []
self.sections = {}
self.current = self.header
def parse(self, filename):
def parse_label(line):
return line.strip().split()[1]
current = self.header
with open(filename) as handle:
for line in handle:
if '#*/' in line:
assert current is not self.header, 'stop before first section'
assert current is not self.footer, 'footer has no stop'
current = self.footer
continue
if '/*#' in line:
assert current is self.header or current is self.footer, 'nested section'
label = parse_label(line)
current = self.sections[label] = []
continue
current.append(line)
return self
def emit(self, query=()):
result = []
result.extend(self.header)
for label, section in self.sections.items():
if label in query:
result.extend(section)
result.extend(self.footer)
return "".join(result)
def list(self):
return self.sections
graph = OptionGraph().parse('graph.dot')
@app.route('/')
def index():
with open('index.html') as handle:
return handle.read()
@app.route('/sections')
def sections():
return {'sections': [{'section': section} for section in graph.list()]}
@app.route('/render')
def render():
query = request.args.getlist('sections')
dot = graph.emit(query).encode()
result = subprocess.run('dot -Tsvg', input=dot, capture_output=True, shell=True).stdout
return result, 200, {'Content-Type': 'image/svg+xml'}
if __name__ == '__main__':
app.run(debug=True)
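# A hypothetical graph.dot illustrating the section markers parse() looks for
# (the real file is not included here):
#
#   digraph G {
#     rankdir=LR;
#     /*# frontend
#     web -> api;
#     #*/
#     /*# storage
#     api -> db;
#     #*/
#   }
#
# Everything before the first '/*#' goes to the header, lines outside a section after a
# '#*/' accumulate in the footer, and each labelled block is emitted only when its label
# appears in the 'sections' query parameters.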
| 2.578125 | 3 |
config.py | dodo0822/demucs-frontend | 1 | 12799479 | from environs import Env
env = Env()
env.read_env()
db_host = env.str('DB_HOST', 'localhost')
db_port = env.int('DB_PORT', 27017) | 1.953125 | 2 |
easyquotation_enhance/__init__.py | veink-y/easyquotation_enhance | 5 | 12799480 | from .sina import SinaQuotation
from .tencent import TencentQuotation
from .helpers import update_stock_codes, stock_a_hour
__version__ = "0.0.0.1"
__author__ = "demonfinch"
| 1.304688 | 1 |
Python Files/ImageSizeEqualizer.py | suneel87/Online_Voting_System | 0 | 12799481 | import cv2
def image_equalize(imgA, imgB):
    # Resize both images to the larger of the two shapes.
    # Note: numpy shapes are (height, width[, channels]) while cv2.resize expects (width, height).
    target_h, target_w = max(imgA.shape[:2], imgB.shape[:2])
    new_imgA = cv2.resize(imgA, (target_w, target_h))
    new_imgB = cv2.resize(imgB, (target_w, target_h))
    return new_imgA, new_imgB
| 2.796875 | 3 |
bostaSDK/pickup/get/__init__.py | bostaapp/bosta-python | 0 | 12799482 | from .GetPickupDetailsRequest import GetPickupDetailsRequest
from .GetPickupDetailsResponse import GetPickupDetailsResponse
| 1.140625 | 1 |
kegs/views.py | akithegood/ohsiha2020 | 0 | 12799483 | <filename>kegs/views.py
from django.shortcuts import render, redirect, get_object_or_404
from kegs.forms import BeerForm, KegForm
from kegs.models import Beer, Keg
# Create your views here.
def keg_detail(request, pk):
beer_obj = Beer.objects.get(pk=pk)
keg_objs = Keg.objects.filter(beer_id=beer_obj.id)
context = {
'kegs': keg_objs,
'beers': beer_obj,
}
return render(request, 'keg_detail.html', context)
def beer_list(request, template_name='beer_list.html'):
beer = Beer.objects.all()
data = {}
data['object_list'] = beer
return render(request, template_name, data)
def keg_list(request, template_name='keg_list.html'):
keg = Keg.objects.all()
data = {}
data['object_list'] = keg
return render(request, template_name, data)
def beer_create(request, template_name='beer_form.html'):
form = BeerForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('kegs:beer_list')
return render(request, template_name, {'form':form})
def keg_create(request, template_name='keg_form.html'):
form = KegForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('kegs:keg_list')
return render(request, template_name, {'form':form})
def beer_update(request, pk, template_name='beer_form.html'):
beer = get_object_or_404(Beer, pk=pk)
form = BeerForm(request.POST or None, instance=beer)
if form.is_valid():
form.save()
return redirect('kegs:beer_list')
return render(request, template_name, {'form':form})
def keg_update(request, pk, template_name='keg_form.html'):
keg = get_object_or_404(Keg, pk=pk)
form = KegForm(request.POST or None, instance=keg)
if form.is_valid():
form.save()
return redirect('kegs:keg_list')
return render(request, template_name, {'form':form})
def beer_delete(request, pk, template_name='beer_confirm_delete.html'):
beer = get_object_or_404(Beer, pk=pk)
if request.method=='POST':
beer.delete()
return redirect('kegs:beer_list')
return render(request, template_name, {'object':beer})
def keg_delete(request, pk, template_name='keg_confirm_delete.html'):
keg = get_object_or_404(Keg, pk=pk)
if request.method=='POST':
keg.delete()
return redirect('kegs:keg_list')
return render(request, template_name, {'object':keg}) | 2.28125 | 2 |
time_cross_validation/TimeCV.py | rick12000/time-series-cross-validation | 0 | 12799484 | <reponame>rick12000/time-series-cross-validation
class TimeCV:
def __init__(self, X, train_sample_size = None, test_sample_size = None, step = 1):
#initiate variables:
self.X = X
self.train_sample_size = train_sample_size
self.test_sample_size = test_sample_size
self.step = step
if train_sample_size == None:
self.train_sample_size = max(1,round(len(X)/3))
if test_sample_size == None:
self.test_sample_size = max(1, round(len(X)/10))
#error handling:
if len(X) == 0:
raise IndexError("input array 'X' cannot have length zero.")
if len(X) == 1:
raise IndexError("input array 'X' cannot have length 1.")
        # use the resolved attributes so the checks also work when the defaults were applied above
        if self.train_sample_size > len(X):
            raise IndexError("train_sample_size cannot be larger than length of input variable (X).")
        if self.test_sample_size > len(X):
            raise IndexError("test_sample_size cannot be larger than length of input variable (X).")
        if self.step > len(X):
            raise IndexError("step cannot be larger than length of input variable.")
def rolling_train_test_split(self):
list_of_indexes = []
for i in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step):
train_index = list(range(0+i,self.train_sample_size+i))
test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size))
list_of_indexes.append((train_index, test_index))
return list_of_indexes
def expanding_train_test_split(self):
list_of_indexes = []
for i in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step):
train_index = list(range(0,self.train_sample_size+i))
test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size))
list_of_indexes.append((train_index, test_index))
return list_of_indexes | 3.25 | 3 |
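# Hedged usage sketch (added for illustration, not part of the original file):
# exercises the two split strategies of the TimeCV class above on a made-up
# 12-element series; assumes TimeCV is in scope.
if __name__ == "__main__":
    series = list(range(12))
    cv = TimeCV(series, train_sample_size=6, test_sample_size=2, step=2)
    print(cv.rolling_train_test_split())    # fixed-length training windows
    print(cv.expanding_train_test_split())  # training window grows each fold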
src/data/preprocess.py | KunalK27/Automatic-License-Plate-Recognition | 26 | 12799485 | import click
import pandas as pd
@click.command()
@click.option("--input-path", "-i", default = "data/0_raw/", required=True,
help="Path to csv file to be processed.",
)
@click.option("--output-path", "-o", default="data/3_processed/",
help="Path to csv file to store the result.")
def main(input_path, output_path):
""" Runs data processing scripts to read raw data (../0_raw) and convert it into
processed csv file (../3_processed) to be used for further analysis.
"""
print("Preprocessing indian_license_plate.csv")
df = pd.read_csv(input_path+"indian_license_plates.csv", dtype={'image_name':str})
df["image_name"] = df["image_name"] + ".jpg"
df.to_csv(output_path+"processed.csv", index=False)
print("Preprocessed and saved as processed.csv")
if __name__ == '__main__':
main()
| 3.75 | 4 |
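# Hedged invocation example (added for illustration, not from the original
# repository): with the click defaults above, an explicit call could look like
#   python preprocess.py --input-path data/0_raw/ --output-path data/3_processed/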
frameworks/pycellchem-2.0/src/RD/WritePNG.py | danielrcardenas/ac-course-2017 | 0 | 12799486 | <reponame>danielrcardenas/ac-course-2017
#---------------------------------------------------------------------------
#
# WritePNG.py: writes compressed, true-color RGBA PNG files
#
# RGBA stands for "Red Green Blue Alpha", where alpha is the opacity level
#
# extracted from:
# http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image
#
# Original source code:
# https://developer.blender.org/diffusion/B/browse/master/release/bin/blender-thumbnailer.py$155
#
def write_png(buf, width, height):
# by ideasman42, 2013-10-04, stackoverflow.com
""" buf: must be bytes or a bytearray in py3, a regular string in py2. formatted RGBARGBA... """
import zlib, struct
# reverse the vertical line order and add null bytes at the start
width_byte_4 = width * 4
raw_data = b''.join(b'\x00' + buf[span:span + width_byte_4]
for span in range((height - 1) * width * 4, -1, - width_byte_4))
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack("!I", len(data)) +
chunk_head +
struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
return b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
def test_write_png():
# a red square:
buf=b'\xFF\x00\x00\xFF'
n=9
imgsize=2**n # generate an image of size imgsize x imgsize pixels
for i in range(2*n):
buf = buf + buf
print "len=", len(buf)/4
# The data should be written directly to a file opened as binary, as in:
data = write_png(buf, imgsize, imgsize)
with open("my_image.png", 'wb') as fd:
fd.write(data)
def saveAsPNG(array, filename):
# by <NAME>, 2014-01-10, stackoverflow.com
import struct
if any([len(row) != len(array[0]) for row in array]):
raise ValueError, "Array should have elements of equal size"
#First row becomes top row of image.
flat = []; map(flat.extend, reversed(array))
#Big-endian, unsigned 32-byte integer.
buf = b''.join([struct.pack('>I', ((0xffFFff & i32)<<8)|(i32>>24) )
for i32 in flat]) #Rotate from ARGB to RGBA.
data = write_png(buf, len(array[0]), len(array))
f = open(filename, 'wb')
f.write(data)
f.close()
def test_save_png():
import numpy as np
a = np.empty((2,2), np.uint32)
a.fill(0xFF)
r = np.empty((2,2), np.uint32)
r[0,0] = 0xFF
r[0,1] = 0xFF
g = np.empty((2,2), np.uint32)
g[0,1] = 0xFF
b = np.empty((2,2), np.uint32)
b[1,1] = 0xFF
tot = (a << 24) | (r << 16) | (g << 8) | b
print tot
saveAsPNG(tot, 'test_grid.png')
#saveAsPNG([[0xffff0000, 0xffFFFF00],
# [0xff00aa77, 0xff333333]], 'test_grid.png')
if __name__ == '__main__':
test_save_png()
| 2.875 | 3 |
lib/10x/10xfastq.py | shengqh/ngsperl | 6 | 12799487 | <filename>lib/10x/10xfastq.py<gh_stars>1-10
import argparse
import sys
import logging
import os
import csv
import gzip
DEBUG=False
NotDEBUG=not DEBUG
parser = argparse.ArgumentParser(description="Extract barcode and UMI from 10x fastq first read file.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-1', '--input1', action='store', nargs='?', help='Input 10x first read file', required=NotDEBUG)
parser.add_argument('-2', '--input2', action='store', nargs='?', help='Input 10x second read file', required=NotDEBUG)
parser.add_argument('-o', '--output1', action='store', nargs='?', help="Output first read file", required=NotDEBUG)
parser.add_argument('-p', '--output2', action='store', nargs='?', help="Output second read file", required=NotDEBUG)
parser.add_argument('-b', '--barcodeFile', action='store', nargs='?', help="Input barcode white list file", required=NotDEBUG)
args = parser.parse_args()
if DEBUG:
args.input1 = "/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R1_001.fastq.gz"
args.input2 = "/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R2_001.fastq.gz"
args.output1 = "/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R1_001.processed.fastq.gz"
args.output2 = "/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R2_001.processed.fastq.gz"
args.barcodeFile = "/data/cqs/jonathan_brown_data/3804/Count/3804-LD-2/filtered_feature_bc_matrix/barcodes.tsv.gz"
logger = logging.getLogger('10xFastq')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
barcodes = set()
with gzip.open(args.barcodeFile, 'rt') as fin:
for barcode in fin:
barcode = barcode.rstrip()
barcode = barcode.replace("-1", "")
barcodes.add(barcode)
logger.info("Total %d barcode in whitelist" % len(barcodes))
tmpFile1 = args.output1 + ".tmp.gz"
tmpFile2 = args.output2 + ".tmp.gz"
fin1count = 0
logger.info("Processing reads ...")
with gzip.open(args.input1, 'rt') as fin1:
with gzip.open(args.input2, 'rt') as fin2:
count = 0
with gzip.open(tmpFile1, "wt") as fout1:
with gzip.open(tmpFile2, "wt") as fout2:
while True:
query = fin1.readline()
if query == "":
break
seq = fin1.readline()
sig = fin1.readline()
score = fin1.readline()
fin1count += 1
if fin1count % 100000 == 0:
logger.info("processed %d reads ..." % fin1count)
q2 = fin2.readline()
seq2 = fin2.readline()
sig2 = fin2.readline()
score2 = fin2.readline()
barcode = seq[:16]
if not (barcode in barcodes):
continue
count = count + 1
umi = seq[16:26]
seq = seq[26:]
score = score[26:]
query = "@q%d:%s:%s\n" % (count, barcode, umi)
fout1.write(query)
fout1.write(seq)
fout1.write(sig)
fout1.write(score)
fout2.write(query)
fout2.write(seq2)
fout2.write(sig2)
fout2.write(score2)
#
# if count == 1000:
# break
if os.path.isfile(args.output1):
os.remove(args.output1)
if os.path.isfile(args.output2):
os.remove(args.output2)
os.rename(tmpFile1, args.output1)
os.rename(tmpFile2, args.output2)
logger.info("done.")
| 2.484375 | 2 |
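# Hedged invocation example (added for illustration; the file names below are
# placeholders, not from the original repository):
#   python 10xfastq.py -1 sample_R1.fastq.gz -2 sample_R2.fastq.gz \
#       -o sample_R1.processed.fastq.gz -p sample_R2.processed.fastq.gz \
#       -b filtered_feature_bc_matrix/barcodes.tsv.gz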
tests/test_remote.py | inverse/Hue-remotes-HASS | 0 | 12799488 | """Tests for remote.py."""
import logging
from datetime import timedelta
import pytest
from custom_components.hueremote import DOMAIN
from custom_components.hueremote.data_manager import HueSensorData
from custom_components.hueremote.hue_api_response import (
parse_hue_api_response,
parse_rwl,
parse_zgp,
parse_z3_rotary,
)
from custom_components.hueremote.remote import async_setup_platform, HueRemote
from .conftest import (
DEV_ID_REMOTE_1,
entity_test_added_to_hass,
patch_async_track_time_interval,
)
from .api_samples import (
MOCK_RWL,
MOCK_ZGP,
MOCK_Z3_ROTARY,
PARSED_RWL,
PARSED_ZGP,
PARSED_Z3_ROTARY,
)
@pytest.mark.parametrize(
"raw_response, sensor_key, parsed_response, parser_func",
(
(MOCK_ZGP, "ZGP_00:44:23:08", PARSED_ZGP, parse_zgp),
(MOCK_RWL, "RWL_00:17:88:01:10:3e:3a:dc-02", PARSED_RWL, parse_rwl),
(
MOCK_Z3_ROTARY,
"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00",
PARSED_Z3_ROTARY,
parse_z3_rotary,
),
),
)
def test_parse_remote_raw_data(
raw_response, sensor_key, parsed_response, parser_func, caplog
):
"""Test data parsers for known remotes and check behavior for unknown."""
assert parser_func(raw_response) == parsed_response
unknown_sensor_data = {"modelid": "new_one", "uniqueid": "ff:00:11:22"}
assert parse_hue_api_response(
[raw_response, unknown_sensor_data, raw_response]
) == {sensor_key: parsed_response}
assert len(caplog.messages) == 0
async def test_platform_remote_setup(mock_hass, caplog):
"""Test platform setup for remotes."""
with caplog.at_level(logging.DEBUG):
with patch_async_track_time_interval():
await async_setup_platform(
mock_hass,
{"platform": "hueremote", "scan_interval": timedelta(seconds=3)},
lambda *x: logging.warning("Added remote entity: %s", x[0]),
)
assert DOMAIN in mock_hass.data
data_manager = mock_hass.data[DOMAIN]
assert isinstance(data_manager, HueSensorData)
assert len(data_manager.registered_entities) == 1
assert data_manager._scan_interval == timedelta(seconds=3)
assert len(data_manager.data) == 1
assert DEV_ID_REMOTE_1 in data_manager.data
assert len(data_manager.sensors) == 0
assert len(data_manager.registered_entities) == 1
remote = data_manager.registered_entities[DEV_ID_REMOTE_1]
assert not remote.hass
await entity_test_added_to_hass(data_manager, remote)
# await remote.async_added_to_hass()
assert len(data_manager.sensors) == 1
assert DEV_ID_REMOTE_1 in data_manager.sensors
assert isinstance(remote, HueRemote)
assert remote.hass
assert remote.force_update
assert remote.state == "3_click"
assert remote.icon == "mdi:remote"
assert not remote.should_poll
assert "last_updated" in remote.device_state_attributes
assert remote.unique_id == DEV_ID_REMOTE_1
await remote.async_will_remove_from_hass()
assert len(data_manager.sensors) == 0
assert len(data_manager.registered_entities) == 0
assert not data_manager.available
| 2.03125 | 2 |
App/McCloud/views.py | ssziolkowski/App | 0 | 12799489 | <filename>App/McCloud/views.py
from django.shortcuts import render
from .text_generator import create
# Create your views here.
def text_generation(request):
context = {}
if request.method == "POST":
file = request.FILES.get("file")
if file.name.lower().endswith(('.txt')):
context['output'] = create(file.read().decode('utf-8'))
return render(request, 'McCloud/text_generation.html', context)
| 2.265625 | 2 |
training_accuracy.py | Supreme-Sector/Hand-Sign-Detection-Application | 0 | 12799490 | <reponame>Supreme-Sector/Hand-Sign-Detection-Application
import data_loader
import network
import _pickle as cPickle
f=open("neural_network.pickle","rb")
net=cPickle.load(f)
f.close()
training_data, test_data=data_loader.load_data()
num_correct = net.evaluate(training_data)
print("{}/{} correct".format(num_correct, len(training_data)))
| 2.375 | 2 |
online_store/apps/products/migrations/0005_alter_productimage_is_main.py | oocemb/Online_store | 0 | 12799491 | <filename>online_store/apps/products/migrations/0005_alter_productimage_is_main.py
# Generated by Django 4.0.2 on 2022-02-23 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0004_productimage_is_main'),
]
operations = [
migrations.AlterField(
model_name='productimage',
name='is_main',
field=models.BooleanField(default=False),
),
]
| 1.226563 | 1 |
bindings/python/examples/mouse_game.py | augustye/muniverse | 380 | 12799492 | <filename>bindings/python/examples/mouse_game.py
"""
Simple program to demonstrate how to use muniverse on a
game that takes mouse events.
"""
import sys
import numpy as np
sys.path.insert(0, '..')
import muniverse # noqa: E402
def main():
print('Looking up environment...')
spec = muniverse.spec_for_name('TowerMania-v1')
print('Creating environment...')
env = muniverse.Env(spec)
try:
print('Resetting environment...')
env.reset()
print('Getting observation...')
obs = env.observe()
print(ascii_art(obs))
print('Playing game...')
step_idx = 0
action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1)
actions = [action, action.with_event('mouseReleased')]
while True:
reward, done = env.step(0.1, actions[step_idx % 2])
step_idx += 1
print('reward: ' + str(reward))
if done:
break
finally:
env.close()
def ascii_art(img):
brightness = np.sum(img, axis=2) / 3
downsampled = brightness[::14, ::7]
binary = downsampled > 128
height, width = binary.shape
res = ''
for y in range(0, height):
if res != '':
res += '\n'
for x in range(0, width):
if binary[y, x]:
res += 'X'
else:
res += ' '
return res
if __name__ == '__main__':
main()
| 3.5 | 4 |
pyani/aniblastall.py | widdowquinn/pyani | 144 | 12799493 | # -*- coding: utf-8 -*-
# (c) University of Strathclyde 2021
# Author: <NAME>
#
# Contact: <EMAIL>
#
# <NAME>,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2021 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Code to implement the ANIblastall average nucleotide identity method."""
import logging
import os
import platform
import re
import shutil
import subprocess
from pathlib import Path
from . import pyani_config
from . import PyaniException
class PyaniblastallException(PyaniException):
"""ANIblastall-specific exception for pyani."""
def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str:
r"""Return BLAST blastall version as a string.
:param blast_exe: path to blastall executable
We expect blastall to return a string as, for example
.. code-block:: bash
$ blastall -version
[blastall 2.2.26] ERROR: Number of database sequences to show \
one-line descriptions for (V) [ersion] is bad or out of range [? to ?]
This is concatenated with the OS name.
The following circumstances are explicitly reported as strings
- no executable at passed path
- non-executable file at passed path (this includes cases where the user doesn't have execute permissions on the file)
- no version info returned
- executable cannot be run on this OS
"""
logger = logging.getLogger(__name__)
try:
blastall_path = Path(shutil.which(blast_exe)) # type:ignore
except TypeError:
return f"{blast_exe} is not found in $PATH"
if not blastall_path.is_file(): # no executable
return f"No blastall at {blastall_path}"
# This should catch cases when the file can't be executed by the user
if not os.access(blastall_path, os.X_OK): # file exists but not executable
return f"blastall exists at {blastall_path} but not executable"
if platform.system() == "Darwin":
cmdline = [blast_exe, "-version"]
else:
cmdline = [blast_exe]
try:
result = subprocess.run(
cmdline, # type: ignore
shell=False,
stdout=subprocess.PIPE, # type: ignore
stderr=subprocess.PIPE,
check=False, # blastall doesn't return 0
)
except OSError:
logger.warning("blastall executable will not run", exc_info=True)
return f"blastall exists at {blastall_path} but could not be executed"
version = re.search( # type: ignore
r"(?<=blastall\s)[0-9\.]*", str(result.stderr, "utf-8")
).group()
if 0 == len(version.strip()):
return f"blastall exists at {blastall_path} but could not retrieve version"
return f"{platform.system()}_{version} ({blastall_path})"
| 1.757813 | 2 |
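# Hedged usage sketch (added for illustration; the path and version string shown
# are invented): get_version() reports problems as strings instead of raising.
# from pyani.aniblastall import get_version
# print(get_version())   # e.g. "Linux_2.2.26 (/usr/local/bin/blastall)"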
plotter.py | garrettkatz/rnn-fxpts | 2 | 12799494 | <gh_stars>1-10
"""
Convenience wrappers around matplotlib plotting functions.
Points are handled in matrix columns rather than separate arguments for separate coordinates.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot(ax, X, *args, **kwargs):
"""
Plot 2d or 3d points in numpy.array X
ax should be the matplotlib.Axes (or Axes3D) on which to plot
X[:,p] should be the p^{th} point to plot
args and kwargs should be as in matplotlib.Axes.plot
"""
if X.shape[0]==2:
ax.plot(X[0,:],X[1,:], *args, **kwargs)
elif X.shape[0]==3:
#ax = plt.gca(projection="3d")
ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs)
def scatter(ax, X, *args, **kwargs):
"""
Scatter-plot 2d or 3d points in numpy.array X
ax should be the matplotlib.Axes (or Axes3D) on which to plot
X[:,p] should be the p^{th} point to plot
args and kwargs should be as in matplotlib.Axes.plot
"""
if X.shape[0]==2:
ax.scatter(X[0,:],X[1,:], *args, **kwargs)
elif X.shape[0]==3:
#ax = plt.gca(projection="3d")
ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs)
def text(ax, X, strs, *args, **kwargs):
"""
Plot text at 2d or 3d points in numpy.array X
ax should be the matplotlib.Axes (or Axes3D) on which to plot
X[:,p] should be the p^{th} point at which to plot
strs[p] should be the p^{th} string to plot
args and kwargs should be as in matplotlib.Axes.plot
"""
for j in range(X.shape[1]):
if X.shape[0]==2:
ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs)
elif X.shape[0]==3:
#ax = plt.gca(projection="3d")
ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs)
def quiver(ax, X, U, *args, **kwargs):
"""
Plot 2d or 3d vector field in numpy.arrays X and U.
ax should be the matplotlib.Axes (or Axes3D) on which to plot
X[:,p] should be the base point for the p^{th} vector
U[:,p] should be the p^{th} vector to plot
args and kwargs should be as in matplotlib.Axes.plot
"""
if X.shape[0]==2:
ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs)
elif X.shape[0]==3:
#ax = plt.gca(projection="3d")
ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs)
def plotNd(X, lims, *args):
"""
Plot Nd points in numpy.array X
Every two dimensions are shown on a separate subplot
The last dimension is omitted when N odd
X[:,p] should be the p^{th} point to plot
lims[n,0] and lims[n,1] are low and high plot limits for the n^{th} dimension
args should be as in matplotlib.Axes.plot
"""
    num_subplots = int(X.shape[0]/2)
    num_rows = int(np.floor(np.sqrt(num_subplots)))
    num_cols = int(np.ceil(num_subplots/num_rows))
for subplot in range(num_subplots):
ax = plt.subplot(num_rows, num_cols, subplot+1)
ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args)
ax.set_xlim(lims[0,:])
ax.set_ylim(lims[1,:])
def set_lims(ax, lims):
"""
Set all 2d or 3d plot limits at once.
ax is the matplotlib.Axes (or Axes3D) on which to plot
lims[0,:] are xlims, etc.
"""
ax.set_xlim(lims[0,:])
ax.set_ylim(lims[1,:])
if len(lims)>2:
ax.set_zlim(lims[2,:])
def lattice(mins, maxes, samp):
"""
Samples Nd points on a regularly spaced grid
mins[i], maxes[i] are the grid extents in the i^{th} dimension
samp is the number of points to sample in each dimension
Returns numpy.array G, where
G[:,n] is the n^{th} grid point sampled
"""
G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in range(len(mins)))]
G = np.array([g.flatten() for g in G])
return G
def plot_trisurf(ax, X, *args, **kwargs):
"""
Plots points in numpy.array X as a surface.
ax is the matplotlib.Axes3D on which to plot
X[:,p] is the p^{th} point
X[2,:] is shown as a surface over X[1,:] and X[2,:]
args and kwargs should be as in matplotlib.Axes3D.plot_trisurf
"""
ax.plot_trisurf(X[0,:],X[1,:],X[2,:],*args, **kwargs)
| 3.515625 | 4 |
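# Hedged usage sketch (added for illustration, not from the original repository):
# scatter a small 2D lattice with the helpers above; assumes the module is
# importable as `plotter` and that matplotlib can open a window.
import numpy as np
import matplotlib.pyplot as plt
import plotter

_, ax = plt.subplots()
grid = plotter.lattice(mins=[-1.0, -1.0], maxes=[1.0, 1.0], samp=5)  # 2 x 25 grid points
plotter.scatter(ax, grid, color="k")
plotter.set_lims(ax, np.array([[-1.5, 1.5], [-1.5, 1.5]]))
plt.show()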
gcode_gen/assembly.py | tulth/gcode_gen | 0 | 12799495 | from functools import partial
from collections.abc import MutableSequence
from . import base_types
from . import tree
from . import transform
from .state import CncState
from . import point as pt
from . import action
class Assembly(tree.Tree, transform.TransformableMixin):
'''tree of assembly items'''
def __init__(self, name=None, parent=None, state=None):
super().__init__(name=name, parent=parent)
if state is not None:
if not isinstance(state, CncState):
raise TypeError('state must be of type CncState, not {}'.format(type(state)))
self._state = state
@property
def state(self):
return self._state
@state.setter
def state(self, new_state):
self._state = new_state
for child in self.children:
child.state = self.state
def check_type(self, other):
assert isinstance(other, Assembly)
def append(self, arg):
super().append(arg)
arg.state = self.state
def last(self):
return self.children[-1]
def get_gcode(self):
return self.get_actions().get_gcode()
def get_points(self):
return self.get_actions().get_points()
def update_children_preorder(self):
pass
def get_preorder_actions(self):
return ()
def get_postorder_actions(self):
return ()
def update_children_postorder(self):
pass
def get_actions(self):
with self.state.excursion():
al = action.ActionList()
for step in self.depth_first_walk():
if step.is_visit:
if step.is_preorder:
step.visited.update_children_preorder()
al.extend(step.visited.get_preorder_actions())
elif step.is_postorder:
al.extend(step.visited.get_postorder_actions())
step.visited.update_children_postorder()
return al
@property
def pos(self):
return self.state['position']
@pos.setter
def pos(self, arg):
self.state['position'] = arg
def pos_offset(self, x=None, y=None, z=None):
self.pos = self.pos.offset(x, y, z)
@property
def root_transforms(self):
'''get transforms stacked all the way to the root'''
result = transform.TransformList()
for walk_step in self.root_walk():
if walk_step.is_visit and walk_step.is_preorder:
if isinstance(walk_step.visited, Assembly):
# extend left
result[0:0] = walk_step.visited.transforms
return result
class SafeJog(Assembly):
def __init__(self, x=0, y=0, z=0, name=None, parent=None, state=None):
super().__init__(name=name, parent=parent, state=state)
self.dest = pt.PointList(((x, y, z), ))
@property
def point(self):
return pt.PointList(self.root_transforms(self.dest.arr))[0]
@property
def changes(self):
return pt.changes(self.pos, self.point)
def get_preorder_actions(self):
al = action.ActionList()
# print(self.changes)
if self.changes:
jog = partial(action.Jog, state=self.state)
al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe'])
al += jog(x=self.point.x, y=self.point.y, z=self.pos.z)
al += jog(x=self.point.x, y=self.point.y, z=self.point.z)
# print("safejog", self.state['position'])
return al
class SafeZ(Assembly):
def __init__(self, name=None, parent=None, state=None):
super().__init__(name=name, parent=parent, state=state)
def get_preorder_actions(self):
al = action.ActionList()
points = pt.PointList(((0, 0, self.state['z_margin']), ))
point = pt.PointList(self.root_transforms(points.arr))[0]
jog = partial(action.Jog, state=self.state)
al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe'])
return al
| 2.21875 | 2 |
locallib/catalog/views.py | joseph-njogu/Django_local_lib | 0 | 12799496 |
from django.shortcuts import render
# # from django.shortcuts import get_object_or_404
# from django.http import HttpResponseRedirect
# from django.urls import reverse
import datetime
from django.contrib.auth.decorators import permission_required
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from .models import Book, Author, BookInstance
from django.views import generic
from django.contrib.auth.mixins import PermissionRequiredMixin
def index(request):
"""View function for home page of site."""
# Generate counts of some of the main objects
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
# # Available copies of books
num_instances_available = \
BookInstance.objects.filter(status__exact='a').count()
num_authors = Author.objects.count() # The 'all()' is implied by default.
# Number of visits to this view, as counted in the session variable.
# num_visits = request.session.get('num_visits', 0)
# request.session['num_visits'] = num_visits + 1
# Render the HTML template index.html
# with the data in the context variable.
return render(request,'index.html',context={
'num_books': num_books,
'num_instances': num_instances,
'num_instances_available': num_instances_available,
'num_authors': num_authors, }
)
class BookListView(generic.ListView):
"""Generic class-based view for a list of books."""
model = Book
paginate_by = 2
class BookDetailView(generic.DetailView):
"""Generic class-based detail view for a book."""
model = Book
class AuthorListView(generic.ListView):
"""Generic class-based list view for a list of authors."""
model = Author
paginate_by = 2
class AuthorDetailView(generic.DetailView):
"""Generic class-based detail view for an author."""
model = Author
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
    """Generic class-based view listing all books on loan.
    Only visible to users with can_mark_returned permission."""
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 2
class AuthorCreate(PermissionRequiredMixin, CreateView):
model = Author
fields = '__all__'
initial = {'date_of_death': '05/01/2018'}
permission_required = 'catalog.can_mark_returned'
class AuthorUpdate(PermissionRequiredMixin, UpdateView):
model = Author
fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']
permission_required = 'catalog.can_mark_returned'
class AuthorDelete(PermissionRequiredMixin, DeleteView):
model = Author
success_url = reverse_lazy('authors')
permission_required = 'catalog.can_mark_returned'
# Classes created for the forms challenge
class BookCreate(PermissionRequiredMixin, CreateView):
model = Book
fields = '__all__'
permission_required = 'catalog.can_mark_returned'
class BookUpdate(PermissionRequiredMixin, UpdateView):
model = Book
fields = '__all__'
permission_required = 'catalog.can_mark_returned'
class BookDelete(PermissionRequiredMixin, DeleteView):
model = Book
success_url = reverse_lazy('books')
permission_required = 'catalog.can_mark_returned'
| 2.375 | 2 |
dags/landsat_scenes_sync/landsat_scenes_fill_gaps.py | digitalearthafrica/deafrica-airflow | 1 | 12799497 | """
# Read report and generate messages to fill missing scenes
#### Utility utilization
The DAG can be parameterized with run time configurations `scenes_limit`, which receives a INT as value.
* The option scenes_limit limit the number of scenes to be read from the report,
therefore limit the number of messages to be sent
#### example conf in json format
{
"scenes_limit":10
}
"""
import gzip
import json
import logging
import traceback
from datetime import datetime
from typing import Optional
from airflow import DAG
from airflow.contrib.hooks.aws_sqs_hook import SQSHook
from airflow.operators.python_operator import PythonOperator
from odc.aws.queue import publish_messages
from infra.connections import CONN_LANDSAT_SYNC
from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME
from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME
from infra.variables import REGION
from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME
from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert
from utils.aws_utils import S3
REPORTING_PREFIX = "status-report/"
# This process is manually run
SCHEDULE_INTERVAL = None
default_args = {
"owner": "RODRIGO",
"start_date": datetime(2021, 6, 7),
"email": ["<EMAIL>"],
"email_on_failure": True,
"email_on_success": True,
"email_on_retry": False,
"retries": 0,
"version": "0.0.1",
"on_failure_callback": task_fail_slack_alert,
}
def post_messages(message_list) -> None:
"""
Publish messages
:param message_list:(list) list of messages
:return:(None)
"""
count = 0
messages = []
sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC)
sqs_hook = sqs_conn.get_resource_type(
resource_type="sqs", region_name=REGION
)
queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME)
logging.info("Sending messages")
for message_dict in message_list:
message = {
"Id": str(count),
"MessageBody": str(json.dumps(message_dict)),
}
messages.append(message)
count += 1
# Send 10 messages per time
if count % 10 == 0:
publish_messages(queue, messages)
messages = []
# Post the last messages if there are any
if len(messages) > 0:
publish_messages(queue, messages)
logging.info(f"{count} messages sent successfully")
def find_latest_report(landsat: str) -> str:
"""
Function to find the latest gap report
:param landsat:(str)satellite name
:return:(str) return the latest report file name
"""
continuation_token = None
list_reports = []
while True:
s3 = S3(conn_id=CONN_LANDSAT_SYNC)
resp = s3.list_objects(
bucket_name=LANDSAT_SYNC_BUCKET_NAME,
region=REGION,
prefix=f"{STATUS_REPORT_FOLDER_NAME}/",
continuation_token=continuation_token,
)
if not resp.get("Contents"):
raise Exception(
f"Report not found at "
f"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/"
f" - returned {resp}"
)
list_reports.extend(
[
obj["Key"]
for obj in resp["Contents"]
if landsat in obj["Key"] and "orphaned" not in obj["Key"]
]
)
# The S3 API is paginated, returning up to 1000 keys at a time.
if resp.get("NextContinuationToken"):
continuation_token = resp["NextContinuationToken"]
else:
break
list_reports.sort()
return list_reports[-1] if list_reports else ""
def build_message(missing_scene_paths, update_stac):
"""
"""
message_list = []
for path in missing_scene_paths:
landsat_product_id = str(path.strip("/").split("/")[-1])
if not landsat_product_id:
raise Exception(f'It was not possible to build product ID from path {path}')
message_list.append(
{
"Message": {
"landsat_product_id": landsat_product_id,
"s3_location": str(path),
"update_stac": update_stac
}
}
)
return message_list
def fill_the_gap(landsat: str, scenes_limit: Optional[int] = None) -> None:
"""
Function to retrieve the latest gap report and create messages to the filter queue process.
:param landsat:(str) satellite name
:param scenes_limit:(str) limit of how many scenes will be filled
:return:(None)
"""
try:
logging.info("Looking for latest report")
latest_report = find_latest_report(landsat=landsat)
logging.info(f"Latest report found {latest_report}")
if not latest_report:
logging.error("Report not found")
raise RuntimeError("Report not found!")
else:
logging.info("Reading missing scenes from the report")
s3 = S3(conn_id=CONN_LANDSAT_SYNC)
missing_scene_file_gzip = s3.get_s3_contents_and_attributes(
bucket_name=LANDSAT_SYNC_BUCKET_NAME,
region=REGION,
key=latest_report,
)
# This should just use Pandas. It's already a dependency.
missing_scene_paths = [
scene_path
for scene_path in gzip.decompress(missing_scene_file_gzip).decode("utf-8").split("\n")
if scene_path
]
logging.info(f"Number of scenes found {len(missing_scene_paths)}")
logging.info(f"Example scenes: {missing_scene_paths[0:10]}")
logging.info(f"Limited: {'No limit' if scenes_limit else scenes_limit}")
if scenes_limit:
missing_scene_paths = missing_scene_paths[:int(scenes_limit)]
update_stac = False
if 'update' in latest_report:
logging.info('FORCED UPDATE FLAGGED!')
update_stac = True
messages_to_send = build_message(
missing_scene_paths=missing_scene_paths,
update_stac=update_stac
)
logging.info("Publishing messages")
post_messages(message_list=messages_to_send)
except Exception as error:
logging.error(error)
        # log the full traceback, then re-raise so the task is marked failed
traceback.print_exc()
raise error
with DAG(
"landsat_scenes_fill_the_gap",
default_args=default_args,
schedule_interval=SCHEDULE_INTERVAL,
tags=["Landsat_scenes", "fill the gap"],
catchup=False,
) as dag:
PROCESSES = []
satellites = [
"landsat_8",
"landsat_7",
"landsat_5"
]
for sat in satellites:
PROCESSES.append(
PythonOperator(
task_id=f"{sat}_fill_the_gap",
python_callable=fill_the_gap,
op_kwargs=dict(landsat=sat, scenes_limit="{{ dag_run.conf.scenes_limit }}"),
on_success_callback=task_success_slack_alert,
)
)
PROCESSES
| 2.296875 | 2 |
bouncer/api/views/auth/reset_password.py | ikechuku/bouncer_rest_api | 0 | 12799498 | <reponame>ikechuku/bouncer_rest_api<gh_stars>0
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
import bcrypt
from ...models.user import User
class ResetPassword(APIView):
def post(self, request):
data = request.data
password = data['password']
token = data['token']
if len(password) < 6:
return Response({"message":"Invalid Password! Password must contain 6 or more characters"},
status=status.HTTP_400_BAD_REQUEST)
result = User.objects.filter(forgot_password_token = token)
if result.count() == 1:
user = result[0]
# hash user password using bcrypt algorithm
hashed = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
            user.password = hashed
user.save()
message = "Your password has been sucessfully reset"
return Response({"message":message},status=status.HTTP_200_OK)
return Response(dict(error="This user does not exist"), status=status.HTTP_400_BAD_REQUEST)
| 2.390625 | 2 |
alembic/versions/799310dca712_increase_sql_path_column_length_to_128.py | SnaKyEyeS/flask-track-usage | 46 | 12799499 | """Increase sql path column length to 128
Revision ID: 799310dca712
Revises: ca514840f404
Create Date: 2020-04-09 11:34:05.456439
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '799310dca712'
down_revision = 'ca514840f404'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('flask_usage', 'path', type_=sa.String(128), existing_type=sa.String(length=32))
def downgrade():
op.alter_column('flask_usage', 'path', type_=sa.String(32), existing_type=sa.String(length=128))
| 1.390625 | 1 |
project/asylum/management/commands/generate_all.py | jssmk/asylum | 1 | 12799500 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from asylum.tests.fixtures.full import generate_all
class Command(BaseCommand):
help = 'Generates full set of test data'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
generate_all()
| 1.796875 | 2 |
viewer_3d.py | tek5030/lab-camera-pose-py | 0 | 12799501 | import numpy as np
import pyvista as pv
from pylie import SE3
class Viewer3D:
"""Visualises the lab in 3D"""
def __init__(self):
"""Sets up the 3D viewer"""
self._plotter = pv.Plotter()
# Add scene origin and plane
scene_plane = pv.Plane(i_size=1000, j_size=1000)
self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe')
self._add_axis(SE3(), 100)
# Set camera.
self._plotter.camera.position = (100, 1500, -500)
self._plotter.camera.up = (-0.042739, -0.226979, -0.972961)
self._plotter.camera.focal_point = (100, 300, -200)
self._plotter.show(title="3D visualization", interactive_update=True)
def add_body_axes(self, pose_local_body: SE3):
"""Add axes representing the body pose to the 3D world
:param pose_local_body: The pose of the body in the local coordinate system.
"""
self._add_axis(pose_local_body)
def add_camera_axes(self, pose_local_camera: SE3):
"""Add axes representing the camera pose to the 3D world
:param pose_local_camera: The pose of the camera in the local coordinate system.
"""
self._add_axis(pose_local_camera)
def add_camera_frustum(self, camera_model, image):
"""Add a frustum representing the camera model and image to the 3D world"""
self._add_frustum(camera_model, image)
def _add_axis(self, pose: SE3, scale=10.0):
T = pose.to_matrix()
point = pv.Sphere(radius=0.1*scale)
point.transform(T)
self._plotter.add_mesh(point)
x_arrow = pv.Arrow(direction=(1.0, 0.0, 0.0), scale=scale)
x_arrow.transform(T)
self._plotter.add_mesh(x_arrow, color='red')
y_arrow = pv.Arrow(direction=(0.0, 1.0, 0.0), scale=scale)
y_arrow.transform(T)
self._plotter.add_mesh(y_arrow, color='green')
z_arrow = pv.Arrow(direction=(0.0, 0.0, 1.0), scale=scale)
z_arrow.transform(T)
self._plotter.add_mesh(z_arrow, color='blue')
def _add_frustum(self, camera_model, image, scale=20.0):
S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale, 1.0])
img_height, img_width = image.shape[:2]
point_bottom_left = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., img_height-1.])))
point_bottom_right = np.squeeze(camera_model.pixel_to_normalised(np.array([0., img_height-1.])))
point_top_left = np.squeeze(camera_model.pixel_to_normalised(np.array([0., 0.])))
point_top_right = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., 0.])))
point_focal = np.zeros([3])
pyramid = pv.Pyramid([point_bottom_left, point_bottom_right, point_top_left, point_top_right, point_focal])
pyramid.transform(S)
rectangle = pv.Rectangle([point_bottom_left, point_bottom_right, point_top_left, point_top_right])
rectangle.texture_map_to_plane(inplace=True)
rectangle.transform(S)
image_flipped_rgb = image[::-1, :, ::-1].copy()
tex = pv.numpy_to_texture(image_flipped_rgb)
self._plotter.add_mesh(pyramid, show_edges=True, style='wireframe')
self._plotter.add_mesh(rectangle, texture=tex, opacity=0.9)
def update(self, time=500):
self._plotter.update(time)
def show(self):
self._plotter.show()
| 2.859375 | 3 |
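# Hedged usage sketch (added for illustration, not from the original repository):
# intended call sequence for the class above; assumes pyvista/pylie are installed,
# a display is available, and this module is importable as `viewer_3d`.
# from pylie import SE3
# from viewer_3d import Viewer3D
# viewer = Viewer3D()
# viewer.add_body_axes(SE3())   # axes at the local origin
# viewer.update()               # refresh the interactive window
# viewer.show()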
pinot/data/tests/test_temporal.py | choderalab/pinot | 13 | 12799502 | import pytest
def test_import():
""" """
import pinot.data.datasets
@pytest.fixture
def moonshot():
""" """
import pinot
ds = pinot.data.moonshot_with_date
return ds
def test_moonshot(moonshot):
"""
Parameters
----------
moonshot :
Returns
-------
"""
print(moonshot)
| 2.109375 | 2 |
tests/unittest/test_exceptions.py | YuriyLisovskiy/NeuralNetwork | 1 | 12799503 | <reponame>YuriyLisovskiy/NeuralNetwork<filename>tests/unittest/test_exceptions.py
import unittest
from neural_network.network import net
from neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER
class TestExceptions(unittest.TestCase):
def test_last_layer_exception(self):
with self.assertRaises(ValueError):
params = {
'input_layer': INPUT_LAYER,
'hidden_layers': HIDDEN_LAYERS,
'output_layer': [2]
}
net.NeuralNetwork(**params)
def test_redundant_layers_exception(self):
with self.assertRaises(ValueError):
params = {
'input_layer': INPUT_LAYER,
'hidden_layers': [9],
'output_layer': OUTPUT_LAYER
}
net.NeuralNetwork(**params)
def test_input_layer_exception(self):
with self.assertRaises(ValueError):
params = {
'input_layer': INPUT_LAYER,
'hidden_layers': HIDDEN_LAYERS,
'output_layer': [1, 2]
}
net.NeuralNetwork(**params)
def test_output_layer_exception(self):
with self.assertRaises(ValueError):
params = {
'input_layer': [1, 1],
'hidden_layers': HIDDEN_LAYERS,
'output_layer': [1]
}
net.NeuralNetwork(**params)
def run(suite):
suite.addTest(TestExceptions('test_last_layer_exception'))
suite.addTest(TestExceptions('test_redundant_layers_exception'))
suite.addTest(TestExceptions('test_input_layer_exception'))
suite.addTest(TestExceptions('test_output_layer_exception'))
| 2.84375 | 3 |
nixutil/plat_util/freebsd.py | cptpcrd/nixutil | 0 | 12799504 | <gh_stars>0
# pylint: disable=invalid-name,too-few-public-methods
import ctypes
import os
from typing import Iterator, Optional
from . import bsd_util
CTL_KERN = 1
KERN_PROC = 14
KERN_PROC_FILEDESC = 33
KF_TYPE_VNODE = 1
PATH_MAX = 1024
pid_t = ctypes.c_int
sa_family_t = ctypes.c_uint8
_SS_MAXSIZE = 128
_SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64)
_SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t)
_SS_PAD2SIZE = (
_SS_MAXSIZE
- ctypes.sizeof(ctypes.c_ubyte)
- ctypes.sizeof(sa_family_t)
- _SS_PAD1SIZE
- _SS_ALIGNSIZE
)
CAP_RIGHTS_VERSION = 0
class SockaddrStorage(ctypes.Structure):
_fields_ = [
("ss_len", ctypes.c_ubyte),
("ss_family", sa_family_t),
("ss_pad1", (ctypes.c_char * _SS_PAD1SIZE)),
("ss_align", ctypes.c_int64),
("ss_pad2", (ctypes.c_char * _SS_PAD2SIZE)),
]
class CapRights(ctypes.Structure):
_fields_ = [
("cr_rights", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))),
]
class KinfoFile11(ctypes.Structure):
_fields_ = [
("kf_vnode_type", ctypes.c_int),
("kf_sock_domain", ctypes.c_int),
("kf_sock_type", ctypes.c_int),
("kf_sock_protocol", ctypes.c_int),
("kf_sa_local", SockaddrStorage),
("kf_sa_peer", SockaddrStorage),
]
class KinfoFileSock(ctypes.Structure):
_fields_ = [
("kf_sock_sendq", ctypes.c_uint32),
("kf_sock_domain0", ctypes.c_int),
("kf_sock_type0", ctypes.c_int),
("kf_sock_protocol0", ctypes.c_int),
("kf_sa_local", SockaddrStorage),
("kf_sa_peer", SockaddrStorage),
("kf_sock_pcb", ctypes.c_uint64),
("kf_sock_inpcb", ctypes.c_uint64),
("kf_sock_unpconn", ctypes.c_uint64),
("kf_sock_snd_sb_state", ctypes.c_uint16),
("kf_sock_rcv_sb_state", ctypes.c_uint16),
("kf_sock_recvq", ctypes.c_uint32),
]
class KinfoFileFile(ctypes.Structure):
_fields_ = [
("kf_file_type", ctypes.c_int),
("kf_spareint", (ctypes.c_int * 3)),
("kf_spareint64", (ctypes.c_uint64 * 30)),
("kf_file_fsid", ctypes.c_uint64),
("kf_file_rdev", ctypes.c_uint64),
("kf_file_fileid", ctypes.c_uint64),
("kf_file_size", ctypes.c_uint64),
("kf_file_fsid_freebsd11", ctypes.c_uint32),
("kf_file_rdev_freebsd11", ctypes.c_uint32),
("kf_file_mode", ctypes.c_uint16),
("kf_file_pad0", ctypes.c_uint16),
("kf_file_pad1", ctypes.c_uint32),
]
class KinfoFileSem(ctypes.Structure):
_fields_ = [
("kf_spareint", (ctypes.c_uint32 * 4)),
("kf_spareint64", (ctypes.c_uint64 * 32)),
("kf_sem_value", ctypes.c_uint32),
("kf_sem_mode", ctypes.c_uint16),
]
class KinfoFilePipe(ctypes.Structure):
_fields_ = [
("kf_spareint", (ctypes.c_uint32 * 4)),
("kf_spareint64", (ctypes.c_uint64 * 32)),
("kf_pipe_addr", ctypes.c_uint64),
("kf_pipe_peer", ctypes.c_uint64),
("kf_pipe_buffer_cnt", ctypes.c_uint32),
("kf_pts_pad0", (ctypes.c_uint32 * 3)),
]
class KinfoFilePts(ctypes.Structure):
_fields_ = [
("kf_spareint", (ctypes.c_uint32 * 4)),
("kf_spareint64", (ctypes.c_uint64 * 32)),
("kf_pts_dev_freebsd11", ctypes.c_uint32),
("kf_pts_pad0", ctypes.c_uint32),
("kf_pts_dev", ctypes.c_uint64),
("kf_pts_pad1", (ctypes.c_uint32 * 4)),
]
class KinfoFileProc(ctypes.Structure):
_fields_ = [
("kf_spareint", (ctypes.c_uint32 * 4)),
("kf_spareint64", (ctypes.c_uint64 * 32)),
("kf_pid", pid_t),
]
class KinfoFileUn(ctypes.Union):
_fields_ = [
("kf_freebsd11", KinfoFile11),
("kf_sock", KinfoFileSock),
("kf_file", KinfoFileFile),
("kf_sem", KinfoFileSem),
("kf_pipe", KinfoFilePipe),
("kf_pts", KinfoFilePts),
("kf_proc", KinfoFileProc),
]
class KinfoFile(ctypes.Structure):
_fields_ = [
("kf_structsize", ctypes.c_int),
("kf_type", ctypes.c_int),
("kf_fd", ctypes.c_int),
("kf_ref_count", ctypes.c_int),
("kf_flags", ctypes.c_int),
("kf_pad0", ctypes.c_int),
("kf_offset", ctypes.c_int64),
("kf_un", KinfoFileUn),
("kf_status", ctypes.c_uint16),
("kf_pad1", ctypes.c_uint16),
("_kf_ispare0", ctypes.c_int),
("kf_cap_rights", CapRights),
("_kf_cap_spare", ctypes.c_uint64),
("kf_path", (ctypes.c_char * PATH_MAX)),
]
def _iter_kinfo_files(pid: int) -> Iterator[KinfoFile]:
kinfo_file_data = bsd_util.sysctl_bytes_retry(
[CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None
)
kinfo_file_size = ctypes.sizeof(KinfoFile)
i = 0
while i < len(kinfo_file_data):
kfile_data = kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b"\0")
kfile = KinfoFile.from_buffer_copy(kfile_data)
if kfile.kf_structsize == 0:
break
yield kfile
i += kfile.kf_structsize
def try_recover_fd_path(fd: int) -> Optional[str]:
for kfile in _iter_kinfo_files(os.getpid()):
if kfile.kf_fd == fd and kfile.kf_type == KF_TYPE_VNODE:
# Sometimes the path is empty ("") for no apparent reason.
return os.fsdecode(kfile.kf_path) or None
return None
| 2.0625 | 2 |
opensda_flasher/config.py | jed-frey/opensda_flasher | 8 | 12799505 | <reponame>jed-frey/opensda_flasher
"""Module config functions."""
import os
import sys
from configparser import ConfigParser
from configparser import ExtendedInterpolation
def read_config(local_config=None):
"""Read Configuration File."""
if local_config is None:
local_config = ""
module_dir = os.path.dirname(__file__)
home_dir = os.path.expanduser("~")
default_config = os.path.join(module_dir, "opensda_flasher.ini")
home_config = os.path.join(home_dir, ".opensda_flasher.ini")
config = ConfigParser(interpolation=ExtendedInterpolation())
config.read([default_config, home_config, local_config])
return config
if __name__ == "__main__":
if len(sys.argv) > 1:
cfg = read_config(sys.argv[1])
else:
cfg = read_config()
for k, v in cfg.items():
print(k)
for k2, v2 in v.items():
print(f"\t{k2}: {v2}")
| 2.25 | 2 |
test/mock_extensions.py | BenjaminHamon/BuildService | 2 | 12799506 | import asyncio
from unittest.mock import MagicMock
class MockException(Exception):
pass
# AsyncMock is new in Python 3.8
class AsyncMock(MagicMock):
async def __call__(self, *args, **kwargs): # pylint: disable = invalid-overridden-method, useless-super-delegation
return super().__call__(*args, **kwargs)
class CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors
async def __call__(self, *args, **kwargs):
await asyncio.sleep(1)
return await super().__call__(*args, **kwargs)
| 2.578125 | 3 |
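# Hedged usage sketch (added for illustration, not part of the original helpers):
# awaiting an AsyncMock; assumes the AsyncMock class and asyncio import above are
# in scope, and the return value 42 is arbitrary.
if __name__ == "__main__":
    demo_mock = AsyncMock(return_value=42)
    assert asyncio.run(demo_mock()) == 42
    demo_mock.assert_called_once_with()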
Section 1/Section1/Video4_collections_1_tuples.py | PacktPublishing/-Python-By-Example | 8 | 12799507 | '''
Created on Mar 31, 2018
@author: <NAME>
'''
# ------------ Tuples ------------
number_tuple = (1, 2, 3, 4) # tuples use parentheses for creation
letter_tuple = ('a', 'b', 'c', 'd')
mixed_tuple = (1, 'a', 2, 'b', [88, 99]) # can mix different types
print(number_tuple)
print(letter_tuple)
print(mixed_tuple)
print(type(number_tuple)) # <class 'tuple'>
try:
number_tuple[0] = 9 # tuples are immutable, can't change items once assigned
except Exception as e:
print('** caught Exception:', e) # TypeError: 'tuple' object does not support item assignment
try: # try to add item to tuples by creating a new tuple
new_number_tuple = number_tuple + (5) # TypeError: can only concatenate tuple (not "int") to tuple
except Exception as e:
print('** caught Exception:', e)
new_number_tuple = number_tuple + (5,) # have to add a comma to make it a tuple
print(new_number_tuple)
number_tuple = number_tuple + (5,) # if we reassign to original tuple value,
print(number_tuple) # it appears as if we are updating it
print(id(number_tuple)) # but they are two different objects
number_tuple = number_tuple + (6,)
print(id(number_tuple)) # the variable name is now pointing to a new object
print(number_tuple[:]) # slice operator - entire tuple
print(number_tuple[1:3]) # start at element 1, end at 3 (exclusive)
print(number_tuple[3:]) # start at element 3 until the end (inclusive)
| 4.03125 | 4 |
examples/salesman.py | Eyjafjallajokull/pyga | 0 | 12799508 | import sys
import os
import logging
sys.path.append(os.environ["PWD"])
from pyga import *
population_size = 10
elite_count = 2
crossover_points = 2
crossover_mutate_probability = 0.2
max_weight = 15
city_names = ['a', 'b', 'c', 'd']
distances = [
# a b c d
[ 0, 130, 180, 300], # a
[130, 0, 320, 350], # b
[180, 320, 0, 360], # c
[300, 350, 360, 0] # d
]
class SalesmanFitnessEvaluator(FitnessEvaluator):
def __init__(self, distances):
super().__init__()
self.distances = distances
def get_fitness(self, candidate, population):
total_distance = 0
cities_order = candidate.data
for i, city in enumerate(cities_order):
next_city = cities_order[i+1] if i+1 < len(cities_order) else cities_order[0]
total_distance += self.distances[city][next_city]
return Fitness(-total_distance, is_natural=False)
def print_results(result):
print('Visit cities in this order:')
cities_order = result.data
for i, city in enumerate(cities_order):
next_city = cities_order[i + 1] if i + 1 < len(cities_order) else cities_order[0]
print('- ', city_names[city], distances[city][next_city])
print('Total distance: ', abs(result.fitness))
logging.basicConfig(level=logging.DEBUG)
random = Random()
probability = Probability(crossover_mutate_probability, random)
candidate_factory = ListFactory(random, len(distances)-1)
crossover = ListOrderCrossover(probability, random)
mutation = ListOrderMutation(probability, random, 2)
operator = PipelineOperator()
operator.append_operator(crossover)
operator.append_operator(mutation)
fitness_evaluator = SalesmanFitnessEvaluator(distances)
selection_strategy = RouletteWheelSelection(random)
termination_condition = Stagnation(100)
engine = GenerationalEvolutionEngine()
engine.create(candidate_factory, operator, fitness_evaluator, selection_strategy)
population = engine.evolve(population_size, elite_count, termination_condition)
print_results(population.get_best())
| 2.609375 | 3 |
begins-with-t.py | babubaskaran/pands-problem-set | 0 | 12799509 | <reponame>babubaskaran/pands-problem-set
# Author : <NAME>
# Date : 05/04/2019 Time : 19:00 pm
# Solution for problem number 2
# Version 1.0
# import the datetime of the system using import syntax
import datetime
# checking the weekday number is equal to 1 using if condition
if datetime.datetime.today().weekday() == 1:
# print result if the weekday number is 1
print("Yes - today begins with a T")
# checking the weekday number is equal to 3 using elif conditon
elif datetime.datetime.today().weekday() == 3:
# print result if the weekday number is 3
print("Yes - today begins with a T")
# pass on if the weekday number is not 1 or 3
else:
    # print that the day does not begin with T since the weekday number is not 1 or 3
    print("No - today doesn't begin with a T") | 4.34375 | 4 |
bin/automated_insert.py | kevinmooreiii/mechdriver | 1 | 12799510 | """ Add a species to your database
using a log file
"""
import sys
import os
import autofile
import automol
from mechanalyzer.inf import thy as tinfo
from mechanalyzer.inf import rxn as rinfo
from mechanalyzer.inf import spc as sinfo
import elstruct
import autorun
from mechroutines.es._routines.conformer import _saved_cnf_info
from mechroutines.es._routines.conformer import _sym_unique
from mechroutines.es._routines.conformer import _save_unique_parsed_conformer
from mechroutines.es._routines.conformer import _geo_unique
from mechroutines.es._routines.conformer import _fragment_ring_geo
from mechroutines.es._routines._sadpt import save_saddle_point
from mechlib.reaction.rxnid import _id_reaction
THEORY_DCT = {
'lvl_wbs': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'wb97xd',
'basis': '6-31g*'
},
'lvl_wbm': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'wb97xd',
'basis': '6-31+g*'
},
'lvl_wbt': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'wb97xd',
'basis': 'cc-pvtz'},
'lvl_m06s': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'm062x',
'basis': '6-31g*'
},
'lvl_m06m': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'm062x',
'basis': '6-31+g*'
},
'lvl_m06t': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'm062x',
'basis': 'cc-pvtz'},
'lvl_b2d': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b2plypd3',
'basis': 'cc-pvdz'},
'lvl_b2t': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b2plypd3',
'basis': 'cc-pvtz'},
'lvl_b2q': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b2plypd3',
'basis': 'cc-pvqz'
},
'lvl_b3s': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b3lyp',
'basis': '6-31g*'
},
'lvl_b3mg': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b3lyp',
'basis': '6-311g**'
},
'lvl_b3t': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b3lyp',
'basis': 'cc-pvtz'},
'cc_lvl_d': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)', 'basis': 'cc-pvdz'},
'cc_lvl_t': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)', 'basis': 'cc-pvtz'},
'cc_lvl_q': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)', 'basis': 'cc-pvqz'
},
'cc_lvl_df': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)-f12',
'basis': 'cc-pvdz-f12'
},
'cc_lvl_tf': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)-f12',
'basis': 'cc-pvtz-f12'
},
'cc_lvl_qf': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)-f12',
'basis': 'cc-pvqz-f12'
},
'mlvl_cas_dz': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'caspt2',
'basis': 'cc-pvdz'},
'mlvl_cas_tz': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'caspt2',
'basis': 'cc-pvtz'}}
def parse_user_locs(insert_dct):
rid = insert_dct['rid']
cid = insert_dct['cid']
if rid is None:
rid = autofile.schema.generate_new_ring_id()
if cid is None:
cid = autofile.schema.generate_new_conformer_id()
return (rid, cid)
def parse_user_species(insert_dct):
smi = insert_dct['smiles']
ich = insert_dct['inchi']
mult = insert_dct['mult']
chg = insert_dct['charge']
if ich is None and smi is None:
print(
            'Error: user did not specify species ' +
'with an inchi or smiles in input')
sys.exit()
if ich is None:
ich = automol.smiles.inchi(smi)
if not automol.inchi.is_complete(ich):
ich = automol.inchi.add_stereo(ich)
if mult is None:
print('Error: user did not specify mult in input')
sys.exit()
if chg is None:
print('Error: user did not specify charge in input')
sys.exit()
return sinfo.from_data(ich, chg, mult)
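# Hedged illustration (added for clarity, not part of the original script): a
# minimal dict that parse_user_species() above accepts; the molecule and values
# below are made up.
# _example_species_dct = {
#     'smiles': 'CC',   # either 'smiles' or 'inchi' must be provided
#     'inchi': None,
#     'mult': 1,
#     'charge': 0,
# }
# spc_info = parse_user_species(_example_species_dct)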
def parse_user_reaction(insert_dct):
smis = insert_dct['smiles']
ichs = insert_dct['inchi']
mults = insert_dct['mult']
chgs = insert_dct['charge']
rxn_class = insert_dct['rxn_class']
# zrxn_file = insert_dct['zrxn_file']
if ichs is None:
ichs = [[], []]
for smi in smis[0]:
ichs[0].append(automol.smiles.inchi(smi))
for smi in smis[1]:
ichs[1].append(automol.smiles.inchi(smi))
for idx, ich in enumerate(ichs[0]):
if not automol.inchi.is_complete(ich):
ich = automol.inchi.add_stereo(ich)
ichs[0][idx] = ich
for idx, ich in enumerate(ichs[1]):
if not automol.inchi.is_complete(ich):
ich = automol.inchi.add_stereo(ich)
ichs[1][idx] = ich
if mults is None:
print('Error: user did not specify mults in input')
sys.exit()
if chgs is None:
print('Error: user did not specify charges in input')
sys.exit()
flat_ichs = sum(ichs, [])
if len(flat_ichs) != len(mults):
print(
'Error: number of species does not match number of mults')
sys.exit()
if len(flat_ichs) != len(chgs):
print(
'Error: number of species does not match number of charges')
sys.exit()
idx = 0
rxn_muls = [[], []]
rxn_chgs = [[], []]
for ich in ichs[0]:
mults_allowed = automol.graph.possible_spin_multiplicities(
automol.inchi.graph(ich, stereo=False))
if mults[idx] not in mults_allowed:
print(
                'user specified mult of {}'.format(mults[idx]) +
                ' is not an allowed multiplicity for inchi {}'.format(ich))
sys.exit()
rxn_muls[0].append(mults[idx])
rxn_chgs[0].append(chgs[idx])
idx += 1
for ich in ichs[1]:
mults_allowed = automol.graph.possible_spin_multiplicities(
automol.inchi.graph(ich, stereo=False))
if mults[idx] not in mults_allowed:
print(
                'user specified mult of {}'.format(mults[idx]) +
                ' is not an allowed multiplicity for inchi {}'.format(ich))
sys.exit()
rxn_muls[1].append(mults[idx])
rxn_chgs[1].append(chgs[idx])
idx += 1
ts_mult = insert_dct['ts_mult']
if ts_mult is None:
print(
            'Error: user did not specify ts_mult')
sys.exit()
rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult))
ts_info = rinfo.ts_info(rxn_info)
# if zrxn_file is not None:
# zrxn_str = autofile.io_.read_file(zrxn_file)
# zrxns = [automol.reac.from_string(zrxn_str)]
# else:
# zrxns, _ = _id_reaction(rxn_info)
if rxn_class is None:
print(
'Error: user did not specify rxn_class')
sys.exit()
return rxn_info, ts_info, rxn_class
def parse_user_theory(insert_dct):
# Get input method explicitly inputted
program = insert_dct['program']
method = insert_dct['method']
basis = insert_dct['basis']
orb_res = insert_dct['orb_res']
# Get input method from theory dictionary
theory = insert_dct['theory']
if theory is None:
if program is None:
print('Error: user did not specify program in input')
sys.exit()
elif method is None:
print('Error: user did not specify method in input')
sys.exit()
elif basis is None:
print('Error: user did not specify basis in input')
sys.exit()
elif orb_res is None:
print('Error: user did not specify orb_res in input')
sys.exit()
else:
thy_info = (program, method, basis, orb_res)
else:
if theory in THEORY_DCT:
thy_info = tinfo.from_dct(THEORY_DCT[theory])
else:
print(
'Error: user did not specify a theory {}'.format(theory) +
                ' that is in the THEORY_DCT; ' +
                'please add it to the dct in the script or use program/method/basis/orb_res ' +
                'keywords instead of theory')
sys.exit()
return thy_info
def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None):
# species filesystem
spc_fs = autofile.fs.species(prefix)
spc_fs[-1].create(spc_info)
spc_prefix = spc_fs[-1].path(spc_info)
# theory filesystem
thy_fs = autofile.fs.theory(spc_prefix)
thy_fs[-1].create(mod_thy_info[1:])
thy_prefix = thy_fs[-1].path(mod_thy_info[1:])
# conformer
cnf_fs = autofile.fs.conformer(thy_prefix)
if locs is not None:
cnf_fs[-1].create(locs)
cnf_prefix = cnf_fs[-1].path(locs)
else:
cnf_prefix = None
return (
(spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix))
def create_reaction_filesystems(
prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None):
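    """ Build the reaction/theory/transition-state/conformer filesystem layers
        under prefix and return the filesystem objects along with their paths
    """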
# species filesystem
print('rxn_info', rxn_info)
rxn_fs = autofile.fs.reaction(prefix)
sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile')
rxn_fs[-1].create(sort_rxn_info)
rxn_prefix = rxn_fs[-1].path(sort_rxn_info)
# theory filesystem
thy_fs = autofile.fs.theory(rxn_prefix)
thy_fs[-1].create(mod_thy_info[1:])
thy_prefix = thy_fs[-1].path(mod_thy_info[1:])
if ts_locs is None:
ts_locs = (0,)
ts_fs = autofile.fs.transition_state(thy_prefix)
ts_fs[-1].create(ts_locs)
ts_prefix = ts_fs[-1].path(ts_locs)
# conformer
cnf_fs = autofile.fs.conformer(ts_prefix)
if locs is not None:
cnf_fs[-1].create(locs)
cnf_prefix = cnf_fs[-1].path(locs)
else:
cnf_prefix = None
return (
(rxn_fs, thy_fs, ts_fs, cnf_fs),
(rxn_prefix, thy_prefix, ts_prefix, cnf_prefix))
def read_user_file(dct, keyword):
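    """ Read and return the contents of the file named under keyword;
        exits if the user did not specify a filename
    """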
if dct[keyword] is None:
print(
            'ERROR: No filename is specified for {}. '.format(keyword) +
'Script will exit')
sys.exit()
file_name = dct[keyword]
return autofile.io_.read_file(file_name)
def read_user_filesystem(dct):
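    """ Return the user-specified save filesystem prefix; exits if missing """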
if dct['save_filesystem'] is None:
print(
            'ERROR: No save_filesystem is specified. ' +
'Script will exit')
sys.exit()
return dct['save_filesystem']
def choose_cutoff_distance(geo):
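    """ Scan a range of X-H bond-distance cutoffs until the geometry yields a
        single connected graph with exactly one oversaturated atom, which is
        taken to be the hydrogen being transferred
    """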
rqhs = [x * 0.1 for x in range(26, 38, 2)]
chosen_ts_gra = []
chosen_oversaturated_atom = None
for rqh in rqhs:
        ts_gras = automol.geom.connectivity_graph(
            geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3)
ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo)
ts_gras = automol.graph.connected_components(ts_gras)
if len(ts_gras) != 1:
continue
for ts_gra_i in ts_gras:
vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True)
oversaturated_atoms = [atm for atm, val in vals.items() if val < 0]
if len(oversaturated_atoms) == 1:
chosen_ts_gra = ts_gras[0]
chosen_oversaturated_atom = oversaturated_atoms[0]
break
if chosen_oversaturated_atom is None:
        print('could not figure out which H is being transferred')
sys.exit()
return chosen_ts_gra, chosen_oversaturated_atom
def get_zrxn(geo, rxn_info, rxn_class):
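    """ Build forward/backward TS graphs by assigning fractional orders to the
        breaking and forming bonds, match the resulting reactant/product graphs
        against the user-specified species, and return the z-matrix reaction
        object, TS z-matrix, TS geometry, and updated rxn_info
    """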
ts_gra, oversaturated_atom = choose_cutoff_distance(geo)
atoms_bnd = automol.graph.atoms_bond_keys(ts_gra)
bonds = atoms_bnd[oversaturated_atom]
if len(bonds) != 2:
        print('too many bonds to the transferred atom for me to figure out')
        print('I promise I will be smarter in the future')
sys.exit()
breaking_bond, forming_bond = bonds
# when we move on to other reaction types we have to check for double
# bonds when doing bond orders
forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1}
back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9}
forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct)
backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct)
reactant_gras = automol.graph.without_dummy_bonds(
automol.graph.without_fractional_bonds(forward_gra))
reactant_gras = automol.graph.connected_components(reactant_gras)
product_gras = automol.graph.without_dummy_bonds(
automol.graph.without_fractional_bonds(backward_gra))
product_gras = automol.graph.connected_components(product_gras)
ts_gras = [forward_gra, backward_gra]
rxn_gras = [reactant_gras, product_gras]
rxn_smis = [[], []]
for i, side in enumerate(rxn_info[0]):
for ich in side:
rxn_smis[i].append(automol.inchi.smiles(ich))
ts_smis = [[], []]
ts_ichs = [[], []]
for rgra in reactant_gras:
try:
rich = automol.graph.inchi(rgra, stereo=True)
except IndexError:
rich = automol.graph.inchi(rgra)
rsmi = automol.inchi.smiles(rich)
ts_ichs[0].append(rich)
ts_smis[0].append(rsmi)
for pgra in product_gras:
try:
pich = automol.graph.inchi(pgra, stereo=True)
except IndexError:
pich = automol.graph.inchi(pgra)
psmi = automol.inchi.smiles(pich)
ts_ichs[1].append(pich)
ts_smis[1].append(psmi)
reactant_match = False
product_match = False
if ts_smis[0] == rxn_smis[0]:
reactant_match = True
elif ts_smis[0][::-1] == rxn_smis[0]:
ts_ichs[0] = ts_ichs[0][::-1]
ts_smis[0] = ts_smis[0][::-1]
reactant_match = True
else:
ts_ichs = ts_ichs[::-1]
ts_smis = ts_smis[::-1]
ts_gras = ts_gras[::-1]
rxn_gras = rxn_gras[::-1]
if ts_smis[0] == rxn_smis[0]:
reactant_match = True
elif ts_smis[0][::-1] == rxn_smis[0]:
ts_ichs[0] = ts_ichs[0][::-1]
ts_smis[0] = ts_smis[0][::-1]
reactant_match = True
if reactant_match:
if ts_smis[1] == rxn_smis[1]:
product_match = True
        elif ts_smis[1][::-1] == rxn_smis[1]:
ts_ichs[1] = ts_ichs[1][::-1]
ts_smis[1] = ts_smis[1][::-1]
product_match = True
if reactant_match and product_match:
reactant_keys = []
for gra in rxn_gras[0]:
reactant_keys.append(automol.graph.atom_keys(gra))
product_keys = []
for gra in rxn_gras[1]:
product_keys.append(automol.graph.atom_keys(gra))
std_rxn = automol.reac.Reaction(
rxn_class, *ts_gras, reactant_keys, product_keys)
ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(
std_rxn, geo)
std_zrxn = automol.reac.relabel_for_zmatrix(
std_rxn, zma_keys, dummy_key_dct)
rxn_info = (ts_ichs, *rxn_info[1:])
ts_geo = automol.zmat.geometry(ts_zma)
# geo_reorder_dct = {}
# dummies = []
# for dummy in dummy_key_dct.keys():
# add_idx = 1
# for dumm_j in dummies:
# if dummy > dumm_j:
# add_idx += 1
# dummies.append(dummy + add_idx)
# remove_idx = 0
# for idx_i, idx_j in enumerate(zma_keys):
# if idx_i in dummies:
# remove_idx -= 1
# else:
# geo_reorder_dct[idx_i + remove_idx] = idx_j
# ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct)
else:
print(
            'The reactants and products found for the transition state ' +
'did not match those specified in user input')
sys.exit()
return std_zrxn, ts_zma, ts_geo, rxn_info
def main(insert_dct):
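    """ Parse the user options, read the electronic structure input/output
        files, and save the optimized geometry (and transition-state
        information, for saddle points) into the save filesystem
    """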
prefix = read_user_filesystem(insert_dct)
# Read in the input and output files that we
# Are inserting into the filesystem
inp_str = read_user_file(insert_dct, 'input_file')
out_str = read_user_file(insert_dct, 'output_file')
# parse method from insert input file
thy_info = parse_user_theory(insert_dct)
# parse out geo information first, to make sure
# user save specifications match output
prog, method, basis, _ = thy_info
ene = elstruct.reader.energy(prog, method, out_str)
geo = elstruct.reader.opt_geometry(prog, out_str)
if geo is None:
print(
            'No geometry could be parsed from output. ' +
            'Check that the program matches user specified' +
' {}'.format(prog) + ' and method matches' +
' {}'.format(method))
sys.exit()
# Parse out user specified save location
zrxn = None
if insert_dct['saddle']:
rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct)
zrxn, zma, geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class)
# for zrxn_i in zrxns:
# forw_form_key = automol.reac.forming_bond_keys(zrxn_i)
# back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True)
# forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i)
# back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True)
# forward_gra = automol.graph.without_stereo_parities(
# automol.graph.without_dummy_bonds(
# automol.graph.without_fractional_bonds(
# zrxn_i.forward_ts_graph)))
# forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key)
# backward_gra = automol.graph.without_stereo_parities(
# automol.graph.without_dummy_bonds(
# automol.graph.without_fractional_bonds(
# zrxn_i.backward_ts_graph)))
# backward_gra = automol.graph.add_bonds(backward_gra, back_form_key)
# if zrxn_i.class_ == 'hydrogen abstraction':
# forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key)
# backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key)
# print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph))
# print('forRXN', automol.graph.string(forward_gra))
# print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph))
# print('bacRXN', automol.graph.string(backward_gra))
# if forward_gra == automol.geom.graph(geo, stereo=False):
# zrxn = zrxn_i
# zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo)
# elif backward_gra == automol.geom.graph(geo, stereo=False):
# zrxn = automol.reac.reverse(zrxn_i)
# zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo)
# if zrxn is None:
# print(
# 'Your geometry did not match any of the attempted ' +
# 'zrxns, which are the following')
# for zrxn_i in zrxns:
# print(zrxns)
# sys.exit()
# # hess = elstruct.reader.hessian(prog, out_str)
        # hess = None
        # if hess is None:
# print(
# 'No hessian found in output, cannot save ' +
# 'a transition state without a hessian')
# sys.exit()
# run_path = insert_dct['run_path']
# if run_path is None:
# run_path = os.getcwd()
# run_fs = autofile.fs.run(run_path)
# freq_run_path = run_fs[-1].path(['hessian'])
# run_fs[-1].create(['hessian'])
# script_str = autorun.SCRIPT_DCT['projrot']
# freqs, _, imags, _ = autorun.projrot.frequencies(
# script_str, freq_run_path, [geo], [[]], [hess])
# if len(imags) != 1:
# print(
# 'Can only save a transition state that has a single' +
# 'imaginary frequency, projrot found the following' +
# 'frequencies: ' + ','.join(imags))
# sys.exit()
else:
spc_info = parse_user_species(insert_dct)
mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info)
locs = parse_user_locs(insert_dct)
# Check that the save location matches geo information
if not insert_dct['saddle']:
if not species_match(geo, spc_info):
print(
'I refuse to save this geometry until user specified' +
' info matches the info in user given output')
sys.exit()
# Check that the rid/cid info matches the filesystem
fs_array, prefix_array = create_species_filesystems(
prefix, spc_info, mod_thy_info, locs=None)
else:
fs_array, prefix_array = create_reaction_filesystems(
prefix, rxn_info, mod_thy_info,
ts_locs=insert_dct['ts_locs'], locs=None)
cnf_fs = fs_array[-1]
if not locs_match(geo, cnf_fs, locs):
print(
'I refuse to save this geometry until user specified' +
' info matches the info in user given output')
sys.exit()
inf_obj = autofile.schema.info_objects.run(
job=elstruct.Job.OPTIMIZATION, prog=prog, version='',
method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS)
ret = (inf_obj, inp_str, out_str)
_, saved_geos, saved_enes = _saved_cnf_info(
cnf_fs, mod_thy_info)
if _geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn):
sym_id = _sym_unique(
geo, ene, saved_geos, saved_enes)
if sym_id is None:
if cnf_fs[0].file.info.exists():
rinf_obj = cnf_fs[0].file.info.read()
else:
rinf_obj = autofile.schema.info_objects.conformer_trunk(0)
rinf_obj.nsamp = 1
if cnf_fs[1].file.info.exists([locs[0]]):
cinf_obj = cnf_fs[1].file.info.read(locs[0])
cnsampd = cinf_obj.nsamp
cnsampd += 1
cinf_obj.nsamp = cnsampd
else:
cinf_obj = autofile.schema.info_objects.conformer_branch(0)
cinf_obj.nsamp = 1
cnf_fs[1].create([locs[0]])
cnf_fs[0].file.info.write(rinf_obj)
cnf_fs[1].file.info.write(cinf_obj, [locs[0]])
hess, freqs, imags = None, None, None
if hess is not None and zrxn is not None:
hess_inf_obj = autofile.schema.info_objects.run(
job=elstruct.Job.HESSIAN, prog=prog, version='',
method=method, basis=basis,
status=autofile.schema.RunStatus.SUCCESS)
hess_ret = (hess_inf_obj, inp_str, out_str)
save_saddle_point(
zrxn, ret, hess_ret, freqs, imags,
mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs,
zma_locs=(0,), zma=zma)
else:
_save_unique_parsed_conformer(
mod_thy_info, cnf_fs, locs, (geo, zma, ene),
inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,))
print(
'geometry is now saved at {}'.format(cnf_fs[-1].path(locs)))
else:
print(
'the geometry in the output is not unique to filesystem' +
'... not saving')
def species_match(geo, spc_info):
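    """ Check that the user-specified InChI and multiplicity are consistent
        with the geometry parsed from the output
    """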
match = True
ich, _, mul = spc_info
mults_allowed = automol.graph.possible_spin_multiplicities(
automol.inchi.graph(ich, stereo=False))
geo_ich = automol.geom.inchi(geo, stereo=True)
if ich != geo_ich:
print(
            'user specified inchi {} '.format(ich) +
            'does not match inchi from output {}, '.format(geo_ich) +
            'which is based on geometry from output:\n' +
'{}'.format(automol.geom.string(geo)))
match = False
if mul not in mults_allowed:
print(
            'user specified mult of {} '.format(mul) +
            'is not an allowed multiplicity for inchi {}'.format(ich))
match = False
return match
def locs_match(geo, cnf_fs, locs):
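    """ Check that the user-specified ring-conformer id (rid) matches the one
        implied by the geometry already in the filesystem
    """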
match = True
rid = locs[0]
geo_rid = rng_loc_for_geo(geo, cnf_fs)
if geo_rid is not None:
if geo_rid != rid:
print(
'Error: rid mismatch for the filesystem at' +
' {}'.format(cnf_fs[0].path()) +
'\nthe expected rid for this geo is {}'.format(geo_rid) +
'\nthe user rid in input file is {}'.format(rid))
match = False
return match
def rng_loc_for_geo(geo, cnf_fs):
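    """ Find the ring-conformer id (rid) in the filesystem whose fragmented
        ring z-matrix matches the given geometry, if any
    """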
rid = None
frag_geo = _fragment_ring_geo(geo)
if frag_geo is not None:
frag_zma = automol.geom.zmatrix(frag_geo)
checked_rids = []
for locs in cnf_fs[-1].existing():
current_rid, _ = locs
if current_rid in checked_rids:
continue
if cnf_fs[-1].file.geometry.exists(locs):
checked_rids.append(current_rid)
locs_geo = cnf_fs[-1].file.geometry.read(locs)
frag_locs_geo = _fragment_ring_geo(locs_geo)
if frag_locs_geo is None:
rid = locs[0]
break
frag_locs_zma = automol.geom.zmatrix(frag_locs_geo)
if automol.zmat.almost_equal(
frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4):
rid = locs[0]
break
return rid
def parse_script_input(script_input_file):
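    """ Parse the '<Keyword>: <Value>' lines of the insert options file into
        a dictionary of insert options
    """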
script_input = autofile.io_.read_file(script_input_file).splitlines()
insert_dct = {
'save_filesystem': None,
'smiles': None,
'inchi': None,
'mult': None,
'charge': None,
'rid': None,
'cid': None,
'theory': None,
'program': None,
'method': None,
'basis': None,
'orb_res': None,
'input_file': None,
'output_file': None,
'ts_locs': None,
'ts_mult': None,
'rxn_class': None,
'zrxn_file': None,
'run_path': None,
'saddle': False,
}
for i, line in enumerate(script_input):
if len(line) < 2:
continue
elif '!' in line[0]:
continue
line = line.split('!')[0]
if ':' not in line:
print(
'ERROR: line\n({}) {}\n is not parsable, '.format(i, line) +
'script will exit until input is resolved to avoid' +
                ' filesystem contamination. ' +
                'Comment lines should contain "!". ' +
'Key format should be:\n' +
'<Keyword>: <Value>\n' +
'Allowed keywords are:\n' +
'{}'.format('\n'.join(list(insert_dct.keys())))
)
sys.exit()
keyword, value = line.split(':')
if keyword in insert_dct:
if 'None' in value:
value = None
elif keyword in ['mult', 'charge', 'ts_mult']:
values = []
for val in value.split(','):
values.append(int(val))
if len(values) == 1:
value = values[0]
else:
value = values
elif keyword in ['ts_locs']:
value = (int(value),)
elif keyword in ['rxn_class']:
                # strip whitespace from either side of reaction
# class but not in between words
value = value.split()
                for j, val in enumerate(value):
                    value[j] = val.replace(' ', '')
value = ' '.join(value)
elif keyword not in ['smiles', 'inchi']:
value = value.replace(' ', '')
else:
value = value.split(' = ')
if len(value) > 1:
insert_dct['saddle'] = True
reactants, products = value
reactants = reactants.split(' + ')
products = products.split(' + ')
values = [[], []]
for reactant in reactants:
values[0].append(reactant.replace(' ', ''))
for product in products:
values[1].append(product.replace(' ', ''))
value = values
else:
value = value[0].replace(' ', '')
print(keyword, value)
insert_dct[keyword] = value
else:
print(
                'ERROR: Keyword {} is not recognized. '.format(keyword) +
                'script will exit until input is resolved to avoid' +
                ' filesystem contamination. ' +
'Allowed keywords are:\n' +
'{}'.format('\n'.join(list(insert_dct.keys())))
)
sys.exit()
return insert_dct
if __name__ == '__main__':
SCRIPT_INPUT_FILE = 'insert_options.txt'
insert_dct = parse_script_input(SCRIPT_INPUT_FILE)
main(insert_dct)
| 1.984375 | 2 |
tinycord/middleware/interactions_create.py | HazemMeqdad/TinyCord | 0 | 12799511 | import typing
if typing.TYPE_CHECKING:
from ..client import Client
from ..core import Gateway, GatewayDispatch
from ..models import SlashContext, Option
async def interaction_create(client: "Client", gateway: "Gateway", event: "GatewayDispatch") -> typing.List[typing.Awaitable]:
"""
|coro|
    This event is called when an interaction is created.
    It provides the client and the slash-command context for the interaction.
Parameters
----------
client : `Client`
The main client.
gateway : `Gateway`
The gateway that dispatched the event.
event : `GatewayDispatch`
The event that was dispatched.
"""
data = event.data
# options = [Option(i["type"], name=i["name"], description=i["description"], required=)
# for i in data.get("data")["options"]]
context = SlashContext(
client,
event["type"],
id=data["id"],
application_id=data["application_id"],
command_id=data.get("data")["id"],
name=data.get("data")["name"],
options=Option(),
)
return "on_interaction_create", [
context
]
""" The event that was dispatched. """
def export():
""" Exports the function. """
return interaction_create
| 2.328125 | 2 |
promoterz/statistics.py | emillj/gekkoJaponicus | 0 | 12799512 | #!/bin/python
from deap import tools
import numpy as np
import os
statisticsNames = {'avg': 'Average profit',
'std': 'Profit variation',
'min': 'Minimum profit',
'max': 'Maximum profit',
'size': 'Population size',
'maxsize': 'Max population size'}
def getStatisticsMeter():
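    """Build a DEAP Statistics object tracking avg/std/min/max of individual fitness."""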
stats = tools.Statistics(lambda ind: ind.fitness.values[0])
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
return stats
def write_evolution_logs(i, stats, filename="output/evolution_gen.csv"):
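    """Append one generation's statistics as a CSV row; the file is recreated at generation 0."""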
#print(i, stats)
if type(stats) == dict:
message = ','.join([str(x) for x in [i,stats['avg'],
stats['std'],
stats['min'],
stats['max'],
stats['dateRange']]])
elif type(stats) == list:
message = ','.join([str(x) for x in [i] + stats])
else:
        raise TypeError(
            'stats must be a dict or a list, got {}'.format(type(stats)))
#print(message)
if i == 0 and os.path.isfile(filename):
os.remove(filename)
f=open(filename, 'a+')
f.write(message+"\n")
#print(message)
f.close()
| 2.625 | 3 |
djangoProject/mysite/views.py | bt1401/Django | 0 | 12799513 | <reponame>bt1401/Django
from django import urls
from django.db.models.fields import URLField
from django.shortcuts import render, redirect
from django.http import HttpResponse, request
from django.views import View
from .models import Title, Headline, Artical
from .forms import PostForm
from django.contrib.auth import authenticate, login
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
import requests
from bs4 import BeautifulSoup
# Create your views here.
class Scrape(View):
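    """Scrape the latest headlines from vietnamnet.vn into Headline objects (GET)
    and fetch the full article for a selected headline (POST)."""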
def get(self, request):
#Artical.objects.all().delete()
Headline.objects.all().delete()
session = requests.Session()
session.headers = {"User-Agent": "Googlebot/2.1 (+http://www.google.com/bot.html)"}
url = "https://vietnamnet.vn/vn/thoi-su/"
content = session.get(url).content
soup = BeautifulSoup(content, "html.parser")
News = soup.find_all('div', {"class":"clearfix item"})
for article in News:
linkx = article.find('a', {"class":"m-t-5 w-240 d-ib thumb left m-r-20"})
link=linkx['href']
imagex = article.find('img', {"class":"lazy"})
image = imagex['src']
titlex = article.find('a', {"class":"f-18 title"})
title = titlex.text
authorx = article.find('a', {"class":"box-subcate-style4-namecate"})
author = authorx.text
timex = article.find('span', {"class":"time"})
time = timex.text
textx = article.find('div', {"class":"lead"})
text = textx.text
new_headline = Headline()
new_headline.title = title
new_headline.image = image
new_headline.author = author
new_headline.time = time
new_headline.text = text
new_headline.url = "https://vietnamnet.vn/"+link
new_headline.save()
headlines = Headline.objects.all()[::-1]
context = {'object_list': headlines,}
return render(request, "mysite/scrape.html", context)
def post(self, request):
list_link = Headline.objects.values_list('url', flat=True)
link = request.POST.get('link')
id_get = request.POST.get('id')
if (link in list_link):
session = requests.Session()
session.headers = {"User-Agent": "Googlebot/2.1 (+http://www.google.com/bot.html)"}
url = link
content = session.get(url).content
soup = BeautifulSoup(content, "html.parser")
News = soup.find_all('div', {"class":"ArticleDetail w-660 d-ib"})
for artical in News:
image = artical.find("img", {"class": ""})['src']
title = artical.find("h1", {"class": "title f-22 c-3e"}).text
text = artical.find("div", {"class": "ArticleContent"}).text
author = artical.find("strong", {"class": ""}).text
time = artical.find("span", {"class": "ArticleDate"}).text
if not Artical.objects.filter(title=title):
new_post = Artical()
new_post.id = id_get
new_post.image = image
new_post.title = title
new_post.text = text
new_post.author = author
new_post.time = time
new_post.save()
return redirect(f'/post/{id_get}')
else:
return redirect(f'/post/{id_get}')
def show_detail(request, id_get):
headlines = Artical.objects.get(id = id_get)
context = {'object_list': headlines,}
return render(request, "mysite/scrape_detail.html", context)
class HomeClass(View):
def get(self, request):
list_title = Title.objects.all()
context = {"baiviet" : list_title}
return render(request, 'mysite/home.html', context)
class LoginClass(View):
def get(self, request):
return render(request, 'mysite/login.html')
def post(self, request):
username = request.POST.get('user_name')
password = request.POST.get('pass_word')
my_user = authenticate(username=username, password=password)
if my_user is None:
return render(request, 'mysite/login_unsuccess.html')
login(request, my_user)
return render(request, 'mysite/login_success.html')
class AddPost(LoginRequiredMixin,View):
login_url='/login/'
def get(self, request):
f = PostForm()
context = {'fm': f}
return render(request,'mysite/add_post.html',context )
def post(self, request):
f = PostForm(request.POST)
if not f.is_valid():
return render(request, 'mysite/add_unsuccess.html')
if request.user.has_perm('mysite.add_post'):
f.save()
else:
return HttpResponse('You do not have access!')
return render(request, 'mysite/add_success.html')
class AdminView(ListView):
model = Title
template_name = 'mysite/admin_site.html'
class TestDetailView(DetailView):
model = Title
template_name = 'mysite/admin_detailview.html'
class UpdatePost(UpdateView):
model = Title
template_name = 'mysite/update_post.html'
fields = ['title', 'body_text', 'date']
class DeletePost(DeleteView):
model = Title
template_name = 'mysite/delete_post.html'
success_url = reverse_lazy('mysite:admin-site')
| 2.4375 | 2 |
src/tickers.py | rotomer/nlp-project | 0 | 12799514 | <reponame>rotomer/nlp-project
SNP_TICKERS = ['AAPL', 'XOM', 'GE', 'CVX', 'MSFT', 'IBM', 'T', 'GOOG', 'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM', 'PM',
'KO', 'MRK', 'VZ', 'WMT', 'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA', 'BAC',
'DIS', 'MCD', 'AMZN', 'HD', 'KFT', 'V', 'OXY', 'COP', 'MO', 'UTX', 'USB', 'AMGN', 'CVS', 'MMM', 'EMC',
'UNH', 'BMY', 'UNP', 'CAT', 'EBAY', 'AXP', 'UPS', 'GS', 'ESRX', 'GILD', 'NWSA', 'MON', 'MA', 'LLY', 'CL',
'BA', 'DD', 'HON', 'SPG', 'DUK', 'ACN', 'MDT', 'COST', 'TWX', 'TGT', 'SO', 'SBUX', 'AIG', 'F', 'FCX',
'MET', 'BIIB', 'EMR', 'APC', 'NKE', 'DOW', 'LOW', 'NOV', 'KMB', 'APA', 'HPQ', 'PNC', 'COF', 'BAX', 'TJX',
'CELG', 'DTV', 'DE', 'DHR', 'TXN', 'HAL', 'WAG', 'PX', 'PCLN', 'EXC', 'D', 'EOG', 'YUM', 'NEE', 'TWC',
'PSX', 'COV', 'ADP', 'AMT', 'AGN', 'NEM', 'BK', 'TRV', 'TYC', 'GIS', 'ITW', 'ACE', 'PRU', 'VIAB', 'CTL',
'LMT', 'FDX', 'PCP', 'BBT', 'MS', 'BLK', 'DVN', 'AFL', 'ALXN', 'GD', 'WMB', 'CBS', 'CSX', 'TMO', 'AEP',
'CTSH', 'MRO', 'DFS', 'NSC', 'MCK', 'CB', 'KMI', 'STT', 'PSA', 'BHI', 'ISRG', 'GLW', 'CRM', 'ALL', 'SE',
'HCP', 'RTN', 'WLP', 'CCI', 'JCI', 'MPC', 'MMC', 'FE', 'VTR', 'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM', 'ED',
'PPG', 'CME', 'LYB', 'APD', 'VLO', 'EQR', 'BEN', 'ECL', 'PPL', 'AON', 'WFM', 'BXP', 'YHOO', 'S', 'NBL',
'NOC', 'CMI', 'CCL', 'PEG', 'INTU', 'PLD', 'SYK', 'TROW', 'COH', 'ADBE', 'HES', 'ETN', 'MOS', 'IP',
'BDX', 'MHP', 'STI', 'LO', 'M', 'MJN', 'EIX', 'EL', 'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE', 'MSI', 'ROST',
'DELL', 'CTXS', 'FITB', 'RAI', 'PCAR', 'WY', 'SCHW', 'VFC', 'WM', 'CF', 'AZO', 'AMAT', 'CAM', 'VNO',
'OMC', 'CI', 'XEL', 'A', 'CAH', 'AET', 'STJ', 'AVB', 'L', 'IR', 'PXD', 'KR', 'SWK', 'K', 'TDC', 'SHW',
'ESV', 'SYMC', 'PH', 'GWW', 'EW', 'ETR', 'NUE', 'SWN', 'MAT', 'CBE', 'NU', 'AMP', 'NTAP', 'ZMH', 'LTD',
'ADI', 'PGR', 'HST', 'FAST', 'MTB', 'HOT', 'RRC', 'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI',
'RHT', 'WU', 'STX', 'DOV', 'ALTR', 'WPI', 'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO', 'RF',
'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B', 'MAR', 'ROK', 'CHK', 'ABC', 'ICE',
'HOG', 'XRX', 'APH', 'GPC', 'CHRW', 'SJM', 'AA', 'COG', 'FLR', 'DPS', 'CLX', 'RL', 'WYNN', 'BEAM', 'CNP',
'NE', 'JNPR', 'LH', 'EQT', 'CA', 'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE', 'MCO', 'HIG',
'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG', 'MKC', 'BCR', 'BSX', 'KLAC', 'AEE', 'BWA',
'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN', 'SLM', 'PLL', 'TIF', 'TXT',
'XL', 'LLTC', 'WAT', 'NI', 'DRI', 'PCL', 'TAP', 'LLL', 'AVP', 'CNX', 'AES', 'AKAM', 'LNC', 'VAR', 'BLL',
'FLS', 'LUV', 'KMX', 'FDO', 'WHR', 'MCHP', 'SCG', 'DNR', 'CFN', 'CPB', 'CMS', 'VMC', 'MU', 'BMC', 'NYX',
'CMA', 'BTU', 'WIN', 'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW', 'DHI', 'ARG', 'LEN', 'QEP', 'EFX',
'CVH', 'CLF', 'CBG', 'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV', 'SNI', 'PWR', 'JEC',
'PHM', 'IRM', 'HP', 'CSC', 'SUN', 'TMK', 'FTR', 'NRG', 'IPG', 'IFF', 'GAS', 'STZ', 'HRB', 'XYL', 'TSN',
'FOSL', 'DO', 'BBY', 'LUK', 'CTAS', 'HAS', 'POM', 'PBCT', 'NFX', 'RDC', 'SNA', 'GCI', 'URBN', 'NBR',
'TEG', 'EA', 'HRL', 'SWY', 'LSI', 'TSS', 'ZION', 'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX', 'TE', 'TRIP',
'DNB', 'LEG', 'JBL', 'IGT', 'JCP', 'CVC', 'ATI', 'SAI', 'PKI', 'WPX', 'BMS', 'AVY', 'HAR', 'OI', 'AIZ',
'NFLX', 'DF', 'FLIR', 'GT', 'LM', 'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ', 'X', 'SEE', 'TER', 'THC',
'GME', 'GNW', 'FHN', 'ETFC', 'AMD', 'R', 'FII', 'RRD', 'BIG', 'AN', 'WPO', 'LXK', 'ANR', 'FSLR', 'DV',
'TIE']
ALL_TICKERS = ['A', 'AA', 'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC', 'ACE',
'ACI',
'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE',
'AEGN', 'AEIS', 'AEO', 'AEP', 'AES', 'AET', 'AF', 'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN', 'AGP',
'AGYS',
'AHL', 'AHS', 'AIG', 'AINV', 'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS',
'ALB',
'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN', 'AM', 'AMAT', 'AMCX', 'AMD', 'AME',
'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS',
'AOL', 'AON', 'AOS', 'APA', 'APC', 'APD', 'APEI', 'APH', 'APOG', 'APOL', 'ARB', 'ARE', 'AREX', 'ARG',
'ARO',
'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML',
'ATNI',
'ATO', 'ATR', 'ATU', 'ATW', 'AVA', 'AVAV', 'AVB', 'AVD', 'AVID', 'AVP', 'AVT', 'AVY', 'AWR', 'AXE',
'AXP',
'AYI', 'AZO', 'AZZ', 'B', 'BA', 'BABY', 'BAC', 'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT', 'BBY', 'BC',
'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS', 'BGC', 'BGFV', 'BGG',
'BGS', 'BH', 'BHE', 'BHI', 'BID', 'BIG', 'BIIB', 'BJRI', 'BK', 'BKE', 'BKH', 'BKI', 'BKMU', 'BKS', 'BLK',
'BLKB', 'BLL', 'BMC', 'BMI', 'BMR', 'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL',
'BRKS',
'BRLI', 'BRO', 'BRS', 'BSX', 'BTH', 'BTU', 'BWA', 'BWLD', 'BWS', 'BXP', 'BXS', 'BYD', 'BYI', 'C', 'CA',
'CAB', 'CACI', 'CAG', 'CAH', 'CAKE', 'CALM', 'CAM', 'CAS', 'CASC', 'CASY', 'CAT', 'CATM', 'CATO', 'CATY',
'CB', 'CBB', 'CBE', 'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE', 'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT',
'CBU',
'CCC', 'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB', 'CEC', 'CECO', 'CELG', 'CELL',
'CENX', 'CERN', 'CEVA', 'CF', 'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE', 'CHG', 'CHK', 'CHRW',
'CHS',
'CI', 'CIEN', 'CINF', 'CIR', 'CKH', 'CKP', 'CL', 'CLC', 'CLD', 'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS',
'CLP',
'CLW', 'CLX', 'CMA', 'CMC', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMN', 'CMP', 'CMS', 'CMTL', 'CNC', 'CNK',
'CNL',
'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX', 'COCO', 'COF', 'COG', 'COH', 'COHU', 'COL', 'COLB', 'COO', 'COP',
'COST', 'COV', 'CPB', 'CPLA', 'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN', 'CREE', 'CRI', 'CRK', 'CRL',
'CRM', 'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR', 'CSX',
'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ', 'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT',
'CVS', 'CVX', 'CW', 'CWTR', 'CXW', 'CY', 'CYBX', 'CYH', 'CYMI', 'CYN', 'CYT', 'D', 'DAKT', 'DAR', 'DBD',
'DCI', 'DCOM', 'DD', 'DDD', 'DE', 'DECK', 'DEL', 'DELL', 'DF', 'DFS', 'DGII', 'DGIT', 'DGX', 'DHI',
'DHR',
'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX', 'DM', 'DMND', 'DNB', 'DNR', 'DO', 'DOV',
'DOW',
'DPS', 'DRC', 'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK', 'DV', 'DVA',
'DVN',
'DW', 'DWA', 'DY', 'EA', 'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG', 'ED', 'EE', 'EFX', 'EGL', 'EGN',
'EGP',
'EHTH', 'EIG', 'EIX', 'EL', 'ELY', 'EMC', 'EME', 'EMN', 'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR',
'ENZ',
'EOG', 'EPAY', 'EPIQ', 'EPR', 'EQIX', 'EQR', 'EQT', 'EQY', 'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS',
'ESV',
'ETFC', 'ETH', 'ETN', 'ETR', 'EV', 'EW', 'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE',
'EXPO',
'EXR', 'EZPW', 'F', 'FAF', 'FARO', 'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN', 'FCS', 'FCX', 'FDO',
'FDS',
'FDX', 'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO', 'FII', 'FINL', 'FIRE', 'FIS', 'FISV',
'FITB', 'FIX', 'FL', 'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI', 'FMC', 'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN',
'FNP',
'FOR', 'FORR', 'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR', 'FSP', 'FST', 'FTI', 'FTR', 'FUL', 'FULT', 'FWRD',
'GAS', 'GB', 'GBCI', 'GCI', 'GCO', 'GD', 'GDI', 'GE', 'GEF', 'GEO', 'GES', 'GFF', 'GGG', 'GHL', 'GIFI',
'GILD', 'GIS', 'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR',
'GPS', 'GS', 'GSM', 'GT', 'GTAT', 'GTIV', 'GTY', 'GVA', 'GWW', 'GXP', 'GY', 'HAE', 'HAFC', 'HAIN', 'HAL',
'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG', 'HD', 'HE', 'HES',
'HF',
'HFC', 'HGR', 'HHS', 'HI', 'HIBB', 'HIG', 'HII', 'HITK', 'HITT', 'HIW', 'HLIT', 'HLX', 'HMA', 'HME',
'HMN',
'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX', 'HOMB', 'HON', 'HOS', 'HOT', 'HOTT', 'HP', 'HPQ', 'HPT',
'HPY',
'HR', 'HRB', 'HRC', 'HRL', 'HRS', 'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP', 'HST', 'HSTM', 'HSY',
'HTLD',
'HTSI', 'HUBG', 'HUM', 'HVT', 'HW', 'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON',
'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX', 'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI', 'IM', 'IN', 'INDB',
'INFA', 'INGR', 'ININ', 'INT', 'INTC', 'INTU', 'IO', 'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI',
'IR', 'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC',
'IVC',
'IVZ', 'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC',
'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM', 'JWN', 'K', 'KALU', 'KAMN',
'KBH',
'KBR', 'KDN', 'KELYA', 'KEX', 'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT',
'KMX', 'KND', 'KNX', 'KO', 'KOP', 'KOPN', 'KR', 'KRA', 'KRC', 'KRG', 'KS', 'KSS', 'KSU', 'KSWS', 'KWK',
'KWR', 'L', 'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR', 'LECO', 'LEG', 'LEN', 'LFUS', 'LG', 'LH',
'LHCG',
'LHO', 'LIFE', 'LII', 'LINC', 'LKQ', 'LL', 'LLL', 'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS', 'LMT', 'LNC',
'LNCE',
'LNN', 'LNT', 'LO', 'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR',
'LTC',
'LTD', 'LTM', 'LUFK', 'LUK', 'LUV', 'LXK', 'LXP', 'LXU', 'LYV', 'LZB', 'M', 'MA', 'MAA', 'MAC', 'MAN',
'MANH', 'MANT', 'MAR', 'MAS', 'MASI', 'MAT', 'MATW', 'MATX', 'MCD', 'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI',
'MCRL', 'MCRS', 'MCS', 'MCY', 'MD', 'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU', 'MEAS', 'MED',
'MEI',
'MENT', 'MET', 'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG', 'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI',
'MLM', 'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO', 'MOH', 'MOLX', 'MON', 'MOS', 'MOV',
'MPC',
'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX', 'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR',
'MTB',
'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU', 'MUR', 'MW', 'MWIV', 'MWV', 'MWW', 'MYE', 'MYL',
'NAFC',
'NANO', 'NATI', 'NAVG', 'NBL', 'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE',
'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX', 'NI', 'NILE', 'NJR', 'NKE', 'NNN', 'NOC',
'NOV',
'NP', 'NPBC', 'NPK', 'NPO', 'NRG', 'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI',
'NTRS', 'NU', 'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA', 'NX',
'NYB',
'NYT', 'NYX', 'O', 'OCR', 'ODFL', 'ODP', 'OFC', 'OGE', 'OHI', 'OI', 'OII', 'OIS', 'OKE', 'OLN', 'OMC',
'OMCL', 'OMG', 'OMI', 'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY',
'ORN', 'OSG', 'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX', 'PB', 'PBCT', 'PBH', 'PBI',
'PBY',
'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI', 'PEP', 'PERY',
'PES', 'PETM', 'PETS', 'PFE', 'PFG', 'PFS', 'PG', 'PGR', 'PH', 'PHM', 'PII', 'PJC', 'PKE', 'PKG', 'PKI',
'PKY', 'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI', 'PNC',
'PNFP',
'PNK', 'PNM', 'PNR', 'PNRA', 'PNW', 'PNY', 'POL', 'POM', 'POOL', 'POST', 'POWI', 'POWL', 'PPG', 'PPS',
'PQ',
'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM', 'PSS', 'PSSI',
'PSX', 'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB', 'PWR', 'PX', 'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP',
'QLGC', 'QNST', 'QSFT', 'QSII', 'R', 'RAH', 'RAI', 'RAX', 'RBC', 'RBCN', 'RBN', 'RCII', 'RDC', 'RE',
'RECN',
'REG', 'REGN', 'RF', 'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS', 'RHI', 'RHT', 'RJF', 'RKT', 'RL', 'RLI', 'RMD',
'ROCK', 'ROG', 'ROK', 'ROL', 'ROP', 'ROSE', 'ROST', 'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB', 'RS', 'RSG',
'RSH',
'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN', 'S', 'SAFM', 'SAFT',
'SAH',
'SAI', 'SAM', 'SBRA', 'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE',
'SEIC', 'SENEA', 'SF', 'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW',
'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG',
'SLGN', 'SLH', 'SLM', 'SLXP', 'SM', 'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK',
'SNH', 'SNI', 'SNPS', 'SNV', 'SNX', 'SO', 'SON', 'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI',
'SPTN',
'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI', 'SSP', 'SSS', 'STBA', 'STC', 'STE', 'STI', 'STJ', 'STL',
'STLD',
'STMP', 'STR', 'STRA', 'STRI', 'STT', 'STX', 'STZ', 'SUN', 'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK',
'SWKS', 'SWM', 'SWN', 'SWS', 'SWX', 'SWY', 'SXC', 'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA',
'SYY',
'T', 'TAP', 'TBI', 'TCB', 'TCBI', 'TCO', 'TDC', 'TDS', 'TDW', 'TDY', 'TE', 'TECD', 'TECH', 'TEG', 'TEL',
'TER', 'TEX', 'TFX', 'TG', 'TGI', 'TGT', 'THC', 'THG', 'THO', 'THOR', 'THS', 'TIBX', 'TIE', 'TIF', 'TJX',
'TKR', 'TLAB', 'TMK', 'TMO', 'TMP', 'TNC', 'TOL', 'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB',
'TRMK', 'TRN', 'TROW', 'TRST', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI',
'TTMI', 'TTWO', 'TUES', 'TUP', 'TW', 'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH', 'TXT', 'TYC',
'TYL',
'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS', 'UHT', 'UIL',
'UMBF', 'UMPQ', 'UNF', 'UNFI', 'UNH', 'UNM', 'UNP', 'UNS', 'UNT', 'UNTD', 'UPS', 'URBN', 'URI', 'URS',
'USB',
'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI', 'UTIW', 'UTX', 'UVV', 'V', 'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI',
'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM',
'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI', 'VTR', 'VVC', 'VZ', 'WAB', 'WABC', 'WAFD', 'WAG',
'WAT', 'WBS', 'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC', 'WDR', 'WEC', 'WEN', 'WERN', 'WFC', 'WFM', 'WGL',
'WGO',
'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP', 'WM', 'WMB', 'WMS', 'WMT', 'WOOF', 'WOR', 'WPO', 'WPP', 'WPX', 'WR',
'WRB', 'WRC', 'WRI', 'WRLD', 'WSM', 'WSO', 'WST', 'WTFC', 'WTR', 'WTS', 'WU', 'WWD', 'WWW', 'WY', 'WYN',
'WYNN', 'X', 'XEC', 'XEL', 'XL', 'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL', 'Y', 'YHOO', 'YUM',
'ZBRA', 'ZEP', 'ZEUS', 'ZLC', 'ZMH', 'ZQK', 'ZUMZ']
TICKERS = sorted(list(set(SNP_TICKERS) & set(ALL_TICKERS)))
| 1.40625 | 1 |
chapter_3/redirection-options.py | bimri/programming_python | 0 | 12799515 | "Other Redirection Options: os.popen and subprocess Revisited"
'''
the built-in os.popen function and its subprocess.Popen relative,
which provide a way to redirect another command’s streams from
within a Python program. these tools can be used to run a shell
command line.
Other Redirection Options: os.popen and subprocess Revisited
Near the end of the preceding chapter, we took a first look at the built-in os.popen
function and its subprocess.Popen relative, which provide a way to redirect another
command’s streams from within a Python program. As we saw, these tools can be used
to run a shell command line (a string we would normally type at a DOS or csh prompt)
but also provide a Python file-like object connected to the command’s output stream—
reading the file object allows a script to read another program’s output. I suggested that
these tools may be used to tap into input streams as well.
Because of that, the os.popen and subprocess tools are another way to redirect streams
of spawned programs and are close cousins to some of the techniques we just met.
Their effect is much like the shell | command-line pipe syntax for redirecting streams
to programs (in fact, their names mean “pipe open”), but they are run within a script
and provide a file-like interface to piped streams. They are similar in spirit to the
redirect function, but are based on running programs (not calling functions), and the
command’s streams are processed in the spawning script as files (not tied to class objects).
These tools redirect the streams of a program that a script starts, instead of
redirecting the streams of the script itself.
'''
"Redirecting input or output with os.popen"
'''
In fact, by passing in the desired mode flag, we redirect either a spawned program’s
output or input streams to a file in the calling scripts, and we can obtain the spawned
program’s exit status code from the close method (None means “no error” here).
C:\...\PP4E\System\Streams> type hello-out.py
print('Hello shell world')
C:\...\PP4E\System\Streams> type hello-in.py
inp = input()
open('hello-in.txt', 'w').write('Hello ' + inp + '\n')
'''
'''
Python scripts can read output from other programs
and scripts like these, too, using code like the following:
C:\...\PP4E\System\Streams> python
>>> import os
>>> pipe = os.popen('python hello-out.py') # 'r' is default--read stdout
>>> pipe.read()
>>> print(pipe.close()) # exit status: None is good
'''
"""
But Python scripts can also provide input to spawned programs’ standard input
streams—passing a “w” mode argument, instead of the default “r”, connects the returned
object to the spawned program’s input stream. What we write on the spawning
end shows up as input in the program started:
>>> pipe = os.popen('python hello-in.py', 'w') # 'w'--write to program stdin
>>> pipe.write('Gumby\n')
>>> pipe.close() # \n at end is optional
>>> open('hello-in.txt').read() # output sent to a file
The popen call is also smart enough to run the command string as an independent
process on platforms that support such a notion. It accepts an optional third argument
that can be used to control buffering of written text.
"""
"Redirecting input and output with subprocess"
'''
For even more control over the streams of spawned programs, we can employ the
subprocess module. This module can emulate os.popen functionality, but it can also achieve feats such as
bidirectional stream communication (accessing both a program’s input and output)
and tying the output of one program to the input of another.
This module provides multiple ways to spawn a program and get both its
standard output text and exit status.
C:\...\PP4E\System\Streams> python
>>> from subprocess import Popen, PIPE, call
>>> X = call('python hello-out.py') # convenience
>>> X
>>> pipe = Popen('python hello-out.py', stdout=PIPE)
>>> pipe.communicate()[0] # (stdout, stderr)
>>> pipe.returncode # exit status
>>> pipe = Popen('python hello-out.py', stdout=PIPE)
>>> pipe.stdout.read() # read all output
>>> pipe.wait() # exit status
'''
"""
Redirecting and connecting to the spawned program’s input stream is just as simple,
though a bit more complex than the os.popen approach with 'w' file mode
>>> pipe = Popen('python hello-in.py', stdin=PIPE)
>>> pipe.stdin.write(b'Pokey\n')
>>> pipe.stdin.close()
>>> pipe.wait()
>>> open('hello-in.txt').read() # output sent to a file
In fact, we can obtain both the input and output streams of a spawned program with
this module.
C:\...\PP4E\System\Streams> type writer.py
print("Help! Help! I'm being repressed!")
print(42)
C:\...\PP4E\System\Streams> type reader.py
print('Got this: "%s"' % input())
import sys
data = sys.stdin.readline()[:-1]
print('The meaning of life is', data, int(data) * 2)
"""
'''
The following connects two programs,
by piping the output of one Python script into another, first with shell syntax,
and then with the subprocess module:
C:\...\PP4E\System\Streams> python writer.py | python reader.py
Got this: "Help! Help! I'm being repressed!"
The meaning of life is 42 84
C:\...\PP4E\System\Streams> python
>>> from subprocess import Popen, PIPE
>>> p1 = Popen('python writer.py', stdout=PIPE)
>>> p2 = Popen('python reader.py', stdin=p1.stdout, stdout=PIPE)
>>> output = p2.communicate()[0]
>>> output
b'Got this: "Help! Help! I\'m being repressed!"\r\nThe meaning of life is 42 84\r\n'
>>> p2.returncode
0
We can get close to this with os.popen, but the fact that its pipes are read or write
(and not both) prevents us from catching the second script’s output in our code:
>>> import os
>>> p1 = os.popen('python writer.py', 'r')
>>> p2 = os.popen('python reader.py', 'w')
>>> p2.write( p1.read() )
36
>>> X = p2.close()
Got this: "Help! Help! I'm being repressed!"
The meaning of life is 42 84
>>> print(X)
None
'''
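# A small runnable sketch of the bidirectional case described above: it spawns
# reader.py (shown earlier) with both of its standard streams tied to pipes,
# feeds it two lines of input, and collects everything it prints. This assumes
# reader.py sits in the current directory; the expected output in the comments
# is only what the earlier transcripts suggest it should look like.
if __name__ == '__main__':
    from subprocess import Popen, PIPE
    child = Popen(['python', 'reader.py'], stdin=PIPE, stdout=PIPE)   # both streams piped
    out, _ = child.communicate(b'Hello pipes\n42\n')                  # write stdin, read stdout, wait
    print(out.decode())     # e.g. Got this: "Hello pipes" / The meaning of life is 42 84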
| 3.59375 | 4 |
DeepTreeAttention/callbacks/callbacks.py | zoeyingz/DeepTreeAttention | 1 | 12799516 | #Callbacks
"""Create training callbacks"""
import os
import numpy as np
import pandas as pd
from datetime import datetime
from DeepTreeAttention.utils import metrics
from DeepTreeAttention.visualization import visualize
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import Callback, TensorBoard
from tensorflow import expand_dims
class F1Callback(Callback):
def __init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10):
"""F1 callback
Args:
            n: run the callback every n epochs. If n=4, the function will run every 4 epochs
y_true: instead of iterating through the dataset every time, just do it once and pass the true labels to the function
"""
self.experiment = experiment
self.eval_dataset = eval_dataset
self.label_names = label_names
self.submodel = submodel
self.n = n
self.train_shp = train_shp
self.y_true = y_true
def on_train_end(self, logs={}):
y_pred = []
sites = []
#gather site and species matrix
y_pred = self.model.predict(self.eval_dataset)
if self.submodel in ["spectral","spatial"]:
y_pred = y_pred[0]
#F1
macro, micro = metrics.f1_scores(self.y_true, y_pred)
self.experiment.log_metric("Final MicroF1", micro)
self.experiment.log_metric("Final MacroF1", macro)
        # Log number of predictions to make sure it's constant
self.experiment.log_metric("Prediction samples",y_pred.shape[0])
results = pd.DataFrame({"true":np.argmax(self.y_true, 1),"predicted":np.argmax(y_pred, 1)})
#assign labels
if self.label_names:
results["true_taxonID"] = results.true.apply(lambda x: self.label_names[x])
results["predicted_taxonID"] = results.predicted.apply(lambda x: self.label_names[x])
#Within site confusion
site_lists = self.train_shp.groupby("taxonID").siteID.unique()
site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists)
self.experiment.log_metric(name = "Within_site confusion[training]", value = site_confusion)
plot_lists = self.train_shp.groupby("taxonID").plotID.unique()
plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists)
self.experiment.log_metric(name = "Within_plot confusion[training]", value = plot_confusion)
domain_lists = self.train_shp.groupby("taxonID").domainID.unique()
domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists)
self.experiment.log_metric(name = "Within_domain confusion[training]", value = domain_confusion)
#Genus of all the different taxonID variants should be the same, take the first
scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict()
genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict)
self.experiment.log_metric(name = "Within Genus confusion", value = genus_confusion)
#Most confused
most_confused = results.groupby(["true_taxonID","predicted_taxonID"]).size().reset_index(name="count")
most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values("count", ascending=False)
self.experiment.log_table("most_confused.csv",most_confused.values)
def on_epoch_end(self, epoch, logs={}):
        if epoch % self.n != 0:
return None
y_pred = []
sites = []
#gather site and species matrix
y_pred = self.model.predict(self.eval_dataset)
if self.submodel in ["spectral","spatial"]:
y_pred = y_pred[0]
#F1
macro, micro = metrics.f1_scores(self.y_true, y_pred)
self.experiment.log_metric("MicroF1", micro)
self.experiment.log_metric("MacroF1", macro)
        # Log number of predictions to make sure it's constant
self.experiment.log_metric("Prediction samples",y_pred.shape[0])
class ConfusionMatrixCallback(Callback):
def __init__(self, experiment, dataset, label_names, y_true, submodel):
self.experiment = experiment
self.dataset = dataset
self.label_names = label_names
self.submodel = submodel
self.y_true = y_true
def on_train_end(self, epoch, logs={}):
y_pred = self.model.predict(self.dataset)
if self.submodel is "metadata":
name = "Metadata Confusion Matrix"
elif self.submodel in ["ensemble"]:
name = "Ensemble Matrix"
else:
name = "Confusion Matrix"
cm = self.experiment.log_confusion_matrix(
self.y_true,
y_pred,
title=name,
file_name= name,
labels=self.label_names,
max_categories=90,
max_example_per_cell=1)
class ImageCallback(Callback):
def __init__(self, experiment, dataset, label_names, submodel=False):
self.experiment = experiment
self.dataset = dataset
self.label_names = label_names
self.submodel = submodel
def on_train_end(self, epoch, logs={}):
"""Plot sample images with labels annotated"""
        # fill until there are at least 20 images
images = []
y_pred = []
y_true = []
limit = 20
num_images = 0
for data, label in self.dataset:
if num_images < limit:
pred = self.model.predict(data)
images.append(data)
if self.submodel:
y_pred.append(pred[0])
y_true.append(label[0])
else:
y_pred.append(pred)
y_true.append(label)
num_images += label.shape[0]
else:
break
images = np.vstack(images)
y_true = np.concatenate(y_true)
y_pred = np.concatenate(y_pred)
y_true = np.argmax(y_true, axis=1)
y_pred = np.argmax(y_pred, axis=1)
true_taxonID = [self.label_names[x] for x in y_true]
pred_taxonID = [self.label_names[x] for x in y_pred]
counter = 0
for label, prediction, image in zip(true_taxonID, pred_taxonID, images):
figure = visualize.plot_prediction(image=image,
prediction=prediction,
label=label)
self.experiment.log_figure(figure_name="{}_{}".format(label, counter))
counter += 1
def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False):
"""Create a set of callbacks
Args:
experiment: a comet experiment object
train_data: a tf data object to generate data
validation_data: a tf data object to generate data
train_shp: the original shapefile for the train data to check site error
"""
#turn off callbacks for metadata
callback_list = []
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=10,
min_delta=0.1,
min_lr=0.00001,
verbose=1)
callback_list.append(reduce_lr)
#Get the true labels since they are not shuffled
    y_true = []
for data, label in validation_data:
if submodel in ["spatial","spectral"]:
label = label[0]
y_true.append(label)
y_true = np.concatenate(y_true)
if not submodel in ["spatial","spectral"]:
confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel)
callback_list.append(confusion_matrix)
f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp)
callback_list.append(f1)
#if submodel is None:
#plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel)
#callback_list.append(plot_images)
if log_dir is not None:
print("saving tensorboard logs at {}".format(log_dir))
tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, profile_batch=30)
callback_list.append(tensorboard)
return callback_list
| 2.609375 | 3 |
mini_projects/shrubbery/shrubbery.py | Ustabil/Python-part-one | 0 | 12799517 | # program template for mini-project 0
# Modify the print statement according to
# the mini-project instructions
#CodeSkulptor link:
#http://www.codeskulptor.org/#user40_lXiJqEZDdrSdSu5.py
print "We want... a shrubbery!" | 1.773438 | 2 |
tests/dlkit/primordium/locale/types/test_format.py | UOC/dlkit | 2 | 12799518 | <reponame>UOC/dlkit<gh_stars>1-10
import pytest
from dlkit.abstract_osid.osid import errors
from dlkit.primordium.locale.types.format import get_type_data
class TestFormat(object):
def test_get_type_data_with_format(self):
results = get_type_data('troff')
assert results['identifier'] == 'TROFF'
assert results['domain'] == 'DisplayText Formats'
assert results['display_name'] == 'troff Format Type'
assert results['display_label'] == 'troff'
assert results['description'] == 'The display text format type for the troff format.'
def test_unknown_type(self):
with pytest.raises(errors.NotFound):
get_type_data('foo')
| 2.078125 | 2 |
app/targetbalance.py | woudt/bunq2ifttt | 27 | 12799519 | <reponame>woudt/bunq2ifttt
"""
Target balance
Handles the target balance internal/external actions
"""
import json
import uuid
from flask import request
import bunq
import payment
def target_balance_internal():
""" Execute a target balance internal action """
data = request.get_json()
print("[target_balance_internal] input: {}".format(json.dumps(data)))
if "actionFields" not in data:
errmsg = "missing actionFields"
print("[target_balance_internal] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
fields = data["actionFields"]
errmsg = check_fields(True, fields)
if errmsg:
print("[target_balance_internal] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
# the account NL42BUNQ0123456789 is used for test payments
if fields["account"] == "NL42BUNQ0123456789":
return json.dumps({"data": [{"id": uuid.uuid4().hex}]})
# retrieve balance
config = bunq.retrieve_config()
if fields["payment_type"] == "DIRECT":
balance = get_balance(config, fields["account"],
fields["other_account"])
if isinstance(balance, tuple):
balance, balance2 = balance
transfer_amount = fields["amount"] - balance
if transfer_amount > balance2:
transfer_amount = balance2
else:
balance = get_balance(config, fields["account"])
if isinstance(balance, float):
transfer_amount = fields["amount"] - balance
if isinstance(balance, str):
errmsg = balance
print("[target_balance_internal] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
# construct payment message
if "{:.2f}".format(fields["amount"]) == "0.00":
errmsg = "No transfer needed, balance already ok"
print("[target_balance_internal] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
if transfer_amount > 0 and "top up" in fields["direction"]:
paymentmsg = {
"amount": {
"value": "{:.2f}".format(transfer_amount),
"currency": "EUR"
},
"counterparty_alias": {
"type": "IBAN",
"value": fields["account"],
"name": "x"
},
"description": fields["description"]
}
account = fields["other_account"]
elif transfer_amount < 0 and "skim" in fields["direction"]:
paymentmsg = {
"amount": {
"value": "{:.2f}".format(-transfer_amount),
"currency": "EUR"
},
"counterparty_alias": {
"type": "IBAN",
"value": fields["other_account"],
"name": "x"
},
"description": fields["description"]
}
account = fields["account"]
else:
errmsg = "No transfer needed, balance already ok"
print("[target_balance_internal] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
print(paymentmsg)
# get id and check permissions
if fields["payment_type"] == "DIRECT":
accid, enabled = payment.check_source_account(True, False, config,
account)
else:
accid, enabled = payment.check_source_account(False, True, config,
account)
if accid is None:
errmsg = "unknown account: "+account
if not enabled:
errmsg = "Payment type not enabled for account: "+account
if errmsg:
print("[target_balance_internal] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
# execute the payment
if fields["payment_type"] == "DIRECT":
result = bunq.post("v1/user/{}/monetary-account/{}/payment"
.format(config["user_id"], accid), paymentmsg)
else:
paymentmsg = {"number_of_required_accepts": 1, "entries": [paymentmsg]}
result = bunq.post("v1/user/{}/monetary-account/{}/draft-payment"
.format(config["user_id"], accid), paymentmsg)
print(result)
if "Error" in result:
return json.dumps({"errors": [{
"status": "SKIP",
"message": result["Error"][0]["error_description"]
}]}), 400
return json.dumps({"data": [{
"id": str(result["Response"][0]["Id"]["id"])}]})
def target_balance_external():
""" Execute a target balance external action """
data = request.get_json()
print("[target_balance_external] input: {}".format(json.dumps(data)))
if "actionFields" not in data:
errmsg = "missing actionFields"
print("[target_balance_external] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
fields = data["actionFields"]
errmsg = check_fields(False, fields)
if errmsg:
print("[target_balance_external] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
# the account NL42BUNQ0123456789 is used for test payments
if fields["account"] == "NL42BUNQ0123456789":
return json.dumps({"data": [{"id": uuid.uuid4().hex}]})
# retrieve balance
config = bunq.retrieve_config()
balance = get_balance(config, fields["account"])
if isinstance(balance, str):
errmsg = balance
print("[target_balance_external] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
transfer_amount = fields["amount"] - balance
# check for zero transfer
if "{:.2f}".format(fields["amount"]) == "0.00":
errmsg = "No transfer needed, balance already ok"
print("[target_balance_external] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
# get account id and check permission
if transfer_amount > 0:
accid = None
for acc in config["accounts"]:
if acc["iban"] == fields["account"]:
accid = acc["id"]
enabled = False
if "permissions" in config:
if fields["account"] in config["permissions"]:
if "PaymentRequest" in config["permissions"]\
[fields["account"]]:
enabled = config["permissions"][fields["account"]]\
["PaymentRequest"]
else:
accid, enabled = payment.check_source_account(False, True, config,
fields["account"])
if accid is None:
errmsg = "unknown account: "+fields["account"]
if not enabled:
errmsg = "Not permitted for account: "+fields["account"]
if errmsg:
print("[target_balance_external] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
# send request / execute payment
if transfer_amount > 0 and "top up" in fields["direction"]:
bmvalue = fields["request_phone_email_iban"].replace(" ", "")
if "@" in bmvalue:
bmtype = "EMAIL"
elif bmvalue[:1] == "+" and bmvalue[1:].isdecimal():
bmtype = "PHONE_NUMBER"
elif bmvalue[:2].isalpha() and bmvalue[2:4].isdecimal():
bmtype = "IBAN"
else:
errmsg = "Unrecognized as email, phone or iban: "+bmvalue
print("[request_inquiry] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message":\
errmsg}]}), 400
msg = {
"amount_inquired": {
"value": "{:.2f}".format(transfer_amount),
"currency": "EUR",
},
"counterparty_alias": {
"type": bmtype,
"name": bmvalue,
"value": bmvalue
},
"description": fields["request_description"],
"allow_bunqme": True,
}
print(json.dumps(msg))
config = bunq.retrieve_config()
result = bunq.post("v1/user/{}/monetary-account/{}/request-inquiry"\
.format(config["user_id"], accid), msg, config)
elif transfer_amount < 0 and "skim" in fields["direction"]:
paymentmsg = {
"amount": {
"value": "{:.2f}".format(-transfer_amount),
"currency": "EUR"
},
"counterparty_alias": {
"type": "IBAN",
"value": fields["payment_account"],
"name": fields["payment_name"]
},
"description": fields["payment_description"]
}
print(paymentmsg)
paymentmsg = {"number_of_required_accepts": 1, "entries": [paymentmsg]}
result = bunq.post("v1/user/{}/monetary-account/{}/draft-payment"
.format(config["user_id"], accid), paymentmsg)
else:
errmsg = "No transfer needed, balance already ok"
print("[target_balance_external] ERROR: "+errmsg)
return json.dumps({"errors": [{"status": "SKIP", "message": errmsg}]})\
, 400
print(result)
if "Error" in result:
return json.dumps({"errors": [{
"status": "SKIP",
"message": result["Error"][0]["error_description"]
}]}), 400
return json.dumps({"data": [{
"id": str(result["Response"][0]["Id"]["id"])}]})
def check_fields(internal, fields):
""" Check the fields """
# check expected fields
if internal:
expected_fields = ["account", "amount", "other_account", "direction",
"payment_type", "description"]
else:
expected_fields = ["account", "amount", "direction", "payment_account",
"payment_name", "payment_description",
"request_phone_email_iban", "request_description"]
for field in expected_fields:
if field not in fields:
return "missing field: "+field
# strip spaces from account numbers
fields["account"] = fields["account"].replace(" ", "")
if internal:
fields["other_account"] = fields["other_account"].replace(" ", "")
else:
fields["payment_account"] = fields["payment_account"].replace(" ", "")
# check amount
try:
orig = fields["amount"]
fields["amount"] = float(fields["amount"])
except ValueError:
fields["amount"] = -1
if fields["amount"] <= 0:
return "only positive amounts allowed: "+orig
return None
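# Illustrative sketch (not part of the original module): how check_fields could be
# exercised on its own. The payload below is made-up test data; check_fields returns
# None for a valid payload and an error string otherwise, normalising the dict in place.
def _example_check_fields():
    sample = {
        "account": "NL42 BUNQ 0123456789",
        "amount": "25.00",
        "other_account": "NL42 BUNQ 9876543210",
        "direction": "top up",
        "payment_type": "DIRECT",
        "description": "test",
    }
    assert check_fields(True, sample) is None
    assert sample["amount"] == 25.0                     # amount converted to float
    assert sample["account"] == "NL42BUNQ0123456789"    # spaces stripped in place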
def get_balance(config, account, account2=None):
""" Retrieve the balance of one or two accounts """
balances = bunq.retrieve_account_balances(config)
if account2 is None and account in balances:
return balances[account]
if account in balances and account2 in balances:
return balances[account], balances[account2]
if account not in balances:
return "Account balance not found "+account
return "Account balance not found "+account2
| 2.53125 | 3 |
setup.py | Hasenpfote/malloc_tracer | 2 | 12799520 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from setuptools import setup
# Get version without importing, which avoids dependency issues
def get_version():
with open('malloc_tracer/version.py') as version_file:
return re.search(r"""__version__\s+=\s+(['"])(?P<version>.+?)\1""",
version_file.read()).group('version')
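# Illustrative sketch (not part of the original setup script): the regex above pulls
# the version string out of a line such as __version__ = '1.2.3' without importing
# the package. The sample text below is made up.
def _example_version_regex():
    sample = "__version__ = '1.2.3'\n"
    match = re.search(r"""__version__\s+=\s+(['"])(?P<version>.+?)\1""", sample)
    assert match.group('version') == '1.2.3'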
def _long_description():
with open('README.rst', 'r') as f:
return f.read()
if __name__ == '__main__':
setup(
name='malloc_tracer',
version=get_version(),
description='This is a debugging tool for tracing malloc that occurs inside a function or class.',
long_description=_long_description(),
author='Hasenpfote',
author_email='<EMAIL>',
url='https://github.com/Hasenpfote/malloc_tracer',
download_url='',
packages = ['malloc_tracer'],
keywords=['debug', 'debugging-tool', 'tracemalloc'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Utilities'
],
python_requires='>=3.4',
install_requires=[],
)
| 1.976563 | 2 |
parse.py | endorama/parse-medici-torino | 3 | 12799521 | <gh_stars>1-10
import datetime
import fileinput
import functools
import json
import os
import re
import sys
import unittest
import urllib.parse
import requests
from fixups import ESPANDI_INDIRIZZO
MESI = {
'gennaio': 1,
'febbraio': 2,
'marzo': 3,
'aprile': 4,
'maggio': 5,
'giugno': 6,
'luglio': 7,
'agosto': 8,
'settembre': 9,
'ottobre': 10,
'novembre': 11,
'dicembre': 12,
}
AGGIORNAMENTO_RE = re.compile(r"AGGIORNAMENTO: (?P<giorno>\d+) (?P<mese>\w+) (?P<anno>\d+)")
CIRCOSCRIZIONE_RE = re.compile(r"CIRCOSCRIZIONE (?P<numero>\d+): (?P<nome>.+)")
MMG_RE = re.compile(r"MMG")
NOME_DOTTORE_RE = re.compile(r"(?P<nome>[\w\s']+) \[(?P<codice>\w+)\]")
BLOCCO_ASSOCIAZIONE_RE = re.compile(r"Associazione:")
INDIRIZZO_RE = re.compile(r"(?P<indirizzo>.+) TORINO (?P<cap>\d+) \(TORINO\) Telefono: ?(?P<telefono>\d*)?")
FAX_RE = re.compile(r"FAX \d+")
TELEFONO_RE = re.compile(r"(TELEFONO.*:\s*)?(?P<telefono>\d+)$")
BLOCCO_ORARI_RE = re.compile(r"Giorno")
GIORNO_RE = re.compile(r"(?P<giorno>Lunedi|Martedi|Mercoledi|Giovedi|Venerdi|Sabato)")
ORARI_DA_RE = re.compile(r"Dalle")
ORARIO_RE = re.compile(r"(?P<orario>\d{2}:\d{2})")
ORARI_A_RE = re.compile(r"Alle")
BLOCCO_NOTE_RE = re.compile(r"Note")
def do_geocoding(indirizzo, token):
mapbox_geocoding_v5 = "https://api.mapbox.com/geocoding/v5/mapbox.places/"
url = "{}{}.json?limit=1&country=IT&access_token={}".format(
mapbox_geocoding_v5,
urllib.parse.quote(indirizzo, safe=""),
token,
)
response = requests.get(url)
data = response.json()
feature = data["features"][0]
if "address" in feature["place_type"]:
if "Torino Turin" in feature["place_name"] or "Collegno" in feature["place_name"]:
return feature["center"]
return None
@functools.lru_cache(maxsize=128)
def geocoding(indirizzo, token):
    # replace "sc." with "scala" so that Mapbox accepts the address
indirizzo = indirizzo.replace(" sc. ", " scala ")
posizione = do_geocoding(indirizzo, token)
if posizione:
return posizione
    # if the address was not found, try to fix it by hand
correzione = [(k, v) for k, v in ESPANDI_INDIRIZZO.items() if k in indirizzo]
if correzione:
via, via_corretta = correzione[0]
indirizzo_corretto = indirizzo.replace(via, via_corretta)
posizione = do_geocoding(indirizzo_corretto, token)
if posizione:
return posizione
print("Geocoding fallito per {}".format(indirizzo), file=sys.stderr)
return None
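# Illustrative sketch (not part of the original script): thanks to lru_cache, repeated
# addresses only hit the Mapbox API once per (address, token) pair. The address and
# token below are hypothetical.
def _example_geocoding_cache(token="<MAPBOX_TOKEN>"):
    first = geocoding("via Roma 1 10100 TORINO", token)   # network request
    second = geocoding("via Roma 1 10100 TORINO", token)  # served from the cache
    assert first == second
    print(geocoding.cache_info())  # hits=1, misses=1 after the two calls above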
if __name__ == '__main__':
    # if a Mapbox token is set in the environment, geocode the addresses
mapbox_token = os.getenv("MAPBOX_ACCESS_TOKEN")
documento = {
'aggiornamento': None,
'circoscrizione_numero': None,
'circoscrizione_nome': None,
'mmg': None,
'dottori': None,
}
dottori = []
dottore = None
blocco_associazione = False
blocco_note = False
indirizzo = None
for line in fileinput.input():
line = line.strip('\x0c')
if not line.strip():
continue
match = AGGIORNAMENTO_RE.match(line)
if match:
match_dict = match.groupdict()
update = datetime.date(
int(match_dict['anno']),
MESI[match_dict['mese'].lower()],
int(match_dict['giorno'])
)
documento['aggiornamento'] = update.isoformat()
continue
match = CIRCOSCRIZIONE_RE.match(line)
if match:
match_dict = match.groupdict()
documento['circoscrizione_numero'] = match_dict['numero']
documento['circoscrizione_nome'] = match_dict['nome']
continue
match = MMG_RE.match(line)
if match:
documento['mmg'] = True
continue
match = NOME_DOTTORE_RE.match(line)
if match:
match_dict = match.groupdict()
blocco_note = False
if dottore:
dottore['indirizzi'].append(indirizzo)
dottori.append(dottore)
indirizzo = None
dottore = {
'nome': match_dict['nome'],
'codice': match_dict['codice'],
'associazione': [],
'indirizzi': [],
}
continue
match = BLOCCO_ASSOCIAZIONE_RE.match(line)
if match:
blocco_associazione = True
continue
match = INDIRIZZO_RE.match(line)
if match:
match_dict = match.groupdict()
blocco_note = False
blocco_associazione = False
if indirizzo:
dottore['indirizzi'].append(indirizzo)
indirizzo = {
'indirizzo': '{} {} TORINO'.format(match_dict['indirizzo'], match_dict['cap']),
'telefono': [match_dict['telefono']],
'giorni': [],
'ore': [],
'note': [],
}
continue
        # some doctors have no association
if blocco_associazione:
dottore['associazione'].append(line.strip())
continue
match = FAX_RE.match(line)
if match:
continue
match = TELEFONO_RE.match(line)
if match:
match_dict = match.groupdict()
indirizzo['telefono'].append(match_dict['telefono'])
continue
match = BLOCCO_ORARI_RE.match(line)
if match:
continue
match = GIORNO_RE.match(line)
if match:
match_dict = match.groupdict()
indirizzo['giorni'].append(match_dict['giorno'])
continue
match = ORARI_DA_RE.match(line)
if match:
continue
match = ORARIO_RE.match(line)
if match:
match_dict = match.groupdict()
indirizzo['ore'].append(match_dict['orario'])
continue
match = ORARI_A_RE.match(line)
if match:
continue
match = BLOCCO_NOTE_RE.match(line)
if match:
blocco_note = True
continue
if blocco_note:
indirizzo['note'].append(line.strip())
continue
print(line, file=sys.stderr)
    # the last doctor
if dottore:
dottore['indirizzi'].append(indirizzo)
dottori.append(dottore)
indirizzo = None
for dottore in dottori:
for indirizzo in dottore['indirizzi']:
            # Try to fix the opening hours
num_orari = len(indirizzo['ore']) / 2
indirizzo['orario_affidabile'] = num_orari == len(indirizzo['giorni'])
num_orari = int(num_orari)
orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i in range(num_orari)]
if indirizzo['orario_affidabile']:
indirizzo['orari'] = [
{'giorno': giorno, 'da': orario[0], 'a': orario[1]}
for giorno, orario in zip(indirizzo['giorni'], orari)
]
else:
indirizzo['orari'] = [{'giorno': None, 'da': da, 'a': a} for da, a in orari]
if mapbox_token:
posizione = geocoding(indirizzo['indirizzo'], mapbox_token)
indirizzo['posizione'] = posizione
documento['dottori'] = dottori
print(json.dumps(documento))
class ParseTestCase(unittest.TestCase):
def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self):
match = NOME_DOTTORE_RE.match("NUR ADDO' [01234]")
match_dict = match.groupdict()
self.assertEqual(match_dict, {"nome": "NUR ADDO'", "codice": "01234"})
| 2.625 | 3 |
tests/linear/assembly/test_assembly.py | LMNS3d/sharpy | 0 | 12799522 | <filename>tests/linear/assembly/test_assembly.py
'''
Test assembly
<NAME>, 29 May 2018
'''
import os
import copy
import warnings
import unittest
import itertools
import numpy as np
import scipy.linalg as scalg
import sharpy.utils.h5utils as h5utils
import sharpy.linear.src.assembly as assembly
import sharpy.linear.src.multisurfaces as multisurfaces
import sharpy.linear.src.surface as surface
import sharpy.linear.src.libuvlm as libuvlm
import sharpy.utils.algebra as algebra
np.set_printoptions(linewidth=200,precision=3)
def max_error_tensor(Pder_an,Pder_num):
'''
    Finds the maximum error of the analytical derivatives Pder_an with respect
    to the numerical derivatives Pder_num. The error is:
    - relative, if the element of Pder_an is nonzero
    - absolute, otherwise
    The function returns the maximum error together with the absolute and
    relative error tensors.
@warning: The relative error tensor may contain NaN or Inf if the
analytical derivative is zero. These elements are filtered out during the
search for maximum error, and absolute error is checked.
'''
Eabs=np.abs(Pder_num-Pder_an)
nnzvec=Pder_an!=0
Erel=np.zeros(Pder_an.shape)
Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec])
# Relative error check: remove NaN and inf...
iifinite=np.isfinite(Erel)
err_max=0.0
for err_here in Erel[iifinite]:
if np.abs(err_here)>err_max:
err_max=err_here
# Zero elements check
iizero=np.abs(Pder_an)<1e-15
for der_here in Pder_num[iizero]:
if np.abs(der_here)>err_max:
err_max=der_here
return err_max, Eabs, Erel
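# Illustrative sketch (not part of the original test module): a tiny worked example of
# max_error_tensor. Where the analytical entry is nonzero the error is relative, where
# it is zero the raw numerical value is compared instead.
def _example_max_error_tensor():
    Pan = np.array([[2.0, 0.0],
                    [4.0, 1.0]])
    Pnum = np.array([[2.2, 0.05],
                     [4.0, 1.0]])
    err_max, Eabs, Erel = max_error_tensor(Pan, Pnum)
    # relative error at (0,0) is 0.1; the zero entry (0,1) contributes its absolute
    # value 0.05, so the maximum reported error is 0.1
    assert np.isclose(err_max, 0.1)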
class Test_assembly(unittest.TestCase):
''' Test methods into assembly module '''
def setUp(self):
# select test case
fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5'
haero = h5utils.readh5(fname)
tsdata = haero.ts00000
# # Rotating cases
# fname = './basic_rotating_wing/basic_wing.data.h5'
# haero = h5utils.readh5(fname)
# tsdata = haero.data.aero.timestep_info[-1]
# tsdata.omega = []
# for ss in range(haero.data.aero.n_surf):
# tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6])
MS=multisurfaces.MultiAeroGridSurfaces(tsdata)
MS.get_normal_ind_velocities_at_collocation_points()
MS.verify_non_penetration()
MS.verify_aic_coll()
MS.get_joukovski_qs()
MS.verify_joukovski_qs()
self.MS=MS
def test_nc_dqcdzeta(self):
'''
For each output surface, where induced velocity is computed, all other
surfaces are looped.
For wakes, only TE is displaced.
'''
print('----------------------------- Testing assembly.test_nc_dqcdzeta')
MS=self.MS
n_surf=MS.n_surf
# analytical
Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star)
# check option
Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list)
Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) )
_,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp)
# max absolute error
ermax=np.max(ErAbs)
# relative error at max abs error point
iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape)
ermax_rel=ErRel[iimax]
assert ermax_rel<1e-16,\
            'option Merge=True not working correctly, relative error (%.3e) too high!' %ermax_rel
# allocate numerical
Derlist_num=[]
for ii in range(n_surf):
sub=[]
for jj in range(n_surf):
sub.append(0.0*Dervert_list[ii][jj])
Derlist_num.append(sub)
# store reference circulation and normal induced velocities
MS.get_normal_ind_velocities_at_collocation_points()
Zeta0=[]
Zeta0_star=[]
Vind0=[]
N0=[]
ZetaC0=[]
for ss in range(n_surf):
Zeta0.append(MS.Surfs[ss].zeta.copy())
ZetaC0.append(MS.Surfs[ss].zetac.copy('F'))
Zeta0_star.append(MS.Surfs_star[ss].zeta.copy())
Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy())
N0.append(MS.Surfs[ss].normals.copy())
# calculate vis FDs
Steps=[1e-6,]
step=Steps[0]
### loop input surfs
for ss_in in range(n_surf):
Surf_in=MS.Surfs[ss_in]
Surf_star_in=MS.Surfs_star[ss_in]
M_in,N_in=Surf_in.maps.M,Surf_in.maps.N
# perturb
for kk in range(3*Surf_in.maps.Kzeta):
cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) )
# perturb bound. vertices and collocation
Surf_in.zeta=Zeta0[ss_in].copy()
Surf_in.zeta[cc,mm,nn]+=step
Surf_in.generate_collocations()
# perturb wake TE
if mm==M_in:
Surf_star_in.zeta=Zeta0_star[ss_in].copy()
Surf_star_in.zeta[cc,0,nn]+=step
### prepare output surfaces
# - ensure normals are unchanged
# - del ind. vel on output to ensure they are re-computed
for ss_out in range(n_surf):
Surf_out=MS.Surfs[ss_out]
Surf_out.normals=N0[ss_out].copy()
del Surf_out.u_ind_coll_norm
try:
del Surf_out.u_ind_coll
except AttributeError:
pass
### recalculate
MS.get_normal_ind_velocities_at_collocation_points()
# restore
Surf_in.zeta=Zeta0[ss_in].copy()
Surf_in.zetac=ZetaC0[ss_in].copy('F')
Surf_star_in.zeta=Zeta0_star[ss_in].copy()
# estimate derivatives
for ss_out in range(n_surf):
Surf_out=MS.Surfs[ss_out]
dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step
Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C')
### check error
for ss_out in range(n_surf):
for ss_in in range(n_surf):
Der_an=Dervert_list[ss_out][ss_in].copy()
if ss_in==ss_out:
Der_an=Der_an+Dercoll_list[ss_out]
Der_num=Derlist_num[ss_out][ss_in]
_,ErAbs,ErRel=max_error_tensor(Der_an,Der_num)
# max absolute error
ermax=np.max(ErAbs)
# relative error at max abs error point
iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape)
ermax_rel=ErRel[iimax]
print('Bound%.2d->Bound%.2d\tFDstep\tErrAbs\tErrRel'%(ss_in,ss_out))
print('\t\t\t%.1e\t%.1e\t%.1e' %(step,ermax,ermax_rel))
                assert ermax<50*step and ermax_rel<50*step, 'Test failed!'
# fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4))
# ax1=fig.add_subplot(131)
# ax1.spy(ErAbs,precision=1e2*step)
# ax1.set_title('error abs %d to %d' %(ss_in,ss_out))
# ax2=fig.add_subplot(132)
# ax2.spy(ErRel,precision=1e2*step)
# ax2.set_title('error rel %d to %d' %(ss_in,ss_out))
# ax3=fig.add_subplot(133)
# ax3.spy(Dercoll_list[ss_out],precision=50*step)
# ax3.set_title('Dcoll an. %d to %d' %(ss_out,ss_out))
# #plt.show()
# plt.close()
def test_uc_dncdzeta(self,PlotFlag=False):
print('---------------------------------- Testing assembly.uc_dncdzeta')
MS=self.MS
n_surf=MS.n_surf
MS.get_ind_velocities_at_collocation_points()
MS.get_normal_ind_velocities_at_collocation_points()
for ss in range(n_surf):
print('Surface %.2d:' %ss)
Surf=MS.Surfs[ss]
# generate non-zero field of external force
Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0
Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0
Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0
Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape)
### analytical derivative
# ind velocities computed already
Surf.get_input_velocities_at_collocation_points()
Der=assembly.uc_dncdzeta(Surf)
### numerical derivative
#Surf.get_normal_input_velocities_at_collocation_points()
u_tot0=Surf.u_ind_coll+Surf.u_input_coll
u_norm0=Surf.project_coll_to_normal(u_tot0)
u_norm0_vec=u_norm0.reshape(-1,order='C')
zeta0=Surf.zeta
DerNum=np.zeros(Der.shape)
Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6])
Er_max=0.0*Steps
for ss in range(len(Steps)):
step=Steps[ss]
for jj in range(3*Surf.maps.Kzeta):
# perturb
cc_pert=Surf.maps.ind_3d_vert_vect[0][jj]
mm_pert=Surf.maps.ind_3d_vert_vect[1][jj]
nn_pert=Surf.maps.ind_3d_vert_vect[2][jj]
zeta_pert=zeta0.copy()
zeta_pert[cc_pert,mm_pert,nn_pert]+=step
# calculate new normal velocity
Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert,
u_ext=Surf.u_ext,gamma=Surf.gamma)
u_norm=Surf_pert.project_coll_to_normal(u_tot0)
u_norm_vec=u_norm.reshape(-1,order='C')
# FD derivative
DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step
er_max=np.max(np.abs(Der-DerNum))
print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) )
assert er_max<5e1*step, 'Error larger than 50 times step size'
Er_max[ss]=er_max
# assert error decreases with step size
for ss in range(1,len(Steps)):
assert Er_max[ss]<Er_max[ss-1],\
'Error not decreasing as FD step size is reduced'
print('------------------------------------------------------------ OK')
if PlotFlag:
pass
# fig = plt.figure('Spy Der',figsize=(10,4))
# ax1 = fig.add_subplot(121)
# ax1.spy(Der,precision=step)
# ax2 = fig.add_subplot(122)
# ax2.spy(DerNum,precision=step)
# plt.show()
def test_nc_domegazetadzeta(self):
"""
        Variation at collocation points due to geometrical variations at vertices
Needs to be tested with a case that actually rotates
"""
print('----------------------------- Testing assembly.test_nc_domegazetadzeta')
MS=self.MS
n_surf=MS.n_surf
# analytical
Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star)
# allocate numerical
# Derlist_num=[]
# for ii in range(n_surf):
# sub=[]
# for jj in range(n_surf):
# sub.append(0.0*Dervert_list[ii][jj])
# Derlist_num.append(sub)
# Store the initial values of the variabes
Zeta0=[]
Zeta0_star=[]
N0=[]
ZetaC0=[]
for ss in range(n_surf):
Zeta0.append(MS.Surfs[ss].zeta.copy())
ZetaC0.append(MS.Surfs[ss].zetac.copy('F'))
Zeta0_star.append(MS.Surfs_star[ss].zeta.copy())
N0.append(MS.Surfs[ss].normals.copy())
# Computation
Steps=[1e-2, 1e-4, 1e-6]
nsteps = len(Steps)
error = np.zeros((nsteps,))
for istep in range(nsteps):
step = Steps[istep]
for ss in range(n_surf):
Surf=MS.Surfs[ss]
Surf_star=MS.Surfs_star[ss]
M,N=Surf.maps.M,Surf.maps.N
perturb_vector = np.zeros(3*Surf.maps.Kzeta)
# PERTURBATION OF THE SURFACE
for kk in range(3*Surf.maps.Kzeta):
# generate a random perturbation between the 90% and the 110% of the step
perturb_vector[kk] += step*(0.2*np.random.rand()+0.9)
cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) )
# perturb bound. vertices and collocation
Surf.zeta=Zeta0[ss].copy()
Surf.zeta[cc,mm,nn] += perturb_vector[kk]
# perturb wake TE
if mm==M:
Surf_star.zeta=Zeta0_star[ss].copy()
Surf_star.zeta[cc,0,nn] += perturb_vector[kk]
Surf.generate_collocations()
# COMPUTE THE DERIVATIVES
Der_an = np.zeros(Surf.maps.K)
Der_an = np.dot(Dervert_list[ss], perturb_vector)
Der_num = np.zeros(Surf.maps.K)
ipanel = 0
skew_omega = algebra.skew(Surf.omega)
for mm in range(M):
for nn in range(N):
Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) -
np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn])))
ipanel += 1
# COMPUTE THE ERROR
error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max())
print('FD step: %.2e ---> Max error: %.2e'%(step,error[istep]) )
assert error[istep]<5e1*step, 'Error larger than 50 times the step size'
if istep > 0:
assert error[istep]<=error[istep-1],\
'Error not decreasing as FD step size is reduced'
print('------------------------------------------------------------ OK')
def test_dfqsdgamma_vrel0(self):
print('----------------------------- Testing assembly.dfqsdgamma_vrel0')
MS=self.MS
n_surf=MS.n_surf
Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star)
Er_max=[]
Er_max_star=[]
Steps=[1e-2,1e-4,1e-6,]
for ss in range(n_surf):
Der_an=Der_list[ss]
Der_star_an=Der_star_list[ss]
Surf=MS.Surfs[ss]
Surf_star=MS.Surfs_star[ss]
M,N=Surf.maps.M,Surf.maps.N
K=Surf.maps.K
fqs0=Surf.fqs.copy()
gamma0=Surf.gamma.copy()
for step in Steps:
Der_num=0.0*Der_an
Der_star_num=0.0*Der_star_an
### Bound
for pp in range(K):
mm=Surf.maps.ind_2d_pan_scal[0][pp]
nn=Surf.maps.ind_2d_pan_scal[1][pp]
Surf.gamma=gamma0.copy()
Surf.gamma[mm,nn]+=step
Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:])
df=(Surf.fqs-fqs0)/step
Der_num[:,pp]=df.reshape(-1,order='C')
er_max=np.max(np.abs(Der_an-Der_num))
print('Surface %.2d - bound:' %ss)
print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) )
assert er_max<5e1*step, 'Error larger than 50 times step size'
Er_max.append(er_max)
### Wake
Surf.gamma=gamma0.copy()
gammaw_TE0=Surf_star.gamma[0,:].copy()
M_star,N_star=Surf_star.maps.M,Surf_star.maps.N
K_star=Surf_star.maps.K
for nn in range(N):
pp=np.ravel_multi_index( (0,nn), (M_star,N_star))
gammaw_TE=gammaw_TE0.copy()
gammaw_TE[nn]+=step
Surf.get_joukovski_qs(gammaw_TE=gammaw_TE)
df=(Surf.fqs-fqs0)/step
Der_star_num[:,pp]=df.reshape(-1,order='C')
er_max=np.max(np.abs(Der_star_an-Der_star_num))
print('Surface %.2d - wake:' %ss)
print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) )
assert er_max<5e1*step, 'Error larger than 50 times step size'
Er_max_star.append(er_max)
Surf.gamma=gamma0.copy()
### Warning: this test fails: the dependency on gamma is linear, hence
# great accuracy is obtained even with large steps. In fact, reducing
# the step quickly introduced round-off error.
# # assert error decreases with step size
# for ii in range(1,len(Steps)):
# assert Er_max[ii]<Er_max[ii-1],\
# 'Error not decreasing as FD step size is reduced'
# assert Er_max_star[ii]<Er_max_star[ii-1],\
# 'Error not decreasing as FD step size is reduced'
def test_dfqsdzeta_vrel0(self):
'''
Note: the get_joukovski_qs method re-computes the induced velocity
at the panel segments. A copy of Surf is required to ensure that other
tests are not affected.
'''
print('------------------------------ Testing assembly.dfqsdzeta_vrel0')
MS=self.MS
n_surf=MS.n_surf
Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star)
Er_max=[]
Steps=[1e-2,1e-4,1e-6,]
for ss in range(n_surf):
Der_an=Der_list[ss]
Surf=copy.deepcopy(MS.Surfs[ss])
#Surf_star=MS.Surfs_star[ss]
M,N=Surf.maps.M,Surf.maps.N
K=Surf.maps.K
Kzeta=Surf.maps.Kzeta
fqs0=Surf.fqs.copy()
zeta0=Surf.zeta.copy()
for step in Steps:
Der_num=0.0*Der_an
for kk in range(3*Kzeta):
Surf.zeta=zeta0.copy()
ind_3d=np.unravel_index(kk, (3,M+1,N+1) )
Surf.zeta[ind_3d]+=step
Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:])
df=(Surf.fqs-fqs0)/step
Der_num[:,kk]=df.reshape(-1,order='C')
er_max=np.max(np.abs(Der_an-Der_num))
print('Surface %.2d - bound:' %ss)
print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) )
assert er_max<5e1*step, 'Error larger than 50 times step size'
Er_max.append(er_max)
def test_dfqsdzeta_omega(self):
'''
Note: the get_joukovski_qs method re-computes the induced velocity
at the panel segments. A copy of Surf is required to ensure that other
tests are not affected.
Needs to be tested with a case that actually rotates
'''
print('------------------------------ Testing assembly.dfqsdzeta_omega')
# rename
MS=self.MS
n_surf=MS.n_surf
# Compute the anaytical derivative of the case
Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star)
# Initialize
Er_max=[]
# Define steps to run
Steps=[1e-2,1e-4,1e-6,]
for ss in range(n_surf):
# Select the surface with the analytica derivatives
Der_an=Der_an_list[ss]
# Copy to avoid modifying the original for other tests
Surf=copy.deepcopy(MS.Surfs[ss])
# Define variables
M,N=Surf.maps.M,Surf.maps.N
K=Surf.maps.K
Kzeta=Surf.maps.Kzeta
# Save the reference values at equilibrium
fqs0=Surf.fqs.copy()
zeta0=Surf.zeta.copy()
u_input_seg0=Surf.u_input_seg.copy()
for step in Steps:
# Initialize
Der_num = 0.0*Der_an
# Loop through the different grid modifications (three directions per vertex point)
for kk in range(3*Kzeta):
# Initialize to remove previous movements
Surf.zeta=zeta0.copy()
# Define DoFs where modifications will take place and modify the grid
ind_3d=np.unravel_index(kk, (3,M+1,N+1) )
Surf.zeta[ind_3d]+=step
# Recompute get_ind_velocities_at_segments and recover the previous grid
Surf.get_input_velocities_at_segments()
Surf.zeta=zeta0.copy()
# Compute new forces
Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:])
df=(Surf.fqs-fqs0)/step
Der_num[:,kk]=df.reshape(-1,order='C')
er_max=np.max(np.abs(Der_an-Der_num))
print('Surface %.2d - bound:' %ss)
print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) )
assert er_max<5e1*step, 'Error larger than 50 times step size'
Er_max.append(er_max)
def test_dfqsduinput(self):
'''
Step change in input velocity is allocated to both u_ext and zeta_dot
'''
print('---------------------------------- Testing assembly.dfqsduinput')
MS=self.MS
n_surf=MS.n_surf
Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star)
Er_max=[]
Steps=[1e-2,1e-4,1e-6,]
for ss in range(n_surf):
Der_an=Der_list[ss]
#Surf=copy.deepcopy(MS.Surfs[ss])
Surf=MS.Surfs[ss]
#Surf_star=MS.Surfs_star[ss]
M,N=Surf.maps.M,Surf.maps.N
K=Surf.maps.K
Kzeta=Surf.maps.Kzeta
fqs0=Surf.fqs.copy()
u_ext0=Surf.u_ext.copy()
zeta_dot0=Surf.zeta_dot.copy()
for step in Steps:
Der_num=0.0*Der_an
for kk in range(3*Kzeta):
Surf.u_ext=u_ext0.copy()
Surf.zeta_dot=zeta_dot0.copy()
ind_3d=np.unravel_index(kk, (3,M+1,N+1) )
Surf.u_ext[ind_3d]+=0.5*step
Surf.zeta_dot[ind_3d]+=-0.5*step
Surf.get_input_velocities_at_segments()
Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:])
df=(Surf.fqs-fqs0)/step
Der_num[:,kk]=df.reshape(-1,order='C')
er_max=np.max(np.abs(Der_an-Der_num))
print('Surface %.2d - bound:' %ss)
print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) )
assert er_max<5e1*step, 'Error larger than 50 times step size'
Er_max.append(er_max)
def test_dfqsdvind_gamma(self):
print('------------------------------ Testing assembly.dfqsdvind_gamma')
MS=self.MS
n_surf=MS.n_surf
# analytical
Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star)
# allocate numerical
Der_list_num=[]
Der_star_list_num=[]
for ii in range(n_surf):
sub=[]
sub_star=[]
for jj in range(n_surf):
sub.append(0.0*Der_list[ii][jj])
sub_star.append(0.0*Der_star_list[ii][jj])
Der_list_num.append(sub)
Der_star_list_num.append(sub_star)
# store reference circulation and force
Gamma0=[]
Gammaw0=[]
Fqs0=[]
for ss in range(n_surf):
Gamma0.append(MS.Surfs[ss].gamma.copy())
Gammaw0.append(MS.Surfs_star[ss].gamma.copy())
Fqs0.append(MS.Surfs[ss].fqs.copy())
# calculate vis FDs
#Steps=[1e-2,1e-4,1e-6,]
Steps=[1e-5,]
step=Steps[0]
###### bound
for ss_in in range(n_surf):
Surf_in=MS.Surfs[ss_in]
# perturb
for pp in range(Surf_in.maps.K):
mm=Surf_in.maps.ind_2d_pan_scal[0][pp]
nn=Surf_in.maps.ind_2d_pan_scal[1][pp]
Surf_in.gamma=Gamma0[ss_in].copy()
Surf_in.gamma[mm,nn]+=step
# recalculate induced velocity everywhere
MS.get_ind_velocities_at_segments(overwrite=True)
# restore circulation: (include only induced velocity contrib.)
Surf_in.gamma=Gamma0[ss_in].copy()
# estimate derivatives
for ss_out in range(n_surf):
Surf_out=MS.Surfs[ss_out]
fqs0=Fqs0[ss_out].copy()
Surf_out.get_joukovski_qs(
gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:])
df=(Surf_out.fqs-fqs0)/step
Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C')
###### wake
for ss_in in range(n_surf):
Surf_in=MS.Surfs_star[ss_in]
# perturb
for pp in range(Surf_in.maps.K):
mm=Surf_in.maps.ind_2d_pan_scal[0][pp]
nn=Surf_in.maps.ind_2d_pan_scal[1][pp]
Surf_in.gamma=Gammaw0[ss_in].copy()
Surf_in.gamma[mm,nn]+=step
# recalculate induced velocity everywhere
MS.get_ind_velocities_at_segments(overwrite=True)
# restore circulation: (include only induced velocity contrib.)
Surf_in.gamma=Gammaw0[ss_in].copy()
# estimate derivatives
for ss_out in range(n_surf):
Surf_out=MS.Surfs[ss_out]
fqs0=Fqs0[ss_out].copy()
Surf_out.get_joukovski_qs(
gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs to be used here!
df=(Surf_out.fqs-fqs0)/step
Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C')
### check error
Er_max=[]
Er_max_star=[]
for ss_out in range(n_surf):
for ss_in in range(n_surf):
Der_an=Der_list[ss_out][ss_in]
Der_num=Der_list_num[ss_out][ss_in]
ErMat=Der_an-Der_num
ermax=np.max(np.abs(ErMat))
print('Bound%.2d->Bound%.2d\tFDstep\tError'%(ss_in,ss_out))
print('\t\t\t%.1e\t%.1e' %(step,ermax))
assert ermax<50*step, 'Test failed!'
Der_an=Der_star_list[ss_out][ss_in]
Der_num=Der_star_list_num[ss_out][ss_in]
ErMat=Der_an-Der_num
ermax=np.max(np.abs(ErMat))
print('Wake%.2d->Bound%.2d\tFDstep\tError'%(ss_in,ss_out))
print('\t\t\t%.1e\t%.1e' %(step,ermax))
assert ermax<50*step, 'Test failed!'
# fig = plt.figure('Spy Der',figsize=(10,4))
# ax1 = fig.add_subplot(111)
# ax1.spy(ErMat,precision=50*step)
# plt.show()
def test_dvinddzeta(self):
'''
        For each output surface, where the induced velocity is computed, all other
surfaces are looped.
For wakes, only TE is displaced.
'''
def comp_vind(zetac,MS):
            # compute induced velocity
V=np.zeros((3,))
for ss in range(n_surf):
Surf_in=MS.Surfs[ss]
Surf_star_in=MS.Surfs_star[ss]
V+=Surf_in.get_induced_velocity(zetac)
V+=Surf_star_in.get_induced_velocity(zetac)
return V
print('----------------------------------- Testing assembly.dvinddzeta')
MS=self.MS
n_surf=MS.n_surf
zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3])
Dercoll=np.zeros((3,3))
Dervert_list=[]
for ss_in in range(n_surf):
dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True)
dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in],
IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M)
Dercoll+=dcoll_b+dcoll_w
Dervert_list.append(dvert_b+dvert_w)
# allocate numerical
Dercoll_num=np.zeros((3,3))
Dervert_list_num=[]
for ii in range(n_surf):
Dervert_list_num.append(0.0*Dervert_list[ii])
# store reference grid
Zeta0=[]
Zeta0_star=[]
for ss in range(n_surf):
Zeta0.append(MS.Surfs[ss].zeta.copy())
Zeta0_star.append(MS.Surfs_star[ss].zeta.copy())
V0=comp_vind(zetac,MS)
# calculate vis FDs
#Steps=[1e-2,1e-4,1e-6,]
Steps=[1e-6,]
step=Steps[0]
### vertices
for ss_in in range(n_surf):
Surf_in=MS.Surfs[ss_in]
Surf_star_in=MS.Surfs_star[ss_in]
M_in,N_in=Surf_in.maps.M,Surf_in.maps.N
# perturb
for kk in range(3*Surf_in.maps.Kzeta):
cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) )
# perturb bound
Surf_in.zeta=Zeta0[ss_in].copy()
Surf_in.zeta[cc,mm,nn]+=step
# perturb wake TE
if mm==M_in:
Surf_star_in.zeta=Zeta0_star[ss_in].copy()
Surf_star_in.zeta[cc,0,nn]+=step
# recalculate induced velocity everywhere
Vnum=comp_vind(zetac,MS)
dv=(Vnum-V0)/step
Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C')
# restore
Surf_in.zeta=Zeta0[ss_in].copy()
if mm==M_in:
Surf_star_in.zeta=Zeta0_star[ss_in].copy()
### check error at colloc
Dercoll_num=np.zeros((3,3))
for cc in range(3):
zetac_pert=zetac.copy()
zetac_pert[cc]+=step
Vnum=comp_vind(zetac_pert,MS)
Dercoll_num[:,cc]=(Vnum-V0)/step
ercoll=np.max(np.abs(Dercoll-Dercoll_num))
print('Error coll.\tFDstep\tErrAbs')
print('\t\t%.1e\t%.1e' %(step,ercoll))
#if ercoll>10*step: embed()
assert ercoll<10*step, 'Error at collocation point'
### check error at vert
for ss_in in range(n_surf):
Der_an=Dervert_list[ss_in]
Der_num=Dervert_list_num[ss_in]
ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num)
# max absolute error
ermax=np.max(ErAbs)
# relative error at max abs error point
iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape)
ermax_rel=ErRel[iimax]
print('Bound and wake%.2d\tFDstep\tErrAbs\tErrRel'%ss_in)
print('\t\t\t%.1e\t%.1e\t%.1e' %(step,ermax,ermax_rel))
            assert ermax<50*step and ermax_rel<50*step, 'Error at vertices'
# fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4))
# ax1=fig.add_subplot(121)
# ax1.spy(ErAbs,precision=1e2*step)
# ax1.set_title('error abs %d' %(ss_in))
# ax2=fig.add_subplot(122)
# ax2.spy(ErRel,precision=1e2*step)
# ax2.set_title('error rel %d' %(ss_in))
# #plt.show()
# plt.close()
def test_dfqsdvind_zeta(self):
'''
        For each output surface, where the induced velocity is computed, all other
surfaces are looped.
For wakes, only TE is displaced.
'''
print('------------------------------- Testing assembly.dfqsdvind_zeta')
MS=self.MS
n_surf=MS.n_surf
# analytical
Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star)
# allocate numerical
Derlist_num=[]
for ii in range(n_surf):
sub=[]
for jj in range(n_surf):
sub.append(0.0*Dervert_list[ii][jj])
Derlist_num.append(sub)
# store reference circulation and force
Zeta0=[]
Zeta0_star=[]
Fqs0=[]
for ss in range(n_surf):
Zeta0.append(MS.Surfs[ss].zeta.copy())
Zeta0_star.append(MS.Surfs_star[ss].zeta.copy())
Fqs0.append(MS.Surfs[ss].fqs.copy())
# calculate vis FDs
#Steps=[1e-2,1e-4,1e-6,]
Steps=[1e-6,]
step=Steps[0]
### loop input surfs
for ss_in in range(n_surf):
Surf_in=MS.Surfs[ss_in]
Surf_star_in=MS.Surfs_star[ss_in]
M_in,N_in=Surf_in.maps.M,Surf_in.maps.N
# perturb
for kk in range(3*Surf_in.maps.Kzeta):
cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) )
# perturb bound
Surf_in.zeta=Zeta0[ss_in].copy()
Surf_in.zeta[cc,mm,nn]+=step
# perturb wake TE
if mm==M_in:
Surf_star_in.zeta=Zeta0_star[ss_in].copy()
Surf_star_in.zeta[cc,0,nn]+=step
# recalculate induced velocity everywhere
MS.get_ind_velocities_at_segments(overwrite=True)
# restore zeta: (include only induced velocity contrib.)
Surf_in.zeta=Zeta0[ss_in].copy()
Surf_star_in.zeta=Zeta0_star[ss_in].copy()
# estimate derivatives
for ss_out in range(n_surf):
Surf_out=MS.Surfs[ss_out]
fqs0=Fqs0[ss_out].copy()
Surf_out.get_joukovski_qs(
gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:])
df=(Surf_out.fqs-fqs0)/step
Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C')
### check error
for ss_out in range(n_surf):
for ss_in in range(n_surf):
Der_an=Dervert_list[ss_out][ss_in].copy()
if ss_in==ss_out:
Der_an=Der_an+Dercoll_list[ss_out]
Der_num=Derlist_num[ss_out][ss_in]
ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num)
# max absolute error
ermax=np.max(ErAbs)
# relative error at max abs error point
iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape)
ermax_rel=ErRel[iimax]
print('Bound%.2d->Bound%.2d\tFDstep\tErrAbs\tErrRel'%(ss_in,ss_out))
print('\t\t\t%.1e\t%.1e\t%.1e' %(step,ermax,ermax_rel))
assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!'
# fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4))
# ax1=fig.add_subplot(131)
# ax1.spy(ErAbs,precision=1e2*step)
# ax1.set_title('error abs %d to %d' %(ss_in,ss_out))
# ax2=fig.add_subplot(132)
# ax2.spy(ErRel,precision=1e2*step)
# ax2.set_title('error rel %d to %d' %(ss_in,ss_out))
# ax3=fig.add_subplot(133)
# ax3.spy(Dercoll_list[ss_out],precision=50*step)
# ax3.set_title('Dcoll an. %d to %d' %(ss_out,ss_out))
# #plt.show()
# plt.close()
def test_dfunstdgamma_dot(self):
'''
Test derivative of unsteady aerodynamic force with respect to changes in
panel circulation.
Warning: test assumes the derivative of the unsteady force only depends on
Gamma_dot, which is true only for steady-state linearisation points
'''
MS=self.MS
Ders_an=assembly.dfunstdgamma_dot(MS.Surfs)
step=1e-6
Ders_num=[]
n_surf=len(MS.Surfs)
for ss in range(n_surf):
Surf=MS.Surfs[ss]
Kzeta,K=Surf.maps.Kzeta,Surf.maps.K
M,N=Surf.maps.M,Surf.maps.N
Dnum=np.zeros((3*Kzeta,K))
            # get reference values
Surf.get_joukovski_unsteady()
Gamma_dot0=Surf.gamma_dot.copy()
F0=Surf.funst.copy()
for pp in range(K):
mm,nn=np.unravel_index( pp, (M,N) )
Surf.gamma_dot=Gamma_dot0.copy()
Surf.gamma_dot[mm,nn]+=step
Surf.get_joukovski_unsteady()
dF=(Surf.funst-F0)/step
Dnum[:,pp]=dF.reshape(-1)
# restore
Surf.gamma_dot=Gamma_dot0.copy()
### verify
ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum)
# max absolute error
ermax=np.max(ErAbs)
# relative error at max abs error point
iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape)
ermax_rel=ErRel[iimax]
print('Bound%.2d\t\t\tFDstep\tErrAbs\tErrRel'%(ss,))
print('\t\t\t%.1e\t%.1e\t%.1e' %(step,ermax,ermax_rel))
assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!'
def test_wake_prop(self):
MS=self.MS
C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star)
n_surf=len(MS.Surfs)
for ss in range(n_surf):
Surf=MS.Surfs[ss]
Surf_star=MS.Surfs_star[ss]
N=Surf.maps.N
K_star=Surf_star.maps.K
C=C_list[ss]
Cstar=Cstar_list[ss]
# add noise to circulations
gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape )
gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape )
gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1))
gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1)))
assert np.max(np.abs(gvec-gvec_ref))<1e-15,\
'Prop. from trailing edge not correct'
if __name__=='__main__':
unittest.main()
# T=Test_assembly()
# T.setUp()
# ### force equation (qs term)
# T.test_dvinddzeta()
# T.test_dfqsdvind_zeta() # run setUp after this test
# T.setUp()
# T.test_dfqsdvind_gamma()
# T.test_dfqsduinput()
# T.test_dfqsdzeta_vrel0()
# T.test_dfqsdgamma_vrel0()
# ### state equation terms
# T.test_uc_dncdzeta()
# T.test_nc_dqcdzeta()
### force equation (unsteady)
# T.test_dfunstdgamma_dot()
| 2.203125 | 2 |
CIFA10/resnet.py | sy2616/DATA | 2 | 12799523 | <filename>CIFA10/resnet.py
import torch
from torch import nn
from torch.nn import functional as F
class ResBl(nn.Module):
def __init__(self,ch_in,ch_out,stride=1):
super(ResBl,self).__init__()
self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1)
self.bn1=nn.BatchNorm2d(ch_out)
self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1)
self.bn2=nn.BatchNorm2d(ch_out)
self.extra=nn.Sequential()
if ch_out!=ch_in:
self.extra=nn.Sequential(
nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride),
nn.BatchNorm2d(ch_out)
)
def forward(self, x):
out=F.relu(self.bn1(self.conv1(x)))
out=self.bn2(self.conv2(out))
out=self.extra(x)+out
return out
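# Illustrative sketch (not part of the original module): the 1x1 convolution in
# self.extra reshapes the identity branch whenever the channel count or stride
# changes, so the skip connection and the main branch can be summed. The shapes
# below follow from the block definition above.
def _example_resbl_shapes():
    blk = ResBl(64, 128, stride=2)
    x = torch.randn(2, 64, 32, 32)
    out = blk(x)
    assert out.shape == (2, 128, 16, 16)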
class Resnet(nn.Module):
def __init__(self):
super(Resnet,self).__init__()
self.con1=nn.Sequential(
nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0),
nn.BatchNorm2d(64)
)
self.blc1=ResBl(64,128,stride=2)
self.blc2=ResBl(128,256,stride=2)
self.blc3=ResBl(256,512,stride=2)
self.blc4=ResBl(512,512,stride=2)
self.outlayer=nn.Linear(512*1*1,10)
def forward(self,x):
x=F.relu(self.con1(x))
x=self.blc1(x)
x=self.blc2(x)
x=self.blc3(x)
x=self.blc4(x)
#print(x.shape)
x=F.adaptive_avg_pool2d(x,[1,1])
#print(x.shape)
x=x.view(x.size(0),-1)
x=self.outlayer(x)
return x
# def main():
# blk=ResBl(64,128,stride=4)
# tmp = torch.randn(2, 64, 32, 32)
# out=blk(tmp)
# print('block:',out.shape)
#
# x=torch.randn(2,3,32,32)
# model=Resnet()
# out=model(x)
# print('resnet:',out.shape)
#
#
# if __name__ == '__main__':
# main()
| 2.6875 | 3 |
tools.py | CoderChen01/smart_trash_can | 0 | 12799524 | <reponame>CoderChen01/smart_trash_can
import base64
from PIL import Image, ImageDraw, ImageFont
import cv2
import configs
FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25)
def draw_image(img, text, color):
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE)
return img
def cv2base64(image):
    base64_str = cv2.imencode('.jpg', image)[1].tobytes()
base64_str = base64.b64encode(base64_str).decode('utf8')
return base64_str
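# Illustrative sketch (not part of the original module): cv2base64 returns a plain
# base64 string of the JPEG bytes, so it can be decoded back into an image with
# cv2.imdecode. The dummy frame below is hypothetical.
def _example_cv2base64_roundtrip():
    import numpy as np
    frame = np.zeros((10, 10, 3), dtype=np.uint8)
    raw = base64.b64decode(cv2base64(frame))
    decoded = cv2.imdecode(np.frombuffer(raw, dtype=np.uint8), cv2.IMREAD_COLOR)
    assert decoded.shape == frame.shape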
def id2data(_id):
_labels = [
'_',
'有害垃圾',
'可回收垃圾',
'厨余垃圾',
'其他垃圾'
]
class_name, object_name = configs.PREDICT_LABELS[_id].split('/')
class_id = _labels.index(class_name)
return {
'class_id': str(class_id),
'class_name': class_name,
'object_name': object_name
    }
 | 2.734375 | 3 |
rtpipe/cli.py | caseyjlaw/rtpipe | 9 | 12799525 | import rtpipe.RT as rt
import rtpipe.parsecands as pc
import rtpipe.parsesdm as ps
import rtpipe.reproduce as reproduce
import click, os, glob
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.captureWarnings(True)
logger = logging.getLogger(__name__)
@click.group('rtpipe')
def cli():
pass
@cli.command()
@click.argument('filename')
@click.option('--paramfile', default='')
@click.option('--bdfdir', default='')
@click.option('--scan', default=1)
def read(filename, paramfile, bdfdir, scan):
""" Simple parse and return metadata for pipeline for first scan """
filename = os.path.abspath(filename)
scans = ps.read_scans(filename, bdfdir=bdfdir)
logger.info('Scans, Target names:')
logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans]))
logger.info('Example pipeline:')
state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False)
@cli.command()
@click.argument('filename', type=str)
@click.option('--scan', type=int, default=0)
@click.option('--paramfile', type=str, default='rtpipe_cbe.conf')
@click.option('--logfile', type=bool, default=False)
@click.option('--bdfdir', default='')
def searchone(filename, scan, paramfile, logfile, bdfdir):
""" Searches one scan of filename
filename is name of local sdm ('filename.GN' expected locally).
scan is scan number to search. if none provided, script prints all.
assumes filename is an sdm.
"""
filename = os.path.abspath(filename)
scans = ps.read_scans(filename, bdfdir=bdfdir)
if scan != 0:
d = rt.set_pipeline(filename, scan, paramfile=paramfile,
fileroot=os.path.basename(filename), logfile=logfile)
rt.pipeline(d, range(d['nsegments']))
# clean up and merge files
pc.merge_segments(filename, scan)
pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys())
else:
logger.info('Scans, Target names:')
logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans]))
logger.info('Example pipeline:')
state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile,
fileroot=os.path.basename(filename), logfile=logfile)
@cli.command()
@click.argument('filename')
@click.option('--snrmin', default=0.)
@click.option('--snrmax', default=999.)
@click.option('--bdfdir', default='')
def mergeall(filename, snrmin, snrmax, bdfdir):
""" Merge cands/noise files over all scans
Tries to find scans from filename, but will fall back to finding relevant files if it does not exist.
"""
filename = os.path.abspath(filename)
bignumber = 500
if os.path.exists(filename):
scans = ps.read_scans(filename, bdfdir=bdfdir)
scanlist = sorted(scans.keys())
else:
logger.warn('Could not find file {0}. Estimating scans from available files.'.format(filename))
filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename))))
try:
scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist]))
except IndexError:
logger.warn('Could not parse filenames for scans. Looking over big range.')
scanlist = range(bignumber)
logger.info('Merging over scans {0}'.format(scanlist))
for scan in scanlist:
pc.merge_segments(filename, scan)
pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax)
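# Illustrative sketch (not part of the original module): the fallback above infers scan
# numbers from merged pickle names of the form '<prefix>_<fileroot>_sc<scan>seg<seg>.pkl'.
# The filenames below are hypothetical.
def _example_scan_inference():
    filelist = ['cands_16A-001_sc7seg0.pkl', 'cands_16A-001_sc12seg3.pkl']
    scanlist = sorted(set(int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0])
                          for fn in filelist))
    assert scanlist == [7, 12]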
@cli.command()
@click.argument('filename', type=str)
@click.option('--html', type=bool, default=True, help='Create html version')
@click.option('--basenb', type=str, default='', help='Full path to base notebook. Default to distribution version')
@click.option('--agdir', type=str, default='', help='Activegit repo for applying classifications')
def nbcompile(filename, html, basenb, agdir):
""" Compile the baseinteract.ipynb notebook into an analysis notebook for filename """
filename = os.path.abspath(filename)
pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir)
@cli.command()
@click.argument('candsfile', type=str)
@click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold')
def list_cands(candsfile, threshold):
""" Print candidates above abs(snr) in candsfile """
reproduce.list_cands(candsfile, threshold)
@cli.command()
@click.argument('candsfile', type=str)
@click.argument('candnum', type=int)
@click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold')
def refine_cand(candsfile, candnum, threshold):
""" Run refinement search for candnum in list_cands with abs(snr) > threshold """
reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold)
@cli.command()
@click.argument('candsfile', type=str)
@click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold')
def refine_cands(candsfile, threshold):
""" Run refinement search and save candidates for all in candsfile with snr > threshold """
reproduce.refine_cands(candsfile, threshold=threshold)
if __name__ == '__main__':
cli()
| 2.21875 | 2 |
collabera_python.py | dedds001/snowflake-tutorials | 0 | 12799526 | <reponame>dedds001/snowflake-tutorials<filename>collabera_python.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 14:45:54 2019
@author: deborahedds
"""
###get files from http
import requests
r=requests.get("https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv")
r.status_code
##200 status ok, the request worked
##403 authorization denied
##500 server error
##read as a text file
r.text
###Get the second from the last row
r.text.split('\n')[-2]
##write into a file
a="/Users/deborahedds/Downloads/testfile04"
f1=open(a, "w")
f1.write(r.text.split('\n')[-2])
f1.close()
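##illustrative sketch (not part of the original notes): negative indexes on the split
##lines count from the end, so [-1] is usually the empty string after the trailing
##newline and [-2] is the last data row. The sample text below is made up.
sample_text = "h1,h2\n1,2\n3,4\n"
sample_text.split('\n')[-2]
##returns '3,4'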
##import from s3
import requests
url = "https://collabera-aws-training.s3.amazonaws.com/employees01.csv"
headers = {
'Host': "collabera-aws-training.s3.amazonaws.com",
'X-Amz-Content-Sha256': "e3numbergeneratedbypostman55",
'X-Amz-Date': "20190703T185532Z",
'Authorization': "AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key",
'User-Agent': "PostmanRuntime/7.15.0",
'Accept': "*/*",
'Cache-Control': "no-cache",
'Postman-Token': "<PASSWORD>",
'accept-encoding': "gzip, deflate",
'Connection': "keep-alive",
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers)
print(response.text)
##get the second to last row number 177 +2
int(response.text.split('\n')[-2].split(',')[3].split()[0]) +2
a="/Users/deborahedds/Downloads/testfile06"
f1=open(a, "w")
f1.write(str(int(response.text.split('\n')[-2].split(',')[3].split()[0]) +2))
f1.close()
###import datetime and get today's date
import datetime as dt
dt.datetime.today()
##print today's date using ddmmyy format
ddmmyy = dt.datetime.now().strftime('%d-%m-%y')
print(ddmmyy)
| 2.53125 | 3 |
auth_custom/apps.py | u-transnet/utransnet-gateway | 0 | 12799527 | <reponame>u-transnet/utransnet-gateway
from django.apps import AppConfig
class AuthCustomConfig(AppConfig):
name = 'auth_custom'
| 1.304688 | 1 |
vespene/migrations/0009_remove_workerpool_sudo_password.py | vespene-io/vespene | 680 | 12799528 | # Generated by Django 2.1.2 on 2018-12-16 13:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('vespene', '0008_auto_20181106_2233'),
]
operations = [
migrations.RemoveField(
model_name='workerpool',
name='sudo_password',
),
]
| 1.296875 | 1 |
pypet/tests/integration/environment_test.py | dilawar/pypet | 85 | 12799529 | <gh_stars>10-100
__author__ = '<NAME>'
import os
import platform
import logging
import time
import numpy as np
from pypet.trajectory import Trajectory, load_trajectory
from pypet.utils.explore import cartesian_product
from pypet.environment import Environment
from pypet.storageservice import HDF5StorageService
from pypet import pypetconstants, Result, manual_run
import pypet.pypetexceptions as pex
import sys
import unittest
try:
import psutil
except ImportError:
psutil = None
try:
import dill
except ImportError:
dill = None
import scipy.sparse as spsp
import random
from pypet import Parameter
import tables as pt
from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\
get_root_logger, parse_args, get_log_config, get_log_path
from pypet.tests.testutils.data import create_param_dict, add_params, multiply,\
simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \
multiply_with_graceful_exit
def add_one_particular_item(traj, store_full):
traj.hi = Result('hi', 42, 'hi!')
traj.f_store()
traj.f_remove_child('hi')
class SlowResult(Result):
def _load(self, load_dict):
time.sleep(3)
super(SlowResult, self)._load(load_dict)
class FullStorageTest(TrajectoryComparator):
tags = 'integration', 'hdf5', 'environment' # Test tags
def test_full_store(self):
filename = make_temp_dir('full_store.hdf5')
with Environment(filename=filename,
log_config=get_log_config()) as env:
traj = env.v_trajectory
traj.par.x = Parameter('x', 3, 'jj')
traj.f_explore({'x': [1,2,3]})
env.f_run(add_one_particular_item, True)
traj = load_trajectory(index=-1, filename=filename)
self.assertTrue('hi' in traj)
def with_niceness(traj):
if traj.multiproc:
if hasattr(os, 'nice'):
trajnice = traj.niceness
osnice = os.nice(0)
else:
trajnice = traj.niceness
osnice = psutil.Process().nice()
if trajnice != osnice:
if traj.use_scoop:
import scoop
if (not scoop.IS_RUNNING or scoop.IS_ORIGIN):
return
raise RuntimeError('traj niceness != os niceness; '
'%s != %s' % (str(trajnice), str(osnice)))
def add_large_data(traj):
np_array = np.random.rand(100, 1000, 10)
traj.f_add_result('l4rge', np_array)
traj.f_store_item('l4rge')
traj.f_remove_item('l4rge')
array_list = []
for irun in range(111):
array_list.append(np.random.rand(10))
traj.f_add_result('m4ny', *array_list)
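# Illustrative sketch (not part of the original test module): the store/remove pattern
# used above keeps memory low. The large result is written to disk right away and
# dropped from the trajectory tree, while the many small arrays stay in memory until
# they are stored automatically at the end of the run.
def _example_store_and_remove(traj):
    big = np.random.rand(100, 1000, 10)
    traj.f_add_result('example_big', big)
    traj.f_store_item('example_big')   # write to HDF5 now
    traj.f_remove_item('example_big')  # free the in-memory copy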
class SimpleEnvironmentTest(TrajectoryComparator):
tags = 'integration', 'hdf5', 'environment', 'quick'
def test_make_default_file_when_giving_directory_without_slash(self):
filename = make_temp_dir('test.hdf5')
head, tail = os.path.split(filename)
env = Environment(filename=head)
the_file_name = env.v_traj.v_name + '.hdf5'
head, tail = os.path.split(env.v_traj.v_storage_service.filename)
self.assertEqual(tail, the_file_name)
class EnvironmentTest(TrajectoryComparator):
tags = 'integration', 'hdf5', 'environment'
def set_mode(self):
self.mode = 'LOCK'
self.multiproc = False
self.gc_interval = None
self.ncores = 1
self.use_pool=True
self.use_scoop=False
self.freeze_input=False
self.pandas_format='fixed'
self.pandas_append=False
self.complib = 'zlib'
self.complevel=9
self.shuffle=True
self.fletcher32 = False
self.encoding = 'utf8'
self.log_stdout=False
self.wildcard_functions = None
self.niceness = None
self.port = None
self.timeout = None
self.add_time=True
self.graceful_exit = False
def explore_complex_params(self, traj):
matrices_csr = []
for irun in range(3):
spsparse_csr = spsp.lil_matrix((111,111))
spsparse_csr[3,2+irun] = 44.5*irun
matrices_csr.append(spsparse_csr.tocsr())
matrices_csc = []
for irun in range(3):
spsparse_csc = spsp.lil_matrix((111,111))
spsparse_csc[3,2+irun] = 44.5*irun
matrices_csc.append(spsparse_csc.tocsc())
matrices_bsr = []
for irun in range(3):
spsparse_bsr = spsp.lil_matrix((111,111))
spsparse_bsr[3,2+irun] = 44.5*irun
matrices_bsr.append(spsparse_bsr.tocsr().tobsr())
matrices_dia = []
for irun in range(3):
spsparse_dia = spsp.lil_matrix((111,111))
spsparse_dia[3,2+irun] = 44.5*irun
matrices_dia.append(spsparse_dia.tocsc().todia())
self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']),
np.array(['Cinco', 'Seis', 'Siette']),
np.array(['Ocho', 'Nueve', 'Diez'])],
'int':[1,2,3],
'csr_mat' : matrices_csr,
'csc_mat' : matrices_csc,
'bsr_mat' : matrices_bsr,
'dia_mat' : matrices_dia,
'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]}
with self.assertRaises(pex.NotUniqueNodeError):
traj.f_explore(self.explore_dict)
traj.f_shrink(force=True)
par_dict = traj.parameters.f_to_dict()
for param_name in par_dict:
param = par_dict[param_name]
if param.v_name in self.explore_dict:
param.f_unlock()
if param.v_explored:
param._shrink()
self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']),
np.array(['Cinco', 'Seis', 'Siette']),
np.array(['Ocho', 'Nueve', 'Diez'])],
'Normal.int':[1,2,3],
'csr_mat' : matrices_csr,
'csc_mat' : matrices_csc,
'bsr_mat' : matrices_bsr,
'dia_mat' : matrices_dia,
'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]}
traj.f_explore(self.explore_dict)
def explore(self, traj):
self.explored ={'Normal.trial': [0],
'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])],
'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]}
self.explored['csr_mat'][0][1,2]=44.0
self.explored['csr_mat'][1][2,2]=33
self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr()
self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr()
traj.f_explore(cartesian_product(self.explored))
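    # Illustrative sketch (not part of the original test class): cartesian_product
    # expands the explore dictionary into every combination of values, so two trial
    # values and three seed values give six runs. The parameter names are hypothetical.
    def _example_cartesian_product(self):
        explored = cartesian_product({'trial': [0, 1], 'seed': [1, 2, 3]})
        assert len(explored['trial']) == 6
        assert len(explored['seed']) == 6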
def explore_large(self, traj):
self.explored ={'Normal.trial': [0,1]}
traj.f_explore(cartesian_product(self.explored))
def tearDown(self):
self.env.f_disable_logging()
super(EnvironmentTest, self).tearDown()
def setUp(self):
self.set_mode()
self.logfolder = make_temp_dir(os.path.join('experiments',
'tests',
'Log'))
random.seed()
self.trajname = make_trajectory_name(self)
self.filename = make_temp_dir(os.path.join('experiments',
'tests',
'HDF5',
'test%s.hdf5' % self.trajname))
env = Environment(trajectory=self.trajname, filename=self.filename,
file_title=self.trajname,
log_stdout=self.log_stdout,
log_config=get_log_config(),
results_per_run=5,
wildcard_functions=self.wildcard_functions,
derived_parameters_per_run=5,
multiproc=self.multiproc,
ncores=self.ncores,
wrap_mode=self.mode,
use_pool=self.use_pool,
gc_interval=self.gc_interval,
freeze_input=self.freeze_input,
fletcher32=self.fletcher32,
complevel=self.complevel,
complib=self.complib,
shuffle=self.shuffle,
pandas_append=self.pandas_append,
pandas_format=self.pandas_format,
encoding=self.encoding,
niceness=self.niceness,
use_scoop=self.use_scoop,
port=self.port,
add_time=self.add_time,
timeout=self.timeout,
graceful_exit=self.graceful_exit)
traj = env.v_trajectory
traj.v_standard_parameter=Parameter
## Create some parameters
self.param_dict={}
create_param_dict(self.param_dict)
### Add some parameter:
add_params(traj,self.param_dict)
#remember the trajectory and the environment
self.traj = traj
self.env = env
@unittest.skipIf(not hasattr(os, 'nice') and psutil is None, 'Niceness not supported under non Unix.')
def test_niceness(self):
###Explore
self.explore(self.traj)
self.env.f_run(with_niceness)
self.assertTrue(self.traj.f_is_completed())
def test_file_overwriting(self):
self.traj.f_store()
with pt.open_file(self.filename, mode='r') as file:
nchildren = len(file.root._v_children)
self.assertTrue(nchildren > 0)
env2 = Environment(filename=self.filename,
log_config=get_log_config())
traj2 = env2.v_trajectory
traj2.f_store()
self.assertTrue(os.path.exists(self.filename))
with pt.open_file(self.filename, mode='r') as file:
nchildren = len(file.root._v_children)
self.assertTrue(nchildren > 1)
env3 = Environment(filename=self.filename, overwrite_file=True,
log_config=get_log_config())
self.assertFalse(os.path.exists(self.filename))
env2.f_disable_logging()
env3.f_disable_logging()
def test_time_display_of_loading(self):
filename = make_temp_dir('sloooow.hdf5')
env = Environment(trajectory='traj', add_time=True, filename=filename,
log_stdout=False,
log_config=get_log_config(),
dynamic_imports=SlowResult,
display_time=0.1)
traj = env.v_traj
res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk')
traj.f_store()
service_logger = traj.v_storage_service._logger
root = logging.getLogger('pypet')
old_level = root.level
service_logger.setLevel(logging.INFO)
root.setLevel(logging.INFO)
traj.f_load(load_data=3)
service_logger.setLevel(old_level)
root.setLevel(old_level)
path = get_log_path(traj)
mainfilename = os.path.join(path, 'LOG.txt')
with open(mainfilename, mode='r') as mainf:
full_text = mainf.read()
self.assertTrue('nodes/s)' in full_text)
env.f_disable_logging()
def make_run_large_data(self):
self.env.f_run(add_large_data)
def make_run(self):
### Make a test run
simple_arg = -13
simple_kwarg= 13.0
results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg)
self.are_results_in_order(results)
def test_a_large_run(self):
get_root_logger().info('Testing large run')
self.traj.f_add_parameter('TEST', 'test_run')
###Explore
self.explore_large(self.traj)
self.make_run_large_data()
self.assertTrue(self.traj.f_is_completed())
# Check if printing and repr work
get_root_logger().info(str(self.env))
get_root_logger().info(repr(self.env))
newtraj = Trajectory()
newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj,newtraj)
size=os.path.getsize(self.filename)
size_in_mb = size/1000000.
get_root_logger().info('Size is %sMB' % str(size_in_mb))
self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB' % str(size_in_mb))
def test_two_runs(self):
self.traj.f_add_parameter('TEST', 'test_run')
self.traj.hdf5.purge_duplicate_comments = False
###Explore
self.explore(self.traj)
self.make_run()
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj, newtraj)
size=os.path.getsize(self.filename)
size_in_mb = size/1000000.
get_root_logger().info('Size is %sMB' % str(size_in_mb))
self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))
mp_traj = self.traj
old_multiproc = self.multiproc
self.multiproc = False
### Make a new single core run
self.setUp()
self.traj.f_add_parameter('TEST', 'test_run')
self.traj.hdf5.purge_duplicate_comments = False
###Explore
self.explore(self.traj)
self.make_run()
# newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj, newtraj)
size=os.path.getsize(self.filename)
size_in_mb = size/1000000.
get_root_logger().info('Size is %sMB' % str(size_in_mb))
self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))
self.compare_trajectories(mp_traj, self.traj)
self.multiproc = old_multiproc
def test_errors(self):
tmp = make_temp_dir('cont')
if dill is not None:
env1 = Environment(continuable=True, continue_folder=tmp,
log_config=None, filename=self.filename)
with self.assertRaises(ValueError):
env1.f_run_map(multiply_args, [1], [2], [3])
with self.assertRaises(ValueError):
Environment(multiproc=True, use_pool=False, freeze_input=True,
filename=self.filename, log_config=None)
env3 = Environment(log_config=None, filename=self.filename)
with self.assertRaises(ValueError):
env3.f_run_map(multiply_args)
with self.assertRaises(ValueError):
Environment(use_scoop=True, immediate_postproc=True)
with self.assertRaises(ValueError):
Environment(use_pool=True, immediate_postproc=True)
with self.assertRaises(ValueError):
Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp)
with self.assertRaises(ValueError):
Environment(use_scoop=True, wrap_mode='QUEUE')
with self.assertRaises(ValueError):
Environment(automatic_storing=False,
continuable=True, continue_folder=tmp)
with self.assertRaises(ValueError):
Environment(port='www.nosi.de', wrap_mode='LOCK')
def test_run(self):
self.traj.f_add_parameter('TEST', 'test_run')
###Explore
self.explore(self.traj)
self.make_run()
self.assertTrue(self.traj.f_is_completed())
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj, newtraj)
size=os.path.getsize(self.filename)
size_in_mb = size/1000000.
get_root_logger().info('Size is %sMB' % str(size_in_mb))
self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))
def test_just_one_run(self):
self.make_run()
self.assertTrue(self.traj.f_is_completed())
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj, newtraj)
self.assertTrue(len(newtraj) == 1)
size=os.path.getsize(self.filename)
size_in_mb = size/1000000.
get_root_logger().info('Size is %sMB' % str(size_in_mb))
        self.assertTrue(size_in_mb < 2.0, 'Size is %sMB > 2MB' % str(size_in_mb))
with self.assertRaises(TypeError):
self.explore(self.traj)
def test_run_complex(self):
self.traj.f_add_parameter('TEST', 'test_run_complex')
###Explore
self.explore_complex_params(self.traj)
self.make_run()
self.assertTrue(self.traj.f_is_completed())
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj, newtraj)
def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False):
### Load The Trajectory and check if the values are still the same
newtraj = Trajectory()
newtraj.v_storage_service=HDF5StorageService(filename=self.filename)
newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new,
load_parameters=2, load_derived_parameters=2, load_results=2,
load_other_data=2)
return newtraj
def test_expand(self):
###Explore
self.traj.f_add_parameter('TEST', 'test_expand')
self.explore(self.traj)
self.make_run()
self.expand()
get_root_logger().info('\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$')
self.make_run()
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj, newtraj)
def test_expand_after_reload(self):
self.traj.f_add_parameter('TEST', 'test_expand_after_reload')
###Explore
self.explore(self.traj)
self.make_run()
traj_name = self.traj.v_name
self.env = Environment(trajectory=self.traj,
log_stdout=False,
log_config=get_log_config())
self.traj = self.env.v_trajectory
self.traj.f_load(name=traj_name)
self.traj.res.f_remove()
self.traj.dpar.f_remove()
self.expand()
get_root_logger().info('\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \n')
self.make_run()
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj, newtraj)
def expand(self):
self.expanded ={'Normal.trial': [1],
'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])],
'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]}
self.expanded['csr_mat'][0][1,2]=44.0
self.expanded['csr_mat'][1][2,2]=33
self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr()
self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr()
self.traj.f_expand(cartesian_product(self.expanded))
self.traj.f_store()
################## Overview TESTS #############################
def test_switch_ON_large_tables(self):
self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables')
###Explore
self.explore(self.traj)
self.env._traj.config.hdf5.overview.results_overview = 1
self.env._traj.config.hdf5.overview.derived_parameters_overview = 1
self.make_run()
hdf5file = pt.open_file(self.filename)
overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview')
should = ['derived_parameters_overview', 'results_overview']
for name in should:
self.assertTrue(name in overview_group, '%s not in overviews but it should!' % name)
hdf5file.close()
self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2)
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name)
self.compare_trajectories(newtraj,self.traj)
def test_switch_off_all_tables(self):
###Explore
self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables')
self.explore(self.traj)
self.env._traj.config.hdf5.overview.results_overview = 0
self.env._traj.config.hdf5.overview.derived_parameters_overview = 0
self.env._traj.config.hdf5.overview.derived_parameters_summary = 0
self.env._traj.config.hdf5.overview.results_summary = 0
self.env._traj.config.hdf5.purge_duplicate_comments = 0
self.env._traj.config.hdf5.overview.parameters_overview = 0
self.env._traj.config.hdf5.overview.config_overview = 0
self.env._traj.config.hdf5.overview.explored_parameters_overview = 0
self.make_run()
hdf5file = pt.open_file(self.filename)
overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview')
should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys()
for name in should_not:
            name = name.split('.')[-1] # Get only the name of the table, not the full name
self.assertTrue(not name in overview_group, '%s in overviews but should not!' % name)
hdf5file.close()
def test_store_form_tuple(self):
self.traj.f_store()
self.traj.f_add_result('TestResItem', 42, 43)
with self.assertRaises(ValueError):
self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5))
self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem))
self.traj.results.f_remove_child('TestResItem')
self.assertTrue('TestResItem' not in self.traj)
self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON)
self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'}))
self.assertTrue(self.traj.TestResItem, 42)
def test_store_single_group(self):
self.traj.f_store()
self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42)
self.traj.f_store_item('new.group')
# group is below test not new, so ValueError thrown:
with self.assertRaises(ValueError):
self.traj.parameters.new.f_remove_child('group')
# group is below test not new, so ValueError thrown:
with self.assertRaises(ValueError):
self.traj.parameters.new.f_store_child('group')
# group has children and recursive is false
with self.assertRaises(TypeError):
self.traj.parameters.new.f_remove_child('test')
self.traj.new.f_remove_child('test', recursive=True)
self.assertTrue('new.group' not in self.traj)
self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON)
self.assertTrue(self.traj.new.group.v_annotations.annotation, 42)
self.traj.f_delete_item('new.test.group')
with self.assertRaises(pex.DataNotInStorageError):
self.traj.parameters.f_load_child('new.test.group',
load_data=pypetconstants.LOAD_SKELETON)
def test_switch_on_all_comments(self):
self.explore(self.traj)
self.traj.hdf5.purge_duplicate_comments=0
self.make_run()
hdf5file = pt.open_file(self.filename)
traj_group = hdf5file.get_node(where='/', name= self.traj.v_name)
for node in traj_group._f_walk_groups():
if 'SRVC_LEAF' in node._v_attrs:
self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs,
'There is no comment in node %s!' % node._v_name)
hdf5file.close()
def test_purge_duplicate_comments(self):
self.explore(self.traj)
with self.assertRaises(RuntimeError):
self.traj.hdf5.purge_duplicate_comments = 1
self.traj.overview.results_summary = 0
self.make_run()
self.traj.f_get('purge_duplicate_comments').f_unlock()
self.traj.hdf5.purge_duplicate_comments=1
self.traj.f_get('results_summary').f_unlock()
self.traj.overview.results_summary=1
self.make_run()
hdf5file = pt.open_file(self.filename, mode='a')
ncomments = {}
try:
traj_group = hdf5file.get_node(where='/',name= self.traj.v_name)
for node in traj_group._f_walk_groups():
if ('/derived_parameters/' in node._v_pathname or
'/results/' in node._v_pathname):
if 'SRVC_LEAF' in node._v_attrs:
if 'SRVC_INIT_COMMENT' in node._v_attrs:
comment = node._v_attrs['SRVC_INIT_COMMENT']
if comment not in ncomments:
ncomments[comment] = 0
ncomments[comment] += 1
finally:
hdf5file.close()
self.assertGreaterEqual(len(ncomments), 1)
self.assertTrue(all(x == 1 for x in ncomments.values()))
def test_NOT_purge_duplicate_comments(self):
self.explore(self.traj)
self.traj.f_get('purge_duplicate_comments').f_unlock()
self.traj.hdf5.purge_duplicate_comments=0
self.traj.f_get('results_summary').f_unlock()
self.traj.overview.results_summary=0
self.make_run()
hdf5file = pt.open_file(self.filename, mode='a')
ncomments = {}
try:
traj_group = hdf5file.get_node(where='/',name= self.traj.v_name)
for node in traj_group._f_walk_groups():
if ('/derived_parameters/' in node._v_pathname or
'/results/' in node._v_pathname):
if 'SRVC_LEAF' in node._v_attrs:
if 'SRVC_INIT_COMMENT' in node._v_attrs:
comment = node._v_attrs['SRVC_INIT_COMMENT']
if comment not in ncomments:
ncomments[comment] = 0
ncomments[comment] += 1
finally:
hdf5file.close()
self.assertGreaterEqual(len(ncomments), 1)
self.assertTrue(any(x > 1 for x in ncomments.values()))
def my_run_func(idx):
return 'hello_%d' % idx
def my_set_func(idx):
return 'huhu_%d' % idx
class TestOtherHDF5Settings(EnvironmentTest):
tags = 'integration', 'hdf5', 'environment', 'hdf5_settings'
def set_mode(self):
EnvironmentTest.set_mode(self)
self.mode = 'LOCK'
self.multiproc = False
self.ncores = 1
self.use_pool=True
self.pandas_format='table'
self.pandas_append=True
self.complib = 'blosc'
self.complevel=2
self.shuffle=False
self.fletcher32 = False
self.encoding='latin1'
self.graceful_exit = True
class TestOtherHDF5Settings2(EnvironmentTest):
tags = 'integration', 'hdf5', 'environment', 'hdf5_settings'
def set_mode(self):
EnvironmentTest.set_mode(self)
self.mode = 'LOCK'
self.multiproc = False
self.ncores = 1
self.use_pool=True
self.pandas_format='table'
self.pandas_append=False
self.complib = 'lzo'
self.complevel=2
self.shuffle=False
self.fletcher32 = True
self.encoding='latin1'
self.wildcard_functions = {('$', 'crun') : my_run_func, ('$set', 'crunset'): my_set_func}
class ResultSortTest(TrajectoryComparator):
tags = 'integration', 'hdf5', 'environment'
def set_mode(self):
self.mode = 'LOCK'
self.multiproc = False
self.ncores = 1
self.use_pool=True
self.log_stdout=False
self.freeze_input=False
self.use_scoop = False
self.log_config = True
self.port = None
self.graceful_exit = True
def tearDown(self):
self.env.f_disable_logging()
super(ResultSortTest, self).tearDown()
def setUp(self):
self.set_mode()
self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5'))
self.trajname = make_trajectory_name(self)
env = Environment(trajectory=self.trajname,filename=self.filename,
file_title=self.trajname,
log_stdout=self.log_stdout,
log_config=get_log_config() if self.log_config else None,
multiproc=self.multiproc,
wrap_mode=self.mode,
ncores=self.ncores,
use_pool=self.use_pool,
use_scoop=self.use_scoop,
port=self.port,
freeze_input=self.freeze_input,
graceful_exit=self.graceful_exit)
traj = env.v_trajectory
traj.v_standard_parameter=Parameter
traj.f_add_parameter('x',99)
traj.f_add_parameter('y',99)
self.env=env
self.traj=traj
def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2):
### Load The Trajectory and check if the values are still the same
newtraj = Trajectory()
newtraj.v_storage_service=HDF5StorageService(filename=self.filename)
newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new,
load_derived_parameters=how, load_results=how)
return newtraj
def explore(self,traj):
self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]}
traj.f_explore(self.explore_dict)
def explore_cartesian(self,traj):
self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]})
traj.f_explore(self.explore_dict)
def expand(self,traj):
self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]}
with self.assertRaises(ValueError):
traj.f_expand(self.expand_dict)
self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]}
traj.f_expand(self.expand_dict)
def test_if_results_are_sorted_correctly_manual_runs(self):
###Explore
self.explore(self.traj)
self.traj.f_store(only_init=True)
man_multiply = manual_run()(multiply_with_storing)
for idx in self.traj.f_iter_runs(yields='idx'):
self.assertTrue(isinstance(idx, int))
man_multiply(self.traj)
traj = self.traj
traj.f_store()
        self.assertEqual(len(traj), 5)
self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.check_if_z_is_correct(traj)
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj,newtraj)
def test_if_results_are_sorted_correctly_using_map(self):
###Explore
self.explore(self.traj)
args1=[10*x for x in range(len(self.traj))]
args2=[100*x for x in range(len(self.traj))]
args3=list(range(len(self.traj)))
results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3)
self.assertEqual(len(results), len(self.traj))
traj = self.traj
self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.check_if_z_is_correct_map(traj, args1, args2, args3)
for res in results:
self.assertEqual(len(res), 2)
self.assertTrue(isinstance(res[0], int))
self.assertTrue(isinstance(res[1], int))
idx = res[0]
self.assertEqual(self.traj.res.runs[idx].z, res[1])
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.assertEqual(len(traj), 5)
self.compare_trajectories(self.traj,newtraj)
def test_if_results_are_sorted_correctly(self):
###Explore
self.explore(self.traj)
results = self.env.f_run(multiply)
self.are_results_in_order(results)
self.assertEqual(len(results), len(self.traj))
traj = self.traj
self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.check_if_z_is_correct(traj)
for res in results:
self.assertEqual(len(res), 2)
self.assertTrue(isinstance(res[0], int))
self.assertTrue(isinstance(res[1], int))
idx = res[0]
self.assertEqual(self.traj.res.runs[idx].z, res[1])
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj,newtraj)
def test_graceful_exit(self):
###Explore
self.explore_cartesian(self.traj)
results = self.env.f_run(multiply_with_graceful_exit)
self.are_results_in_order(results)
self.assertFalse(self.traj.f_is_completed())
def test_f_iter_runs(self):
###Explore
self.explore(self.traj)
results = self.env.f_run(multiply)
self.are_results_in_order(results)
traj = self.traj
self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.check_if_z_is_correct(traj)
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
for idx, run_name in enumerate(self.traj.f_iter_runs()):
newtraj.v_crun=run_name
self.traj.v_idx = idx
newtraj.v_idx = idx
nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
self.assertTrue('run_%08d' % (idx+1) not in nameset)
self.assertTrue('run_%08d' % idx in nameset)
self.assertTrue(traj.v_crun == run_name)
self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' %
(str(newtraj.crun.z),str(traj.x),str(traj.y)))
for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')):
run_name = traj.f_idx_to_run(idx)
self.assertTrue(traj is self.traj)
newtraj.v_crun=run_name
self.traj.v_idx = idx
newtraj.v_idx = idx
nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
self.assertTrue('run_%08d' % (idx+1) not in nameset)
self.assertTrue('run_%08d' % idx in nameset)
self.assertTrue(traj.v_crun == run_name)
self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' %
(str(newtraj.crun.z),str(traj.x),str(traj.y)))
for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')):
run_name = traj.f_idx_to_run(idx)
self.assertTrue(traj is not self.traj)
newtraj.v_crun=run_name
self.traj.v_idx = idx
newtraj.v_idx = idx
nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
self.assertTrue('run_%08d' % (idx+1) not in nameset)
self.assertTrue('run_%08d' % idx in nameset)
self.assertTrue(traj.v_crun == run_name)
self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' %
(str(newtraj.crun.z),str(traj.x),str(traj.y)))
traj = self.traj
self.assertTrue(traj.v_idx == -1)
self.assertTrue(traj.v_crun is None)
self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)
self.assertTrue(newtraj.v_idx == idx)
def test_f_iter_runs_auto_load(self):
###Explore
self.explore(self.traj)
results = self.env.f_run(multiply)
self.are_results_in_order(results)
traj = self.traj
self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.check_if_z_is_correct(traj)
newtraj = Trajectory()
newtraj.v_storage_service=HDF5StorageService(filename=self.filename)
newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0)
newtraj.v_auto_load = True
newtraj.par.f_load_child('y', load_data=1)
for idx, run_name in enumerate(self.traj.f_iter_runs()):
newtraj.v_crun=run_name
self.traj.v_idx = idx
newtraj.v_idx = idx
nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))
self.assertTrue('run_%08d' % (idx+1) not in nameset)
self.assertTrue('run_%08d' % idx in nameset)
self.assertTrue(traj.v_crun == run_name)
self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s != %s * %s' %
(str(newtraj.crun.z),str(newtraj.x),str(newtraj.y)))
traj = self.traj
self.assertTrue(traj.v_idx == -1)
self.assertTrue(traj.v_crun is None)
self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)
self.assertTrue(newtraj.v_idx == idx)
def test_expand(self):
###Explore
self.explore(self.traj)
results = self.env.f_run(multiply)
self.are_results_in_order(results)
get_root_logger().info(results)
traj = self.traj
self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0])))
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.check_if_z_is_correct(traj)
traj_name = self.env.v_trajectory.v_name
del self.env
self.env = Environment(trajectory=self.traj,
log_stdout=False,
log_config=get_log_config())
self.traj = self.env.v_trajectory
self.traj.f_load(name=traj_name)
self.expand(self.traj)
results = self.env.f_run(multiply)
self.are_results_in_order(results)
traj = self.traj
self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+
len(list(self.explore_dict.values())[0]))
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.check_if_z_is_correct(traj)
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj,newtraj)
def test_expand_after_reload(self):
###Explore
self.explore(self.traj)
results = self.env.f_run(multiply)
self.are_results_in_order(results)
traj = self.traj
self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.check_if_z_is_correct(traj)
self.expand(self.traj)
self.env.f_run(multiply)
traj = self.traj
self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\
len(list(self.explore_dict.values())[0]))
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.check_if_z_is_correct(traj)
newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)
self.traj.f_load_skeleton()
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)
self.compare_trajectories(self.traj,newtraj)
def check_if_z_is_correct_map(self,traj, args1, args2, args3):
for x, arg1, arg2, arg3 in zip(range(len(traj)), args1, args2, args3):
traj.v_idx=x
self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s != %s * %s' %
(str(traj.crun.z),str(traj.x),str(traj.y)))
traj.v_idx=-1
def check_if_z_is_correct(self,traj):
traj.v_shortcuts=False
for x in range(len(traj)):
traj.v_idx=x
z = traj.res.runs.crun.z
x = traj.par.x
y = traj.par.y
self.assertTrue(z==x*y,' z != x*y: %s != %s * %s' %
(str(z),str(x),str(y)))
traj.v_idx=-1
traj.v_shortcuts=True
# def test_runfunc(traj, list_that_changes):
# traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx)
# list_that_changes[traj.v_idx] = 1000
# class DeepCopyTest(TrajectoryComparator):
#
# def test_deep_copy_data(self):
#
# self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5')
# self.logfolder = make_temp_dir('experiments/tests/Log')
# self.trajname = make_trajectory_name(self)
#
# env = Environment(trajectory=self.trajname,filename=self.filename,
# file_title=self.trajname, log_folder=self.logfolder,
# log_stdout=False,
# multiproc=False,
# deep_copy_data=True)
#
# traj = env.v_trajectory
#
# traj.f_add_parameter('dummy', 1)
# traj.f_explore({'dummy':[12, 3, 3, 4]})
#
# list_that_should_not_change = [42, 42, 42, 42]
#
# env.f_run(test_runfunc, list_that_should_not_change)
#
# traj.v_auto_load=True
#
# for irun, val in enumerate(list_that_should_not_change):
# self.assertTrue(list_that_should_not_change[irun] == 42)
# x=traj.results.runs[irun].kkk
# self.assertTrue(x==42+irun)
#
# def test_not_deep_copy_data(self):
# self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5')
# self.logfolder = make_temp_dir('experiments/tests/Log')
# self.trajname = make_trajectory_name(self)
#
# env = Environment(trajectory=self.trajname,filename=self.filename,
# file_title=self.trajname, log_folder=self.logfolder,
# log_stdout=False,
# multiproc=False,
# deep_copy_data=False)
#
# traj = env.v_trajectory
#
# traj.f_add_parameter('dummy', 1)
# traj.f_explore({'dummy':[12, 3, 3, 4]})
#
# list_that_should_change = [42, 42, 42, 42]
#
# env.f_run(test_runfunc, list_that_should_change)
#
# traj.v_auto_load=True
#
# for irun, val in enumerate(list_that_should_change):
# self.assertTrue(list_that_should_change[irun] == 1000)
if __name__ == '__main__':
opt_args = parse_args()
run_suite(**opt_args)
| 1.921875 | 2 |
lib/python3.7/site-packages/ravencoin/core/__init__.py | RavenGraph/api | 0 | 12799530 | <gh_stars>0
# Copyright (C) 2018 The python-ravencoinlib developers
#
# This file is part of python-ravencoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-ravencoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
import sys
import bitcoin.core
from bitcoin.core import *
from bitcoin.core.script import OP_RETURN
if sys.version > '3':
_bytes = bytes
else:
_bytes = lambda x: bytes(bytearray(x))
# Core definitions
COIN = 100000000
MAX_BLOCK_SIZE = 2000000 # after assets deployed
MAX_BLOCK_WEIGHT = 8000000
MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25?
WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed])
class CoreMainParams(bitcoin.core.CoreChainParams):
MAX_MONEY = 21000000000 * COIN
NAME = 'mainnet'
GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000'))
SUBSIDY_HALVING_INTERVAL = 2100000
PROOF_OF_WORK_LIMIT = 2**256-1 >> 20
# Burn Amounts
nIssueAssetBurnAmount = 500 * COIN
nReissueAssetBurnAmount = 100 * COIN
nIssueSubAssetBurnAmount = 100 * COIN
nIssueUniqueAssetBurnAmount = 5 * COIN
# Burn Addresses
strIssueAssetBurnAddress = "RXissueAssetXXXXXXXXXXXXXXXXXhhZGt"
strReissueAssetBurnAddress = "RXReissueAssetXXXXXXXXXXXXXXVEFAWu"
strIssueSubAssetBurnAddress = "RXissueSubAssetXXXXXXXXXXXXXWcwhwL"
strIssueUniqueAssetBurnAddress = "RXissueUniqueAssetXXXXXXXXXXWEAe58"
# Global Burn Address
strGlobalBurnAddress = "RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV"
class CoreTestNetParams(bitcoin.core.CoreMainParams):
MAX_MONEY = 21000000000 * COIN
NAME = 'testnet'
GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000'))
SUBSIDY_HALVING_INTERVAL = 2100000
PROOF_OF_WORK_LIMIT = 2**256-1 >> 20
# Burn Amounts
    nIssueAssetBurnAmount = 500 * COIN
    nReissueAssetBurnAmount = 100 * COIN
    nIssueSubAssetBurnAmount = 100 * COIN
    nIssueUniqueAssetBurnAmount = 5 * COIN
# Burn Addresses
strIssueAssetBurnAddress = "n1issueAssetXXXXXXXXXXXXXXXXWdnemQ"
strReissueAssetBurnAddress = "n1ReissueAssetXXXXXXXXXXXXXXWG9NLd"
strIssueSubAssetBurnAddress = "n1issueSubAssetXXXXXXXXXXXXXbNiH6v"
strIssueUniqueAssetBurnAddress = "n1issueUniqueAssetXXXXXXXXXXS4695i"
# Global Burn Address
strGlobalBurnAddress = "n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP"
class CoreRegTestParams(bitcoin.core.CoreTestNetParams):
MAX_MONEY = 21000000000 * COIN
NAME = 'regtest'
GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000'))
SUBSIDY_HALVING_INTERVAL = 150
PROOF_OF_WORK_LIMIT = 2**256-1 >> 1
# Burn Amounts
    nIssueAssetBurnAmount = 500 * COIN
    nReissueAssetBurnAmount = 100 * COIN
    nIssueSubAssetBurnAmount = 100 * COIN
    nIssueUniqueAssetBurnAmount = 5 * COIN
# Burn Addresses
strIssueAssetBurnAddress = "n1issueAssetXXXXXXXXXXXXXXXXWdnemQ"
strReissueAssetBurnAddress = "n1ReissueAssetXXXXXXXXXXXXXXWG9NLd"
strIssueSubAssetBurnAddress = "n1issueSubAssetXXXXXXXXXXXXXbNiH6v"
strIssueUniqueAssetBurnAddress = "n1issueUniqueAssetXXXXXXXXXXS4695i"
# Global Burn Address
strGlobalBurnAddress = "n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP"
# monkey patching
bitcoin.core.COIN = COIN
bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE
bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT
bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS
bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC
bitcoin.core.CoreMainParams = CoreMainParams
bitcoin.core.CoreTestNetParams = CoreTestNetParams
bitcoin.core.CoreRegTestParams = CoreRegTestParams
def GetParams():
return bitcoin.core.coreparams
| 1.835938 | 2 |
conditions/toy_shop.py | MaggieIllustrations/softuni-github-programming | 0 | 12799531 | <reponame>MaggieIllustrations/softuni-github-programming
holiday_price = float(input())
puzzle_count = int(input())
dolls_count = int(input())
teddy_bears_count = int(input())
minions_count = int(input())
trucks_count = int(input())
#
total_price_dolls = dolls_count * 3
total_price_puzzles = puzzle_count * 2.6
total_price_teddy_bears = teddy_bears_count * 4.10
total_price_minions = minions_count * 8.20
total_price_trucks = trucks_count * 2
total_price = total_price_puzzles + total_price_dolls + \
total_price_teddy_bears + total_price_minions + \
total_price_trucks
total_amount_toys = puzzle_count + dolls_count + teddy_bears_count + minions_count + trucks_count
if total_amount_toys >= 50:
discount = total_price * 0.25
total_price = total_price - discount
rent = total_price * 0.1
earning_after_rent = total_price - rent
if earning_after_rent >= holiday_price:
earning_left = earning_after_rent - holiday_price
print(f"Yes! {earning_left:.2f} lv left.")
else:
needed_money = holiday_price - earning_after_rent
print(f"Not enough money! {needed_money:.2f} lv left.")
else:
rent = total_price * 0.1
earning_after_rent = total_price - rent
if earning_after_rent >= holiday_price:
earning_left = earning_after_rent - holiday_price
print(f"Yes! {earning_left:.2f} lv left.")
else:
needed_money = holiday_price - earning_after_rent
print(f"Not enough money! {needed_money:.2f} lv needed.")
| 4.03125 | 4 |
profile_generator/generator_test.py | nethy/profile-generator | 0 | 12799532 | from unittest import TestCase
from unittest.mock import Mock, patch
from profile_generator import generator
from profile_generator.generator import (
ConfigFileReadError,
InvalidConfigFileError,
NoConfigFileError,
OutputDirCreationFailure,
ProfileWriteError,
TemplateFileReadError,
)
from profile_generator.schema import object_of, type_of
class ProfileGeneratorTest(TestCase):
@patch("sys.argv", ["app.py", "one.json", "two.json"])
def test_get_config_files_returns_config_files(self) -> None:
self.assertEqual(["one.json", "two.json"], generator.get_config_files())
@patch("sys.argv", ["app.py"])
def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None:
self.assertRaises(NoConfigFileError, generator.get_config_files)
@patch(
"profile_generator.util.file.create_dir", lambda *xs: "/root/" + "/".join(xs)
)
def test_create_output_dir_raises_returns_created_dir_path(self) -> None:
self.assertEqual("/root/profiles", generator.create_output_dir())
@patch("profile_generator.util.file.create_dir")
def test_create_output_dir_raises_error_when_cannot_create_dir(
self, create_dir: Mock
) -> None:
create_dir.side_effect = OSError
self.assertRaises(OutputDirCreationFailure, generator.create_output_dir)
@patch("profile_generator.util.file.read_file")
@patch(
"profile_generator.util.file.get_full_path", lambda *xs: "/root/" + "/".join(xs)
)
def test_get_profile_template_returns_template_file_content(
self, read_file: Mock
) -> None:
read_file.return_value = "file content"
self.assertEqual("file content", generator.get_profile_template())
read_file.assert_called_once_with("/root/templates/raw_therapee.pp3")
@patch("profile_generator.util.file.read_file")
def test_get_profile_template_raises_error_when_cannot_read_template_file(
self, read_file: Mock
) -> None:
read_file.side_effect = OSError
self.assertRaises(TemplateFileReadError, generator.get_profile_template)
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_loads_configuration_files(
self, read_file: Mock
) -> None:
read_file.return_value = '{"a": 2}'
schema = object_of({"a": type_of(int)})
config = generator.load_configuration_file("config.json", schema)
self.assertEqual({"a": 2}, config)
read_file.assert_called_once_with("config.json")
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_raises_error_when_config_file_cannot_be_read(
self, read_file: Mock
) -> None:
schema = object_of({"a": type_of(int)})
read_file.side_effect = OSError
self.assertRaises(
ConfigFileReadError,
generator.load_configuration_file,
"config.json",
schema,
)
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_raises_error_when_contains_variable_error(
self, read_file: Mock
) -> None:
read_file.return_value = '{"a": "$a"}'
schema = object_of({"a": type_of(str)})
self.assertRaises(
InvalidConfigFileError,
generator.load_configuration_file,
"config.json",
schema,
)
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_raises_error_when_config_file_is_invalid(
self, read_file: Mock
) -> None:
read_file.return_value = '{"a": false}'
schema = object_of({"a": type_of(int)})
self.assertRaises(
InvalidConfigFileError,
generator.load_configuration_file,
"config.json",
schema,
)
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_raises_error_when_config_file_is_invalid_json(
self, read_file: Mock
) -> None:
read_file.return_value = '{"a": false'
schema = object_of({"a": type_of(int)})
self.assertRaises(
InvalidConfigFileError,
generator.load_configuration_file,
"config.json",
schema,
)
def test_create_profile_content_should_create_profile_content(self) -> None:
template = "{a}"
cfg = {"a": "1"}
marshall = lambda x: x
content = generator.create_profile_content(template, cfg, marshall)
self.assertEqual(content, "1")
@classmethod
@patch("profile_generator.util.file.write_file")
def test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None:
name = "profile_name"
content = "1"
output_dir = "dir"
generator.persist_profile(name, content, output_dir)
write_file.assert_called_once_with(content, output_dir, f"{name}.pp3")
@patch("profile_generator.util.file.write_file")
def test_persist_profile_should_raise_error_when_writing_file_failed(
self, write_file: Mock
) -> None:
name = "profile_name"
content = "1"
output_dir = "dir"
write_file.side_effect = OSError
self.assertRaises(
ProfileWriteError,
generator.persist_profile,
name,
content,
output_dir,
)
| 2.703125 | 3 |
lib/conda.py | rdmolony/scaffold | 0 | 12799533 | <reponame>rdmolony/scaffold<filename>lib/conda.py
def run_in_env(c, command, env):
commands = ['eval "$(conda shell.bash hook)"', f'conda activate {env}']
commands.append(command)
c.run(' && '.join(commands))
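# Example usage (a sketch; assumes `c` is an invoke/fabric-style Context whose
# .run() executes shell commands, and that a conda env named "myenv" exists):
#
#   from invoke import Context
#   run_in_env(Context(), "python -V", env="myenv")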
| 1.695313 | 2 |
python/python_logging_note.py | tszandy/notes | 0 | 12799534 | import logging
# store logging output in filename.log (UTF-8), capturing log level DEBUG and above, i.e. everything
# (the encoding argument of basicConfig requires Python 3.9+)
logging.basicConfig(filename="filename.log",encoding="utf-8",level = logging.DEBUG)
logging.debug("debug message")
logging.info("info message")
logging.warning("warning message")
logging.error("error message")
logging.critical("critical message")
# A logger can send records to a single file handler, or to several handlers, each with its own log level.
file_handler = logging.FileHandler(filename = "filename.log")
file_handler.setLevel(logging.DEBUG)
file_handler.set_name("file_handler")
format_string = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(format_string)
file_handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()  # console handler; attach with logger.addHandler(stream_handler)
try:
run()
except:
logging.exception('Got exception on main handler')
raise
# logging levels (level name : numeric value)
# CRITICAL : 50
# ERROR    : 40
# WARNING  : 30
# INFO     : 20
# DEBUG    : 10
# NOTSET   : 0
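# A minimal end-to-end sketch tying the notes above together ("app" and
# "app.log" are placeholder names, not tied to any particular project):
app_logger = logging.getLogger("app")
app_logger.setLevel(logging.DEBUG)
app_handler = logging.FileHandler("app.log", encoding="utf-8")
app_handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
app_logger.addHandler(app_handler)
app_logger.info("application started")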
| 2.984375 | 3 |
quiz/users/serializers.py | diego-marcelino/valora-quiz | 0 | 12799535 | from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_simplejwt.tokens import RefreshToken
User = get_user_model()
class SignupSerializer(serializers.ModelSerializer):
"""Serializer for signup user."""
password = serializers.CharField(max_length=150,
min_length=6,
write_only=True)
def create(self, validated_data):
"""Create a new user."""
return User.objects.create_user(**validated_data)
class Meta:
"""Meta information for signup serializer."""
model = User
fields = ['username', 'name', 'role', 'password']
extra_kwargs = {
'username': {
'required': True
},
'role': {
'required': True
},
'password': {
'required': True
}
}
ref_name = 'Sign up credentials'
class LoginSerializer(serializers.Serializer):
"""Serializer for login user."""
password = serializers.CharField(max_length=150,
min_length=5,
write_only=True)
username = serializers.CharField(max_length=150,
min_length=5,
write_only=True)
def validate(self, attrs):
"""Validate credentials and get user tokens."""
username = attrs.get('username', '')
password = attrs.get('password', '')
user = authenticate(username=username, password=password)
if not user:
raise AuthenticationFailed(_('Invalid credentials'))
refresh = RefreshToken.for_user(user)
return {'access': str(refresh.access_token), 'refresh': str(refresh)}
class Meta:
"""Meta information for login serializer."""
ref_name = 'Login credentials'
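# Example usage (a sketch of the typical DRF flow for the serializer above; the
# credentials are placeholders):
#
#   serializer = LoginSerializer(data={"username": "alice", "password": "secret"})
#   serializer.is_valid(raise_exception=True)
#   tokens = serializer.validated_data  # {"access": "...", "refresh": "..."}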
| 2.296875 | 2 |
2017/20/asteroids.py | xocasdashdash/advent-of-code | 0 | 12799536 | data = open("input","r").read()
def parse(particle):
return [list(map(int, p[3:-1].split(","))) for p in particle.split(", ")]
def step(d):
    # d = [position, velocity, acceleration]; one tick: acceleration updates
    # velocity, then velocity updates position.
    d[1][0] += d[2][0]
    d[1][1] += d[2][1]
    d[1][2] += d[2][2]
    d[0][0] += d[1][0]
    d[0][1] += d[1][1]
    d[0][2] += d[1][2]
def part1(data):
particles = [parse(d) for d in data.split('\n')]
while True:
for d in particles:
step(d)
m = sum([abs(e) for e in particles[0][0]])
min_n = 0
for i, d in enumerate(particles):
if sum([abs(e) for e in d[0]]) < m:
min_n = i
m = sum([abs(e) for e in d[0]])
print(min_n)
def part2(data):
particles = [parse(d) for d in data.split('\n')]
while True:
positions = {}
delete = []
for i, d in enumerate(particles):
step(d)
if tuple(d[0]) in positions:
delete += [i, positions[tuple(d[0])]]
else:
positions[tuple(d[0])] = i
particles = [d for i, d in enumerate(particles) if i not in delete]
print(len(particles))
#part1(data)
part2(data)
| 3.09375 | 3 |
create_indexes.py | AuthEceSoftEng/jira-apache-downloader | 0 | 12799537 | <reponame>AuthEceSoftEng/jira-apache-downloader<gh_stars>0
import pymongo
from properties import database_host_and_port
if __name__ == "__main__":
client = pymongo.MongoClient(database_host_and_port)
db = client["jidata"]
db["issues"].create_index('projectname')
db["users"].create_index('projectname')
db["comments"].create_index('issue')
db["comments"].create_index('projectname')
db["events"].create_index('issue')
db["events"].create_index('projectname')
db["worklogs"].create_index('issue')
db["worklogs"].create_index('projectname')
| 2.09375 | 2 |
src/signalalign/remove_sa_analyses.py | UCSC-nanopore-cgl/signalAlign | 5 | 12799538 | #!/usr/bin/env python
"""Remove embedded signalalign analyses from files"""
########################################################################
# File: remove_sa_analyses.py
# executable: remove_sa_analyses.py
#
# Author: <NAME>
# History: 02/06/19 Created
########################################################################
import os
from py3helpers.utils import list_dir
from py3helpers.multiprocess import *
from argparse import ArgumentParser
from signalalign.fast5 import Fast5
import numpy as np
def parse_args():
parser = ArgumentParser(description=__doc__)
# required arguments
parser.add_argument('--directory', '-d', required=True, action='store',
dest='dir', type=str, default=None,
help="Path to directory of fast5 files")
parser.add_argument('--analysis', required=False, action='store_true',
dest='analysis', default=False,
help="Remove all analysis files")
parser.add_argument('--basecall', required=False, action='store_true',
dest='basecall', default=False,
help="Remove all basecall files")
parser.add_argument('--signalalign', required=False, action='store_true',
dest='signalalign', default=False,
help="Remove all signalalign files")
parser.add_argument('--threads', required=False, action='store',
dest='threads', default=1, type=int,
help="number of threads to run")
args = parser.parse_args()
return args
def remove_sa_analyses(fast5):
"""Remove signalalign analyses from a fast5 file"""
assert os.path.exists(fast5), "Fast5 path does not exist".format(fast5)
fh = Fast5(fast5, read='r+')
counter = 0
for analyses in [x for x in list(fh["Analyses"].keys()) if "SignalAlign" in x]:
fh.delete(os.path.join("Analyses", analyses))
counter += 1
fh = fh.repack()
fh.close()
return counter
def remove_basecall_analyses(fast5):
"""Remove basecall analyses from a fast5 file"""
assert os.path.exists(fast5), "Fast5 path does not exist".format(fast5)
fh = Fast5(fast5, read='r+')
counter = 0
for analyses in [x for x in list(fh["Analyses"].keys()) if "Basecall" in x]:
fh.delete(os.path.join("Analyses", analyses))
counter += 1
fh = fh.repack()
fh.close()
return counter
def remove_analyses(fast5):
"""Remove analyses from a fast5 file"""
assert os.path.exists(fast5), "Fast5 path does not exist".format(fast5)
fh = Fast5(fast5, read='r+')
counter = 0
for analyses in [x for x in list(fh["Analyses"].keys())]:
fh.delete(os.path.join("Analyses", analyses))
counter += 1
fh.delete("Analyses")
fh = fh.repack()
fh.close()
return counter
def main():
args = parse_args()
function_to_run = None
if args.analysis:
function_to_run = remove_analyses
else:
if args.signalalign or not args.basecall:
function_to_run = remove_sa_analyses
elif args.basecall:
function_to_run = remove_basecall_analyses
assert function_to_run is not None, "Must select --analysis, --signalalign or --basecall."
service = BasicService(function_to_run, service_name="forward_multiprocess_aggregate_all_variantcalls")
files = list_dir(args.dir, ext="fast5")
total, failure, messages, output = run_service(service.run, files,
{}, ["fast5"], worker_count=args.threads)
print("Deleted {} analysis datasets deleted from {} files".format(np.asarray(output).sum(), len(files)))
if __name__ == '__main__':
main()
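# Example invocation (flags as defined in parse_args above):
#   python remove_sa_analyses.py --directory /path/to/fast5s --signalalign --threads 4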
| 2.4375 | 2 |
Max_elem.py | ScriptErrorVGM/Project2021 | 0 | 12799539 | # 1
def max_elem(a):
max0 = a[0]
for elem in a:
if elem > max0:
max0 = elem
return max0
list0 = [2,3,4,5,6,7,1,2,3]
result = max_elem(list0)
print("#1 :",result) # return 7
# 2
list1 = [10,12,3,14,20,7,6,5]
list1.sort()
print("#2 :",list1[-1])
# 3
list2 = [3,5,9,7,1,5,8,8,7,5,6]
max_num = max(list2)
print("#3 :", max_num)
#4
from functools import reduce
list3 = [-5,-6,-7,-99,-67,-3,-4,-9]
print("#4 :",reduce(max, list3)) | 3.921875 | 4 |
frameworks/Python/falcon/app_orjson.py | http4k/FrameworkBenchmarks | 2 | 12799540 | #!/usr/bin/env python
import orjson
from falcon import media
from app import wsgi
# custom JSON handler
JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads)
extra_handlers = {
"application/json": JSONHandler,
"application/json; charset=UTF-8": JSONHandler
}
wsgi.req_options.media_handlers.update(extra_handlers)
wsgi.resp_options.media_handlers.update(extra_handlers)
| 1.71875 | 2 |
setup.py | kateliev/vfjLib | 5 | 12799541 | <filename>setup.py
from __future__ import absolute_import, division, print_function
import re
import sys
from codecs import open
from os import path
from setuptools import find_packages, setup
folderLib = 'Lib'
packageName = find_packages(folderLib)[0]
def get_version(*args):
verpath = (folderLib, packageName, '__init__.py')
verstrline = open(path.join(*verpath), "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
return mo.group(1)
else:
return "undefined"
def get_absolute_path(*args):
"""Transform relative pathnames into absolute pathnames."""
directory = path.dirname(path.abspath(__file__))
return path.join(directory, *args)
def get_description(*args):
readmepath = get_absolute_path('README.md')
if path.exists(readmepath):
long_description = open(readmepath, encoding='utf-8').read()
else:
long_description = ''
return long_description
def get_requirements(*args):
"""Get requirements from pip requirement files."""
requirements = set()
with open(get_absolute_path(*args)) as handle:
for line in handle:
# Strip comments.
line = re.sub(r'^#.*|\s#.*', '', line)
# Ignore empty lines
if line and not line.isspace():
requirements.add(re.sub(r'\s+', '', line))
return sorted(requirements)
needs_pytest = {'pytest', 'test'}.intersection(sys.argv)
pytest_runner = ['pytest_runner'] if needs_pytest else []
needs_wheel = {'bdist_wheel'}.intersection(sys.argv)
wheel = ['wheel'] if needs_wheel else []
setup(
name=packageName,
version=get_version(),
description='Low-level reader and writer for FontLab JSON (VFJ) font source files',
long_description=get_description(),
long_description_content_type='text/markdown',
url='https://github.com/kateliev/vfjLib',
download_url='https://github.com/kateliev/vfjLib/archive/master.zip',
author='<NAME>',
author_email='<EMAIL>',
license='LICENSE',
classifiers=[
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Multimedia :: Graphics :: Editors :: Vector-Based',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.7',
],
keywords=['opentype', 'font', 'fontlab', 'vfj'],
package_dir={"": folderLib},
packages=[packageName],
include_package_data=True,
setup_requires=pytest_runner + wheel,
tests_require=[
'pytest>=2.8',
],
python_requires='>=2.7',
install_requires=get_requirements('requirements.txt'),
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'vfj=vfjLib:main',
# ],
#},
) | 2.1875 | 2 |
cottonformation/res/elasticbeanstalk.py | gitter-badger/cottonformation-project | 0 | 12799542 | # -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class EnvironmentOptionSetting(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Environment.OptionSetting"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html
Property Document:
- ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace
- ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname
- ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename
- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Environment.OptionSetting"
rp_Namespace: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Namespace"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace"""
rp_OptionName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "OptionName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname"""
p_ResourceName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ResourceName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename"""
p_Value: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value"""
@attr.s
class ApplicationVersionSourceBundle(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html
Property Document:
- ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket
- ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle"
rp_S3Bucket: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "S3Bucket"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket"""
rp_S3Key: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "S3Key"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key"""
@attr.s
class ApplicationMaxAgeRule(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application.MaxAgeRule"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html
Property Document:
- ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3
- ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled
- ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application.MaxAgeRule"
p_DeleteSourceFromS3: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "DeleteSourceFromS3"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3"""
p_Enabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled"""
p_MaxAgeInDays: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxAgeInDays"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays"""
@attr.s
class ConfigurationTemplateSourceConfiguration(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html
Property Document:
- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname
- ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration"
rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname"""
rp_TemplateName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "TemplateName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename"""
@attr.s
class EnvironmentTier(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Environment.Tier"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html
Property Document:
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name
- ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type
- ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Environment.Tier"
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name"""
p_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type"""
p_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version"""
@attr.s
class ConfigurationTemplateConfigurationOptionSetting(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html
Property Document:
- ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace
- ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname
- ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename
- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting"
rp_Namespace: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Namespace"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace"""
rp_OptionName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "OptionName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname"""
p_ResourceName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ResourceName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename"""
p_Value: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value"""
@attr.s
class ApplicationMaxCountRule(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application.MaxCountRule"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html
Property Document:
- ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3
- ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled
- ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application.MaxCountRule"
p_DeleteSourceFromS3: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "DeleteSourceFromS3"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3"""
p_Enabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled"""
p_MaxCount: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxCount"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount"""
@attr.s
class ApplicationApplicationVersionLifecycleConfig(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html
Property Document:
- ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule
- ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig"
p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib(
default=None,
converter=ApplicationMaxAgeRule.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)),
metadata={AttrMeta.PROPERTY_NAME: "MaxAgeRule"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule"""
p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib(
default=None,
converter=ApplicationMaxCountRule.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)),
metadata={AttrMeta.PROPERTY_NAME: "MaxCountRule"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule"""
@attr.s
class ApplicationApplicationResourceLifecycleConfig(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html
Property Document:
- ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole
- ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig"
p_ServiceRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ServiceRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole"""
p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib(
default=None,
converter=ApplicationApplicationVersionLifecycleConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)),
metadata={AttrMeta.PROPERTY_NAME: "VersionLifecycleConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig"""
#--- Resource declaration ---
@attr.s
class ConfigurationTemplate(Resource):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ConfigurationTemplate"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html
Property Document:
- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description
- ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid
- ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings
- ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn
- ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname
- ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ConfigurationTemplate"
rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description"""
p_EnvironmentId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "EnvironmentId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid"""
p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib(
default=None,
converter=ConfigurationTemplateConfigurationOptionSetting.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "OptionSettings"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings"""
p_PlatformArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PlatformArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn"""
p_SolutionStackName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SolutionStackName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname"""
p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib(
default=None,
converter=ConfigurationTemplateSourceConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "SourceConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration"""
@attr.s
class Application(Resource):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html
Property Document:
- ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description
- ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application"
p_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description"""
p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib(
default=None,
converter=ApplicationApplicationResourceLifecycleConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)),
metadata={AttrMeta.PROPERTY_NAME: "ResourceLifecycleConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig"""
@attr.s
class Environment(Resource):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Environment"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html
Property Document:
- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname
- ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description
- ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name
- ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role
- ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings
- ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn
- ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname
- ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename
- ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier
- ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Environment"
rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname"""
p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "CNAMEPrefix"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description"""
p_EnvironmentName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "EnvironmentName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name"""
p_OperationsRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OperationsRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role"""
p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib(
default=None,
converter=EnvironmentOptionSetting.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "OptionSettings"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings"""
p_PlatformArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PlatformArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn"""
p_SolutionStackName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SolutionStackName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname"""
p_TemplateName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TemplateName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename"""
p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib(
default=None,
converter=EnvironmentTier.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)),
metadata={AttrMeta.PROPERTY_NAME: "Tier"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier"""
p_VersionLabel: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "VersionLabel"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags"""
@property
def rv_EndpointURL(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values"""
return GetAtt(resource=self, attr_name="EndpointURL")
@attr.s
class ApplicationVersion(Resource):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ApplicationVersion"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html
Property Document:
- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname
- ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ApplicationVersion"
rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname"""
rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib(
default=None,
converter=ApplicationVersionSourceBundle.from_dict,
validator=attr.validators.instance_of(ApplicationVersionSourceBundle),
metadata={AttrMeta.PROPERTY_NAME: "SourceBundle"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description"""
| 2.15625 | 2 |
distributed/protocol/cupy.py | replicahq/distributed | 0 | 12799543 | """
Efficient serialization of GPU arrays.
"""
import cupy
from .cuda import cuda_serialize, cuda_deserialize
class PatchedCudaArrayInterface:
"""This class do two things:
1) Makes sure that __cuda_array_interface__['strides']
behaves as specified in the protocol.
2) Makes sure that the cuda context is active
when deallocating the base cuda array.
Notice, this is only needed when the array to deserialize
isn't a native cupy array.
"""
def __init__(self, ary):
cai = ary.__cuda_array_interface__
cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__["version"]
if cai.get("strides") is None and cai_cupy_vsn < 2:
cai.pop("strides", None)
self.__cuda_array_interface__ = cai
# Save a ref to ary so it won't go out of scope
self.base = ary
def __del__(self):
# Making sure that the cuda context is active
# when deallocating the base cuda array
try:
import numba.cuda
numba.cuda.current_context()
except ImportError:
pass
del self.base
@cuda_serialize.register(cupy.ndarray)
def serialize_cupy_ndarray(x):
# Making sure `x` is behaving
if not x.flags.c_contiguous:
x = cupy.array(x, copy=True)
header = x.__cuda_array_interface__.copy()
return header, [x]
@cuda_deserialize.register(cupy.ndarray)
def deserialize_cupy_array(header, frames):
(frame,) = frames
if not isinstance(frame, cupy.ndarray):
frame = PatchedCudaArrayInterface(frame)
arr = cupy.ndarray(
header["shape"], dtype=header["typestr"], memptr=cupy.asarray(frame).data
)
return arr
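# --- Hypothetical round-trip sketch (not part of the original module) ---
# In Distributed these handlers are normally invoked through the cuda_serialize /
# cuda_deserialize registries; calling them directly just illustrates the contract.
def _cupy_roundtrip_example():
    x = cupy.arange(6, dtype="float32").reshape(2, 3)
    header, frames = serialize_cupy_ndarray(x)   # header: __cuda_array_interface__ copy, frames: [x]
    y = deserialize_cupy_array(header, frames)   # rebuilds a cupy.ndarray over the same memory
    assert (x == y).all()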
| 2.453125 | 2 |
doc/autosar4_api/examples/create_composition_component.py | SHolzmann/autosar | 199 | 12799544 | import autosar
ws = autosar.workspace("4.2.2")
components = ws.createPackage("ComponentTypes")
swc = components.createCompositionComponent("MyComposition")
print(swc.name)
| 1.789063 | 2 |
opyapi/schema/schema.py | dkraczkowski/opyapi | 5 | 12799545 | <reponame>dkraczkowski/opyapi
from abc import ABCMeta
from typing import Any
from typing import Dict
from typing import List
from typing import Type
from .errors import ValidationError
from .types import Object
from .types import String
from .validators.validate import validate
class SchemaMeta(ABCMeta):
def __new__(mcs: "SchemaMeta", name: str, bases: tuple, namespace: dict, **kwargs):
if (
f"{namespace['__module__']}.{namespace['__qualname__']}"
== "opyapi.schema.schema.Schema"
):
return super().__new__(mcs, name, bases, namespace)
klass = super().__new__(mcs, name, bases, namespace)
required = []
if "required" in kwargs:
required = kwargs["required"]
schema_definition = Object(properties=klass.__annotations__, required=required)
klass.__schema__ = schema_definition
return klass
class Schema(metaclass=SchemaMeta):
__data__: Dict[str, Any]
__schema__: Object
__mappings__: Dict[Type, Dict]
def __init__(self, **kwargs) -> None:
super().__setattr__("__data__", {})
for key, value in kwargs.items():
self.__setattr__(key, value)
def __getattr__(self, attribute_name):
if attribute_name not in self.__schema__.properties:
raise AttributeError(
f"Attribute `{attribute_name}` is not specified in {self}."
)
return (
self.__data__[attribute_name] if attribute_name in self.__data__ else None
)
def __setattr__(self, attribute_name: str, value: Any) -> None:
if attribute_name not in self.__schema__.properties:
raise AttributeError(
f"Attribute `{attribute_name}` is not specified in {self}."
)
property_meta = self.__schema__.properties[attribute_name]
if isinstance(property_meta, String):
try:
value = property_meta.format_value(value)
except ValueError as e:
raise ValidationError(str(e))
else:
validate(value, property_meta)
self.__data__[attribute_name] = value
def to_dict(self) -> dict:
result = {}
for key, value in self.__data__.items():
if self.__schema__[key].write_only:
continue
result[key] = value
return result
@classmethod
def create_from(cls, obj: object) -> "Schema":
if isinstance(obj, dict):
return cls(**obj)
object_type: Type = type(obj)
if object_type not in cls.__mappings__:
raise ValueError(
f"Object of type {object_type} could not be mapped to {cls.__name__}. "
f"Have you forgot to define mapping for the object?"
)
mapping = cls.__mappings__[object_type]
result: Dict[str, Any] = {}
for key, attribute in cls.__schema__.properties.items():
if key not in mapping:
if attribute.nullable:
result[key] = None
else:
raise ValueError(
f"Property `{key}` is not nullable, "
f"and must be defined in mapping scheme for {obj.__class__}"
)
continue
mapped_key = mapping[key]
if isinstance(mapped_key, str):
result[key] = getattr(obj, mapped_key)
elif mapped_key is True or mapped_key == 1:
result[key] = getattr(obj, key)
elif callable(mapped_key):
result[key] = mapped_key(obj)
else:
raise ValueError(
f"Property {key} has invalid mapping setting for object {obj.__class__}."
)
return cls(**result)
__all__ = ["Schema"]
| 2.546875 | 3 |
Manannikov_K_DZ_1/test_job_#2.py | manannikovkonstantin/1824_GB_Python_1 | 0 | 12799546 | <gh_stars>0
def function(value):
    # Build the numbers i**3 + value for odd i in range(1, 1000), then sum those
    # whose digit sum is divisible by 7.
    degree = []
    sum_numbers = 0
    for i in range(1, 1000, 2):
        degree.append(i ** 3 + value)
    for item in degree:
        digit_sum = 0
        for x in str(item):
            digit_sum += int(x)
        if digit_sum % 7 == 0:
            sum_numbers += item
    print(sum_numbers)
function(0)
function(17) | 3.5625 | 4 |
date_time/calendar.py | nayanapardhekar/Python | 37 | 12799547 | '''
Task
You are given a date. Your task is to find what the day is on that date.
Input Format
A single line of input containing the space separated month, day and year, respectively, in format.
Constraints
* 2000<year<3000
Output Format
Output the correct day in capital letters.
Sample Input
08 05 2015
Sample Output
WEDNESDAY
Explanation
The day on August 5th 2015 was WEDNESDAY.
'''
# Enter your code here. Read input from STDIN. Print output to STDOUT
import calendar as cal
day = {0: 'MONDAY', 1: 'TUESDAY', 2: 'WEDNESDAY', 3: 'THURSDAY',
       4: 'FRIDAY', 5: 'SATURDAY', 6: 'SUNDAY'}
n = list(map(int, input().split()))
if n[2] in range(2001, 3000):
    # calendar.weekday(year, month, day) returns 0 for Monday ... 6 for Sunday,
    # so the result can be looked up directly instead of scanning the dict.
    print(day[cal.weekday(n[2], n[0], n[1])])
'''
output:
08 05 2015
WEDNESDAY
'''
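'''
Note (hypothetical alternative): the standard library already provides the day
names, so the hand-written mapping can be replaced with:
    print(cal.day_name[cal.weekday(n[2], n[0], n[1])].upper())
'''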
| 4.1875 | 4 |
travel/bean/userwrapper.py | sausage-team/travel-notes | 0 | 12799548 | <gh_stars>0
from core.bean.wrapper import *
class UserWrapper(Wrapper):
filter = ['phone', 'password']
    def __init__(self, status=0, data=None):
        # Avoid a shared mutable default argument
        data = {} if data is None else data
if isinstance(data, list):
for val in data:
self.remove_key(val)
else:
self.remove_key(data)
super().__init__(status, data) | 2.25 | 2 |
webapp/app.py | matthansen0/FitbitOnFHIR | 8 | 12799549 | import os, json
import cmd
import asyncio
from fitbit import Fitbit
from flask import Flask, render_template, url_for, session, redirect
from authlib.integrations.flask_client import OAuth
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
from azure.core.exceptions import ResourceExistsError
from azure.eventhub.aio import EventHubProducerClient
from azure.eventhub import EventData
app = Flask(__name__)
app.config.from_object('config')
app.secret_key = '!secret'
oauth = OAuth(app)
client = oauth.register(name="fitbit")
# Step 1: Bring user to homepage to offer sync service with device cloud (fitbit in this example)
@app.route('/')
def home():
return render_template("index.html")
@app.route("/login")
def login():
redirect_uri = url_for('auth', _external=True)
    return oauth.fitbit.authorize_redirect(redirect_uri)
@app.route('/auth')
def auth():
token = oauth.fitbit.authorize_access_token()
secretName = session["user"] = token["user_id"]
secretValue = token["refresh_token"]
app.secret_key = token["access_token"]
client = SecretClient(vault_url=app.config["VAULT_URL"], credential=DefaultAzureCredential())
try:
client.set_secret(secretName, secretValue)
except ResourceExistsError:
# assume user has renabled the service reset the key
client.begin_delete_secret(secretName)
# sync data with FHIR API using Io[M]T Connector
loop = asyncio.new_event_loop()
loop.run_until_complete(sync())
return "Successful Sync"
@app.route('/sync')
async def sync():
fit_client = Fitbit(user=session["user"], access_token=app.secret_key)
result = fit_client.init_sync()
# Create a producer client to send messages to the event hub.
# Specify a connection string to your event hubs namespace and
# the event hub name.
producer = EventHubProducerClient.from_connection_string(conn_str=app.config["EVENT_HUB_CONN_STR"])
async with producer:
# Create a batch.
event_data_batch = await producer.create_batch()
for item in result:
print(item)
event_data_batch.add(EventData(json.dumps(item, indent = 4)))
# Send the batch of events to the event hub.
await producer.send_batch(event_data_batch)
if __name__ == '__main__':
app.run()
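# --- Hypothetical config.py sketch (not part of the original app) ---
# The views above read VAULT_URL and EVENT_HUB_CONN_STR from app.config, and
# Authlib's oauth.register(name="fitbit") conventionally picks up FITBIT_* keys.
# The exact key names and endpoint URLs below are assumptions, since the real
# config module is not included here.
#
# FITBIT_CLIENT_ID = "<fitbit-client-id>"
# FITBIT_CLIENT_SECRET = "<fitbit-client-secret>"
# FITBIT_AUTHORIZE_URL = "https://www.fitbit.com/oauth2/authorize"
# FITBIT_ACCESS_TOKEN_URL = "https://api.fitbit.com/oauth2/token"
# VAULT_URL = "https://<keyvault-name>.vault.azure.net/"
# EVENT_HUB_CONN_STR = "Endpoint=sb://<namespace>.servicebus.windows.net/;SharedAccessKeyName=...;SharedAccessKey=...;EntityPath=<hub>"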
| 2.421875 | 2 |
captain_hook/comms/base/base_comm.py | brantje/captain_hook | 2 | 12799550 | <gh_stars>1-10
from __future__ import absolute_import
import yaml
from os.path import join
class BaseComm:
def __init__(self, config):
self.config = config
def setup(self):
raise NotImplementedError
def communicate(self):
raise NotImplementedError
def _load_config(self):
        with open(join(self.CONFIG_FOLDER, 'comms.yml'), 'rb') as config_file:
            # yaml.load() without an explicit Loader is deprecated (and an error in
            # PyYAML >= 6); safe_load is the idiomatic choice for a config file.
            yaml_config = yaml.safe_load(config_file.read())
return yaml_config
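# --- Hypothetical subclass sketch (not part of the original module) ---
# Illustrates the intended extension point: concrete comms override setup() and
# communicate(). The CONFIG_FOLDER value and the message parameter are
# assumptions made for the example.
class EchoComm(BaseComm):
    CONFIG_FOLDER = 'config'

    def setup(self):
        # Real comms would read endpoints/credentials from comms.yml here.
        self.settings = self._load_config()

    def communicate(self, message=None):
        print('[echo]', message)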
| 2.296875 | 2 |