the-stack_106_21499
|
import os
import unittest
import importlib
import datetime
import pytz
import logging
from django.test import TestCase
from django.conf import settings
from accounts.models import (
RcLdapUser,
RcLdapGroup
)
from accounts.models import User
def assert_test_env():
"""Helper method to verify that tests are not being executed in a production environment."""
# We can reasonably assume that no production resource will satisfy these criteria, so
# this is one of several safeguards against running the functional tests against prod.
assert os.environ.get('RCAMP_DEBUG') == 'True'
assert settings.DATABASES['rcldap']['PASSWORD'] == 'password'
# In an abundance of caution, also make sure that the LDAP and MySQL connections are configured
# to use the test services.
assert 'ldap' in settings.DATABASES['rcldap']['NAME']
assert 'database' in settings.DATABASES['default']['HOST']
# Probably not running against prod backends.
return True
def _assert_test_env_or_false():
"""
This method returns False if an AssertionError is thrown in assert_test_env. It exists only for
the unittest.skipUnless decorator, which requires a Boolean value and will not catch exceptions.
"""
is_test_env = True
try:
assert_test_env()
except AssertionError:
is_test_env = False
return is_test_env
def _purge_ldap_objects():
"""Helper method for purging LDAP objects between tests."""
assert_test_env()
ldap_users = RcLdapUser.objects.all()
for user in ldap_users:
user.delete()
ldap_groups = RcLdapGroup.objects.all()
for group in ldap_groups:
group.delete()
def get_auth_user_defaults():
"""Return a dictionary of reasonable defaults for auth users."""
auth_user_defaults = dict(
username = 'testuser',
password = 'password',
first_name = 'Test',
last_name = 'User',
email = '[email protected]'
)
return auth_user_defaults
def localize_timezone(year, month, day, zone):
"""Returns a timezone aware date object"""
date = datetime.datetime(year, month, day)
date_tz_aware = pytz.timezone(zone).localize(date)
return date_tz_aware
@unittest.skipUnless(_assert_test_env_or_false(),"Tests are not being run against a safe test environment!")
class SafeTestCase(TestCase):
"""
Subclass of the Django framework TestCase that verifies that the current host environment does not
look like a production environment. If the test environment checks fail, then the test case is
skipped. Class and instance setUp and tearDown methods contain the same checks, as the database
connection settings can be changed within the context of individual test cases.
IMPORTANT: Every unit or integration test should inherit from this class. For functional tests
use tests.utilities.functional.SafeStaticLiveServerTestCase instead.
"""
logging.disable(logging.CRITICAL)
databases = frozenset({'default', 'culdap', 'csuldap', 'rcldap'})
@classmethod
def setUpClass(cls):
assert_test_env()
super(SafeTestCase,cls).setUpClass()
@classmethod
def tearDownClass(cls):
assert_test_env()
super(SafeTestCase,cls).tearDownClass()
def setUp(self):
assert_test_env()
super(SafeTestCase,self).setUp()
def tearDown(self):
assert_test_env()
super(SafeTestCase,self).tearDown()
class SessionEnabledTestMixin:
"""
Mixin for Django test cases using the TestClient that streamlines the process of setting and
modifying session variables. The get_session method expects the TestClient as its only
argument. Usage:
>>> session = self.get_session(self.client)
>>> session['key'] = 'value'
>>> session.save()
"""
def _configure_session(self, client):
engine = importlib.import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
self._store = store
client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
def get_session(self, client):
if not hasattr(self,'_store'):
self._configure_session(client)
return self._store
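# Illustrative usage sketch (not part of the original module): a test case combining
# SafeTestCase with SessionEnabledTestMixin. The URL, session key, and expected status
# codes below are hypothetical.
#
#   class ExampleSessionTestCase(SessionEnabledTestMixin, SafeTestCase):
#       def test_session_value_round_trip(self):
#           session = self.get_session(self.client)
#           session['selected_account'] = 'testuser'
#           session.save()
#           response = self.client.get('/')
#           self.assertIn(response.status_code, (200, 302))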
|
the-stack_106_21500
|
import random
from datetime import datetime
import zeeguu_core
from zeeguu_core_test.model_test_mixin import ModelTestMixIn
from zeeguu_core_test.rules.exercise_rule import ExerciseRule
from zeeguu_core_test.rules.outcome_rule import OutcomeRule
from zeeguu_core_test.rules.user_rule import UserRule
from zeeguu_core.word_scheduling.arts.ab_testing import ABTesting
from zeeguu_core.word_scheduling import arts
class WordsToStudyTest(ModelTestMixIn):
def setUp(self):
super().setUp()
self.BOOKMARK_COUNT = 20
self.user_rule = UserRule()
self.user_rule.add_bookmarks(self.BOOKMARK_COUNT, exercises_count=1)
self.user = self.user_rule.user
def test_new_bookmark_has_the_highest_priority(self):
""" Adding a new bookmark, makes it the next thing to study """
# GIVEN
new_bookmark = self.user_rule.add_bookmarks(1)[0].bookmark
# WHEN
arts.update_bookmark_priority(zeeguu_core.db, self.user)
# THEN
bookmark = self.__get_bookmark_with_highest_priority()
# print (bookmark)
# print (new_bookmark)
self.assertTrue(new_bookmark == bookmark,
"The newly added bookmark should have the highest priority")
def test_just_finished_bookmark_has_not_the_highest_priority(self):
# GIVEN
ABTesting._algorithms = [ABTesting._algorithms[random.randint(0, len(ABTesting._algorithms) - 1)]]
arts.update_bookmark_priority(zeeguu_core.db, self.user)
first_bookmark_to_study = self.__get_bookmark_with_highest_priority()
# WHEN
# Add an exercise
exercise_rule = ExerciseRule()
exercise_rule.exercise.time = datetime.now()
exercise_rule.exercise.solving_speed = 100
exercise_rule.exercise.outcome = OutcomeRule().correct
first_bookmark_to_study.add_new_exercise(exercise_rule.exercise)
arts.update_bookmark_priority(zeeguu_core.db, self.user)
# THEN
bookmark = self.__get_bookmark_with_highest_priority()
assert first_bookmark_to_study != bookmark
def __get_bookmark_with_highest_priority(self):
bookmarks_to_study = self.user.bookmarks_to_study()
if not bookmarks_to_study:
return None
return bookmarks_to_study[0]
def __get_bookmark_with_lowest_priority(self):
bookmarks_to_study = self.user.bookmarks_to_study()
if len(bookmarks_to_study) == 0:
return None
return bookmarks_to_study[-1]
|
the-stack_106_21501
|
import pytest, allure
from driver import startdriver
from pageElements import lenovo_product_page
from pageElements import lenovo_popup
@allure.story('Testing Reevoo modules on Lenovo product page')
class TestProductPage():
start_page = 'https://www.lenovo.com/gb/en/laptops/thinkpad/x-series/ThinkPad-X1-Carbon-6th-Gen/p/22TP2TXX16G'
def setup(self):
new_driver = startdriver.StartDriver()
self.driver = new_driver.start()
self.driver.implicitly_wait(5)
self.driver.get(self.start_page)
def teardown(self):
self.driver.quit()
@allure.feature('Find small elements on page Rating')
def test_find_reevoo_rating_small_btn(self):
with allure.step('Find Rating module on the page'):
small_module = lenovo_product_page.ProductPage(self.driver).get_small_module_elements()
with allure.step('Check module is visible for users'):
assert small_module.is_displayed()
@allure.feature('Find Customer reviews rating on page')
def test_reviews_score_not_a_null(self):
with allure.step('Find Reviews Rating on the page'):
reviews_score = lenovo_product_page.ProductPage(self.driver).get_product_rating()
with allure.step('Check that the rating is visible and has an adequate value'):
assert reviews_score is not None
@allure.feature('Find Customer reviews total numbers')
def test_check_total_reviews_numb(self):
with allure.step('Find module on the page'):
total_reviews = lenovo_product_page.ProductPage(self.driver).get_total_reviews_number()
with allure.step('Check that the total reviews count on the page is > 0'):
assert total_reviews > 0
@allure.feature('Open Customer Reviews popup by clicking btn')
def test_open_customer_review_popup(self):
with allure.step('Find module on the page'):
lenovo_popup.RevooPopup(self.driver).open_custom_reviews_popup()
with allure.step('Click on the icon -> open the Pop Up'):
popup = lenovo_popup.RevooPopup(self.driver).switch_to_customer_reviews_popup()
with allure.step('Check that the popup is visible to users and is connected to Reevoo'):
assert str(popup.get_attribute('title')) == 'Reevoo'
@allure.feature('Check if Rating and Reviews number is in Customer Reviews popup')
def test_rating_and_reviews_numb_in_popup(self):
with allure.step('Get values from product page'):
page_score = lenovo_product_page.ProductPage(self.driver).get_product_rating()
page_reviews = lenovo_product_page.ProductPage(self.driver).get_total_reviews_number()
with allure.step('Find module on the page'):
lenovo_popup.RevooPopup(self.driver).open_custom_reviews_popup()
with allure.step('Click on the icon -> open the Pop Up'):
popup = lenovo_popup.RevooPopup(self.driver).switch_to_customer_reviews_popup()
with allure.step('Check that the popup is visible to users and is connected to Reevoo'):
assert str(popup.get_attribute('title')) == 'Reevoo'
with allure.step('Check that the total review score in the popup equals the one on the product page'):
popup_score = lenovo_popup.RevooPopup(self.driver).get_rating_from_popup()
popup_reviews = lenovo_popup.RevooPopup(self.driver).get_total_reviews_num_from_popup()
assert page_score == popup_score
assert page_reviews == popup_reviews
@allure.feature('Check scores')
@pytest.mark.parametrize('score_params_names', ['Battery life',
'Design',
'Size and weight',
'Performance',
'Value for money',
'Overall rating'])
def test_customer_review_scores_is_available(self, score_params_names):
with allure.step('Prepare the test: open the Customer Reviews popup'):
lenovo_popup.RevooPopup(self.driver).open_custom_reviews_popup()
lenovo_popup.RevooPopup(self.driver).switch_to_customer_reviews_popup()
with allure.step('Get List of scores with Score Name, Value and Element'):
score_list = lenovo_popup.RevooPopup(self.driver).custom_reviews_scores_table()
with allure.step('Check that score "%s" is available' % score_params_names):
assert len(score_list) > 0
assert score_params_names in [next(iter(i)) for i in score_list]
with allure.step('Check that score "%s" has a value and is not 0' % score_params_names):
assert [float(x[score_params_names]) for x in score_list if score_params_names in x.keys()][0] != 0
def test_customer_review_total_rating_calculates_correctly(self):
with allure.step('Prepare the test: get the page rating and open the Customer Reviews popup'):
page_score = lenovo_product_page.ProductPage(self.driver).get_product_rating()
lenovo_popup.RevooPopup(self.driver).open_custom_reviews_popup()
lenovo_popup.RevooPopup(self.driver).switch_to_customer_reviews_popup()
with allure.step('Get List of scores with Score Name, Value and Element'):
score_list = lenovo_popup.RevooPopup(self.driver).custom_reviews_scores_table()
score_values = [float(score[next(iter(score))]) for score in score_list if 'Overall rating' not in score.keys()]
calc_score = (sum(score_values) / len(score_values))
assert float(page_score) == round(calc_score, 1)
|
the-stack_106_21503
|
import functools
import os
import sqlite3
import threading
import typing
from pprint import pprint
from core import utils, config
from core.objects.annotation import Annotation
from core.objects.book import Book
from core.objects.file import File
_threadlocal = threading.local()
class DataExtractor:
@classmethod
def from_root(cls, path: str):
return cls(path)
def __init__(self, path: str)-> None:
self.path = path
self.db_path = os.path.join(
self.path, "system", "config", "books.db"
)
try:
self._cursor = sqlite3.connect(self.db_path)
except sqlite3.Error:
    utils.fatal("Can't open sqlite database. Check if path %s is correct and "
                "subpath system/config/books.db exists" % self.path)
@property
def cursor(self):
if not hasattr(_threadlocal, "cursor"):
_threadlocal.cursor = sqlite3.connect(self.db_path)
return _threadlocal.cursor
def get_tags(self) -> typing.List[Annotation]:
d = self.cursor.execute("""
SELECT t.*, tn.*
FROM Tags t
JOIN TagNames tn ON t.TagID = tn.OID
JOIN Items it ON t.ItemID = it.OID
WHERE it.State = 0
ORDER BY t.TimeEdt DESC
""")
books = self.get_books()
out = [
Annotation(*i)
for i in d.fetchall()
]
parent_map = self._get_items_parent_map()
for a in out:
bookId = parent_map.get(a.ItemId, a.ItemId)
try:
book = next(filter(lambda b: b.OID == bookId, books))
except StopIteration:
continue
a.BookId = bookId
a.BookTitle = book.Title
return out
def get_tag(self, tag_id):
return next(filter(lambda item: item.OID == int(tag_id), self.get_tags()))
@functools.lru_cache(maxsize=config.cache_size)
def _get_items_parent_map(self)-> typing.Dict[int, int]:
# map: childId - parentId
cur = self.cursor.execute("SELECT OID, ParentId FROM Items ORDER BY OID")
data = cur.fetchall()
out = {}
for child_id, parent_id in data:
if parent_id:
while parent_id in out and out[parent_id]:
parent_id = out[parent_id]
out[child_id] = parent_id
return out
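# Illustrative note (not part of the original module): the loop above collapses each
# child -> parent chain down to its root ancestor. For example, raw (OID, ParentID) rows
#   {2: 1, 3: 2, 4: 3}
# are flattened to
#   {2: 1, 3: 1, 4: 1}
# so every item maps directly to its top-level parent (the book), assuming parents have
# smaller OIDs than their children, which the ORDER BY OID exploits.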
@functools.lru_cache(maxsize=config.cache_size)
def get_books(self):
c = self.cursor.execute("SELECT * FROM Books")
c = c.fetchall()
return [
Book(*i)
for i in c
]
def annotations_count(self) -> typing.Dict[int, int]:
    # returns a mapping of BookId -> annotation count (minimal completion of the original stub)
    book_ids = [getattr(a, "BookId", None) for a in self.get_tags()]
    return {book_id: book_ids.count(book_id) for book_id in set(book_ids)}
@functools.lru_cache(maxsize=config.cache_size)
def get_book(self, oid: int)-> Book:
cur_oid = oid
while True:
c = self.cursor.execute("""
SELECT OID, ParentID
FROM Items
WHERE OID = ?
""", [cur_oid])
cur_oid, parent_id = c.fetchone()
if parent_id is None:
break
cur_oid = parent_id
c = self.cursor.execute("SELECT * FROM Books WHERE OID = ?", [cur_oid]).fetchone()
return Book(*c)
@functools.lru_cache(maxsize=config.cache_size)
def get_book_file(self, book_id: int):
c = self.cursor.execute("""
SELECT f.*, p.*
FROM Files f JOIN Paths p ON f.PathID = p.OID WHERE BookID = ?
""", [book_id])
return File(*c.fetchone())
if __name__ == '__main__':
de = DataExtractor.from_root("/home/skaledin/Dropbox/BACKUPS/PocketBook/")
tags = de.get_tags()
tagId = 6576
tag = next(filter(lambda i: i.OID == tagId, de.get_tags()))
from core.controller import AnnotationsPageController
con = AnnotationsPageController()
con.data_extractor = de
con.view_annotation(tagId)
|
the-stack_106_21507
|
import logging
import traceback
import uuid
from collections import defaultdict
import pymongo
from blitzdb.backends.base import Backend as BaseBackend
from blitzdb.backends.base import NotInTransaction
from blitzdb.document import Document
from blitzdb.helpers import delete_value, get_value, set_value
from .queryset import QuerySet
logger = logging.getLogger(__name__)
class DotEncoder:
DOT_MAGIC_VALUE = ":a5b8afc131:"
@classmethod
def encode(cls, obj, path):
def replace_key(key):
if isinstance(key, str):
return key.replace(".", cls.DOT_MAGIC_VALUE)
return key
if isinstance(obj, dict):
return {replace_key(key): value for key, value in obj.items()}
return obj
@classmethod
def decode(cls, obj):
if isinstance(obj, dict):
return {
key.replace(cls.DOT_MAGIC_VALUE, "."): value
for key, value in obj.items()
}
return obj
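# Illustrative note (not part of the original module): DotEncoder works around MongoDB's
# restriction on dots in key names by swapping "." for a magic marker in dict keys, e.g.
#   DotEncoder.encode({"a.b": 1}, [])  ->  {"a:a5b8afc131:b": 1}
#   DotEncoder.decode({"a:a5b8afc131:b": 1})  ->  {"a.b": 1}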
class Backend(BaseBackend):
"""A MongoDB backend.
:param db: An instance of a `pymongo.database.Database
<http://api.mongodb.org/python/current/api/pymongo/database.html>`_ class
Example usage:
.. code-block:: python
from pymongo import MongoClient
from blitzdb.backends.mongo import Backend as MongoBackend
c = MongoClient()
my_db = c.test_db
#create a new BlitzDB backend using a MongoDB database
backend = MongoBackend(my_db)
"""
standard_encoders = BaseBackend.standard_encoders + [DotEncoder]
def __init__(self, db, autocommit=False, use_pk_based_refs=True, **kwargs):
super().__init__(**kwargs)
self.db = db
self._autocommit = autocommit
self._save_cache = defaultdict(lambda: {})
self._delete_cache = defaultdict(lambda: {})
self._update_cache = defaultdict(lambda: {})
self._use_pk_based_refs = use_pk_based_refs
self.in_transaction = False
def begin(self):
if self.in_transaction: # we're already in a transaction...
self.commit()
self.in_transaction = True
def rollback(self, transaction=None):
if not self.in_transaction:
raise NotInTransaction("Not in a transaction!")
self._save_cache = defaultdict(lambda: {})
self._delete_cache = defaultdict(lambda: {})
self._update_cache = defaultdict(lambda: {})
self.in_transaction = False
def commit(self, transaction=None):
try:
for collection, cache in self._save_cache.items():
for pk, attributes in cache.items():
try:
self.db[collection].save(attributes)
except:
logger.error(
"Error when saving the document with pk {} in collection {}".format(
attributes["pk"], collection
)
)
logger.error(
"Attributes (excerpt):"
+ str(dict(list(attributes.items())[:100]))
)
raise
for collection, cache in self._delete_cache.items():
for pk in cache:
self.db[collection].remove({"_id": pk})
for collection, cache in self._update_cache.items():
for pk, attributes in cache.items():
update_dict = {}
for key in ("$set", "$unset"):
if key in attributes and attributes[key]:
update_dict[key] = attributes[key]
if update_dict:
self.db[collection].update({"_id": pk}, update_dict)
finally:
# regardless of what happens in the 'commit' operation, we clear the cache
self._save_cache = defaultdict(lambda: {})
self._delete_cache = defaultdict(lambda: {})
self._update_cache = defaultdict(lambda: {})
self.in_transaction = True
@property
def autocommit(self):
return self._autocommit
@autocommit.setter
def autocommit(self, value):
if value not in (True, False):
raise TypeError("Value must be boolean!")
self._autocommit = value
def delete_by_primary_keys(self, cls, pks):
collection = self.get_collection_for_cls(cls)
if self.autocommit:
for pk in pks:
self.db[collection].remove({"_id": pk})
else:
self._delete_cache[collection].update({pk: True for pk in pks})
def delete(self, obj):
self.call_hook("before_delete", obj)
collection = self.get_collection_for_cls(obj.__class__)
if obj.pk == None:
raise obj.DoesNotExist
if self.autocommit:
self.db[collection].remove({"_id": obj.pk})
else:
self._delete_cache[collection][obj.pk] = True
if obj.pk in self._save_cache[collection]:
del self._save_cache[collection][obj.pk]
def save_multiple(self, objs):
if not objs:
return
serialized_attributes_list = []
collection = self.get_collection_for_cls(objs[0].__class__)
for obj in objs:
self.call_hook("before_save", obj)
if obj.pk == None:
obj.pk = uuid.uuid4().hex
serialized_attributes = self.serialize(obj.attributes)
serialized_attributes["_id"] = obj.pk
serialized_attributes_list.append(serialized_attributes)
for attributes in serialized_attributes_list:
if self.autocommit:
self.db[collection].save(attributes)
else:
self._save_cache[collection][attributes["pk"]] = attributes
if attributes["pk"] in self._delete_cache[collection]:
del self._delete_cache[collection][attributes["pk"]]
def save(self, obj):
return self.save_multiple([obj])
def update(self, obj, set_fields=None, unset_fields=None, update_obj=True):
collection = self.get_collection_for_cls(obj.__class__)
if obj.pk == None:
raise obj.DoesNotExist("update() called on document without primary key!")
def serialize_fields(fields):
if isinstance(fields, (list, tuple)):
update_dict = {}
for key in fields:
try:
update_dict[key] = get_value(obj, key)
except KeyError:
pass
elif isinstance(fields, dict):
update_dict = fields.copy()
else:
raise TypeError("fields must be a list/tuple!")
return update_dict
if set_fields:
set_attributes = serialize_fields(set_fields)
else:
set_attributes = {}
if unset_fields:
unset_attributes = list(unset_fields)
else:
unset_attributes = []
self.call_hook("before_update", obj, set_attributes, unset_attributes)
set_attributes = {
key: self.serialize(value) for key, value in set_attributes.items()
}
if update_obj:
for key, value in set_attributes.items():
set_value(obj, key, value)
for key in unset_attributes:
delete_value(obj, key)
update_dict = {}
if set_attributes:
update_dict["$set"] = set_attributes
if unset_attributes:
update_dict["$unset"] = {key: "" for key in unset_attributes}
if not update_dict:
return # nothing to do...
if self.autocommit:
self.db[collection].update({"_id": obj.pk}, update_dict)
else:
if obj.pk in self._delete_cache[collection]:
raise obj.DoesNotExist(
"update() on document that is marked for deletion!"
)
if obj.pk in self._update_cache[collection]:
update_cache = self._update_cache[collection][obj.pk]
if set_attributes:
if "$set" not in update_cache:
update_cache["$set"] = {}
for key, value in set_attributes.items():
if "$unset" in update_cache and key in update_cache["$unset"]:
del update_cache["$unset"][key]
update_cache["$set"][key] = value
if unset_attributes:
if "$unset" not in update_cache:
update_cache["$unset"] = {}
for key in unset_attributes:
if "$set" in update_cache and key in update_cache["$set"]:
del update_cache["$set"][key]
update_cache["$unset"][key] = ""
else:
self._update_cache[collection][obj.pk] = update_dict
def serialize(
self,
obj,
convert_keys_to_str=True,
embed_level=0,
encoders=None,
autosave=True,
for_query=False,
path=None,
):
return super().serialize(
obj,
convert_keys_to_str=convert_keys_to_str,
embed_level=embed_level,
encoders=encoders,
autosave=autosave,
path=path,
for_query=for_query,
)
def create_indexes(self, cls_or_collection, params_list):
for params in params_list:
self.create_index(cls_or_collection, **params)
def ensure_indexes(self, include_pk=True):
for cls in self.classes:
meta_attributes = self.get_meta_attributes(cls)
if include_pk:
self.create_index(cls, fields={"pk": 1}, opts={"unique": True})
if "indexes" in meta_attributes:
self.create_indexes(cls, meta_attributes["indexes"])
def create_index(self, cls_or_collection, *args, **kwargs):
if not isinstance(cls_or_collection, str):
collection = self.get_collection_for_cls(cls_or_collection)
else:
collection = cls_or_collection
if "fields" not in kwargs:
raise AttributeError(
"You must specify the 'fields' parameter when creating an index!"
)
if "opts" in kwargs:
opts = kwargs["opts"]
else:
opts = {}
try:
self.db[collection].ensure_index(list(kwargs["fields"].items()), **opts)
except pymongo.errors.OperationFailure as failure:
traceback.print_exc()
# The index already exists with different options, so we drop it and recreate it...
self.db[collection].drop_index(list(kwargs["fields"].items()))
self.db[collection].ensure_index(list(kwargs["fields"].items()), **opts)
def _canonicalize_query(self, query):
"""Transform the query dictionary to replace e.g. documents with
__ref__ fields."""
def transform_query(q):
for encoder in self.query_encoders:
q = encoder.encode(q, [])
if isinstance(q, dict):
nq = {}
for key, value in q.items():
new_key = key
if (
isinstance(value, dict)
and len(value) == 1
and list(value.keys())[0].startswith("$")
):
if list(value.keys())[0] in ("$all", "$in"):
if list(value.values())[0] and isinstance(
list(value.values())[0][0], Document
):
if self._use_pk_based_refs:
new_key += ".pk"
else:
new_key += ".__ref__"
elif isinstance(value, Document):
if self._use_pk_based_refs:
new_key += ".pk"
else:
new_key += ".__ref__"
nq[new_key] = transform_query(value)
return nq
elif isinstance(q, (list, QuerySet, tuple)):
return [transform_query(x) for x in q]
elif isinstance(q, Document):
collection = self.get_collection_for_obj(q)
if self._use_pk_based_refs:
return q.pk
else:
return f"{collection}:{q.pk}"
else:
return q
return transform_query(query)
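# Illustrative note (not part of the original module): given a Document instance `author`
# stored in a collection named "author" with pk "abc123", a query like {"author": author}
# is canonicalized to {"author.pk": "abc123"} when use_pk_based_refs is True, or to
# {"author.__ref__": "author:abc123"} otherwise, so that document-valued query terms
# match the serialized reference fields stored in MongoDB.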
def get(self, cls_or_collection, properties, raw=False, only=None):
if not isinstance(cls_or_collection, str):
collection = self.get_collection_for_cls(cls_or_collection)
else:
collection = cls_or_collection
cls = self.get_cls_for_collection(collection)
queryset = self.filter(cls_or_collection, properties, raw=raw, only=only)
if len(queryset) == 0:
raise cls.DoesNotExist
elif len(queryset) > 1:
raise cls.MultipleDocumentsReturned
return queryset[0]
def filter(self, cls_or_collection, query, raw=False, only=None):
"""Filter objects from the database that correspond to a given set of
properties.
See :py:meth:`blitzdb.backends.base.Backend.filter` for documentation of individual parameters
.. note::
This function supports most query operators that are available in MongoDB and returns
a query set that is based on a MongoDB cursor.
"""
if not isinstance(cls_or_collection, str):
collection = self.get_collection_for_cls(cls_or_collection)
cls = cls_or_collection
else:
collection = cls_or_collection
cls = self.get_cls_for_collection(collection)
canonical_query = self._canonicalize_query(query)
args = {}
if only:
if isinstance(only, tuple):
args["projection"] = list(only)
else:
args["projection"] = only
return QuerySet(
self,
cls,
self.db[collection].find(canonical_query, **args),
raw=raw,
only=only,
)
|
the-stack_106_21510
|
import os
import yaml
from yacs.config import CfgNode as CN
_C = CN()
# Base config files
_C.BASE = ['']
# -----------------------------------------------------------------------------
# Data settings
# -----------------------------------------------------------------------------
_C.DATA = CN()
# Batch size for a single GPU, could be overwritten by command line argument
_C.DATA.BATCH_SIZE = 128
# Path to dataset, could be overwritten by command line argument
_C.DATA.DATA_PATH = ''
# Dataset name
_C.DATA.DATASET = 'imagenet'
# Input image size
_C.DATA.IMG_SIZE = 224
# Interpolation to resize image (random, bilinear, bicubic)
_C.DATA.INTERPOLATION = 'bicubic'
# Use zipped dataset instead of folder dataset
# could be overwritten by command line argument
_C.DATA.ZIP_MODE = False
# Cache Data in Memory, could be overwritten by command line argument
_C.DATA.CACHE_MODE = 'part'
# Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
_C.DATA.PIN_MEMORY = True
# Number of data loading threads
_C.DATA.NUM_WORKERS = 8
# -----------------------------------------------------------------------------
# Model settings
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# Model type
_C.MODEL.TYPE = 'cross-scale'
# Model name
_C.MODEL.NAME = 'tiny_patch4_group7_224'
# Checkpoint to resume, could be overwritten by command line argument
_C.MODEL.RESUME = ''
_C.MODEL.FROM_PRETRAIN = ''
# Number of classes, overwritten in data preparation
_C.MODEL.NUM_CLASSES = 1000
# Dropout rate
_C.MODEL.DROP_RATE = 0.0
# Drop path rate
_C.MODEL.DROP_PATH_RATE = 0.1
# Label Smoothing
_C.MODEL.LABEL_SMOOTHING = 0.1
# CrossFormer parameters
_C.MODEL.CROS = CN()
_C.MODEL.CROS.PATCH_SIZE = [4, 8, 16, 32]
_C.MODEL.CROS.MERGE_SIZE = [[2, 4], [2, 4], [2, 4]]
_C.MODEL.CROS.IN_CHANS = 3
_C.MODEL.CROS.EMBED_DIM = 48
_C.MODEL.CROS.DEPTHS = [2, 2, 6, 2]
_C.MODEL.CROS.NUM_HEADS = [3, 6, 12, 24]
_C.MODEL.CROS.GROUP_SIZE = [7, 7, 7, 7]
_C.MODEL.CROS.MLP_RATIO = 4.
_C.MODEL.CROS.QKV_BIAS = True
_C.MODEL.CROS.QK_SCALE = None
_C.MODEL.CROS.APE = False
_C.MODEL.CROS.PATCH_NORM = True
# -----------------------------------------------------------------------------
# Training settings
# -----------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.START_EPOCH = 0
_C.TRAIN.EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 20
_C.TRAIN.WEIGHT_DECAY = 0.05
_C.TRAIN.BASE_LR = 5e-4
_C.TRAIN.WARMUP_LR = 5e-7
_C.TRAIN.MIN_LR = 5e-6
# Clip gradient norm
_C.TRAIN.CLIP_GRAD = 5.0
# Auto resume from latest checkpoint
_C.TRAIN.AUTO_RESUME = True
# Gradient accumulation steps
# could be overwritten by command line argument
_C.TRAIN.ACCUMULATION_STEPS = 0
# Whether to use gradient checkpointing to save memory
# could be overwritten by command line argument
_C.TRAIN.USE_CHECKPOINT = False
# LR scheduler
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'cosine'
# Epoch interval to decay LR, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30
# LR decay rate, used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1
# Optimizer
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'adamw'
# Optimizer Epsilon
_C.TRAIN.OPTIMIZER.EPS = 1e-8
# Optimizer Betas
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999)
# SGD momentum
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
# -----------------------------------------------------------------------------
# Augmentation settings
# -----------------------------------------------------------------------------
_C.AUG = CN()
# Color jitter factor
_C.AUG.COLOR_JITTER = 0.4
# Use AutoAugment policy. "v0" or "original"
_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1'
# Random erase prob
_C.AUG.REPROB = 0.25
# Random erase mode
_C.AUG.REMODE = 'pixel'
# Random erase count
_C.AUG.RECOUNT = 1
# Mixup alpha, mixup enabled if > 0
_C.AUG.MIXUP = 0.8
# Cutmix alpha, cutmix enabled if > 0
_C.AUG.CUTMIX = 1.0
# Cutmix min/max ratio, overrides alpha and enables cutmix if set
_C.AUG.CUTMIX_MINMAX = None
# Probability of performing mixup or cutmix when either/both is enabled
_C.AUG.MIXUP_PROB = 1.0
# Probability of switching to cutmix when both mixup and cutmix enabled
_C.AUG.MIXUP_SWITCH_PROB = 0.5
# How to apply mixup/cutmix params. Per "batch", "pair", or "elem"
_C.AUG.MIXUP_MODE = 'batch'
# -----------------------------------------------------------------------------
# Testing settings
# -----------------------------------------------------------------------------
_C.TEST = CN()
# Whether to use center crop when testing
_C.TEST.CROP = True
# -----------------------------------------------------------------------------
# Misc
# -----------------------------------------------------------------------------
# Mixed precision opt level, if O0, no amp is used ('O0', 'O1', 'O2')
# overwritten by command line argument
_C.AMP_OPT_LEVEL = ''
# Path to output folder, overwritten by command line argument
_C.OUTPUT = ''
# Tag of experiment, overwritten by command line argument
_C.TAG = 'default'
# Frequency to save checkpoint
_C.SAVE_FREQ = 1000
# Frequency to logging info
_C.PRINT_FREQ = 10
# Fixed random seed
_C.SEED = 0
# Perform evaluation only, overwritten by command line argument
_C.EVAL_MODE = False
# Test throughput only, overwritten by command line argument
_C.THROUGHPUT_MODE = False
# local rank for DistributedDataParallel, given by command line argument
_C.LOCAL_RANK = 0
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as f:
yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, os.path.join(os.path.dirname(cfg_file), cfg)
)
print('=> merge config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
def update_config(config, args):
_update_config_from_file(config, args.cfg)
config.defrost()
if args.opts:
config.merge_from_list(args.opts)
# merge from specific arguments
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
if args.data_path:
config.DATA.DATA_PATH = args.data_path
if args.zip:
config.DATA.ZIP_MODE = True
if args.cache_mode:
config.DATA.CACHE_MODE = args.cache_mode
if args.resume:
config.MODEL.RESUME = args.resume
if args.accumulation_steps:
config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps
if args.use_checkpoint:
config.TRAIN.USE_CHECKPOINT = True
if args.amp_opt_level:
config.AMP_OPT_LEVEL = args.amp_opt_level
if args.output:
config.OUTPUT = args.output
if args.tag:
config.TAG = args.tag
if args.eval:
config.EVAL_MODE = True
if args.num_workers >= 0:
config.DATA.NUM_WORKERS = args.num_workers
if args.throughput:
config.THROUGHPUT_MODE = True
# if args.patch_size:
# config.MODEL.CROS.PATCH_SIZE = args.patch_size
config.MODEL.CROS.MLP_RATIO = args.mlp_ratio
# config.MODEL.MERGE_SIZE_AFTER = [args.merge_size_after1, args.merge_size_after2, args.merge_size_after3, []]
config.DATA.DATASET = args.data_set
config.TRAIN.WARMUP_EPOCHS = args.warmup_epochs
# set local rank for distributed training
config.LOCAL_RANK = args.local_rank
# output folder
config.OUTPUT = os.path.join(config.OUTPUT, config.MODEL.NAME, config.TAG)
config.freeze()
def get_config(args):
"""Get a yacs CfgNode object with default values."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
config = _C.clone()
update_config(config, args)
return config
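# Illustrative usage sketch (not part of the original file): get_config() expects an argparse
# namespace carrying at least the attributes read in update_config() above. The config path
# and values below are hypothetical.
#
#   import argparse
#   args = argparse.Namespace(
#       cfg="configs/tiny_patch4_group7_224.yaml", opts=None, batch_size=None, data_path=None,
#       zip=False, cache_mode=None, resume=None, accumulation_steps=None, use_checkpoint=False,
#       amp_opt_level=None, output="output", tag="default", eval=False, num_workers=8,
#       throughput=False, mlp_ratio=4.0, data_set="imagenet", warmup_epochs=20, local_rank=0,
#   )
#   config = get_config(args)
#   print(config.MODEL.NAME, config.DATA.BATCH_SIZE)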
|
the-stack_106_21511
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import io
import os
import re
import shutil
import sys
import tempfile
from typing import (Dict, Iterator, List, Match, Optional, # noqa
Pattern, Union, TYPE_CHECKING, Text, IO, Tuple)
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from .compat import StringIO, PY2, to_env
from .parser import parse_stream
if TYPE_CHECKING: # pragma: no cover
if sys.version_info >= (3, 6):
_PathLike = os.PathLike
else:
_PathLike = Text
if sys.version_info >= (3, 0):
_StringIO = StringIO
else:
_StringIO = StringIO[Text]
__posix_variable = re.compile(r'\$\{[^\}]*\}') # type: Pattern[Text]
class DotEnv():
def __init__(self, dotenv_path, verbose=False, encoding=None):
# type: (Union[Text, _PathLike, _StringIO], bool, Union[None, Text]) -> None
self.dotenv_path = dotenv_path # type: Union[Text,_PathLike, _StringIO]
self._dict = None # type: Optional[Dict[Text, Text]]
self.verbose = verbose # type: bool
self.encoding = encoding # type: Union[None, Text]
@contextmanager
def _get_stream(self):
# type: () -> Iterator[IO[Text]]
if isinstance(self.dotenv_path, StringIO):
yield self.dotenv_path
elif os.path.isfile(self.dotenv_path):
with io.open(self.dotenv_path, encoding=self.encoding) as stream:
yield stream
else:
if self.verbose:
warnings.warn("File doesn't exist {}".format(self.dotenv_path)) # type: ignore
yield StringIO('')
def dict(self):
# type: () -> Dict[Text, Text]
"""Return dotenv as dict"""
if self._dict:
return self._dict
values = OrderedDict(self.parse())
self._dict = resolve_nested_variables(values)
return self._dict
def parse(self):
# type: () -> Iterator[Tuple[Text, Text]]
with self._get_stream() as stream:
for mapping in parse_stream(stream):
if mapping.key is not None and mapping.value is not None:
yield mapping.key, mapping.value
def set_as_environment_variables(self, override=False):
# type: (bool) -> bool
"""
Load the current dotenv as system environment variables.
"""
for k, v in self.dict().items():
if k in os.environ and not override:
continue
os.environ[to_env(k)] = to_env(v)
return True
def get(self, key):
# type: (Text) -> Optional[Text]
"""
"""
data = self.dict()
if key in data:
return data[key]
if self.verbose:
warnings.warn("key %s not found in %s." % (key, self.dotenv_path)) # type: ignore
return None
def get_key(dotenv_path, key_to_get):
# type: (Union[Text, _PathLike], Text) -> Optional[Text]
"""
Gets the value of a given key from the given .env
If the .env path given doesn't exist, fails
"""
return DotEnv(dotenv_path, verbose=True).get(key_to_get)
@contextmanager
def rewrite(path):
# type: (_PathLike) -> Iterator[Tuple[IO[Text], IO[Text]]]
try:
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as dest:
with io.open(path) as source:
yield (source, dest) # type: ignore
except BaseException:
if os.path.isfile(dest.name):
os.unlink(dest.name)
raise
else:
shutil.move(dest.name, path)
def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"):
# type: (_PathLike, Text, Text, Text) -> Tuple[Optional[bool], Text, Text]
"""
Adds or Updates a key/value to the given .env
If the .env path given doesn't exist, fails instead of risking creating
an orphan .env somewhere in the filesystem
"""
value_to_set = value_to_set.strip("'").strip('"')
if not os.path.exists(dotenv_path):
warnings.warn("can't write to %s - it doesn't exist." % dotenv_path) # type: ignore
return None, key_to_set, value_to_set
if " " in value_to_set:
quote_mode = "always"
line_template = '{}="{}"\n' if quote_mode == "always" else '{}={}\n'
line_out = line_template.format(key_to_set, value_to_set)
with rewrite(dotenv_path) as (source, dest):
replaced = False
for mapping in parse_stream(source):
if mapping.key == key_to_set:
dest.write(line_out)
replaced = True
else:
dest.write(mapping.original)
if not replaced:
dest.write(line_out)
return True, key_to_set, value_to_set
def unset_key(dotenv_path, key_to_unset, quote_mode="always"):
# type: (_PathLike, Text, Text) -> Tuple[Optional[bool], Text]
"""
Removes a given key from the given .env
If the .env path given doesn't exist, fails
If the given key doesn't exist in the .env, fails
"""
if not os.path.exists(dotenv_path):
warnings.warn("can't delete from %s - it doesn't exist." % dotenv_path) # type: ignore
return None, key_to_unset
removed = False
with rewrite(dotenv_path) as (source, dest):
for mapping in parse_stream(source):
if mapping.key == key_to_unset:
removed = True
else:
dest.write(mapping.original)
if not removed:
warnings.warn("key %s not removed from %s - key doesn't exist." % (key_to_unset, dotenv_path)) # type: ignore
return None, key_to_unset
return removed, key_to_unset
def resolve_nested_variables(values):
# type: (Dict[Text, Text]) -> Dict[Text, Text]
def _replacement(name):
# type: (Text) -> Text
"""
get appropriate value for a variable name.
first search in environ, if not found,
then look into the dotenv variables
"""
ret = os.getenv(name, new_values.get(name, ""))
return ret
def _re_sub_callback(match_object):
# type: (Match[Text]) -> Text
"""
From a match object gets the variable name and returns
the correct replacement
"""
return _replacement(match_object.group()[2:-1])
new_values = {}
for k, v in values.items():
new_values[k] = __posix_variable.sub(_re_sub_callback, v)
return new_values
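# Illustrative note (not part of the original module): with
#   values = OrderedDict([("A", "1"), ("B", "${A}/x")])
# resolve_nested_variables(values) returns {"A": "1", "B": "1/x"} (assuming A is not set in
# os.environ, which takes precedence). Only keys defined earlier in the mapping, or present
# in the environment, are available for substitution.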
def _walk_to_root(path):
# type: (Text) -> Iterator[Text]
"""
Yield directories starting from the given directory up to the root
"""
if not os.path.exists(path):
raise IOError('Starting path not found')
if os.path.isfile(path):
path = os.path.dirname(path)
last_dir = None
current_dir = os.path.abspath(path)
while last_dir != current_dir:
yield current_dir
parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
last_dir, current_dir = current_dir, parent_dir
def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False):
# type: (Text, bool, bool) -> Text
"""
Search in increasingly higher folders for the given file
Returns path to the file if found, or an empty string otherwise
"""
def _is_interactive():
""" Decide whether this is running in a REPL or IPython notebook """
main = __import__('__main__', None, None, fromlist=['__file__'])
return not hasattr(main, '__file__')
if usecwd or _is_interactive():
# Should work without __file__, e.g. in REPL or IPython notebook.
path = os.getcwd()
else:
# will work for .py files
frame = sys._getframe()
# find first frame that is outside of this file
if PY2 and not __file__.endswith('.py'):
# in Python2 __file__ extension could be .pyc or .pyo (this doesn't account
# for edge case of Python compiled for non-standard extension)
current_file = __file__.rsplit('.', 1)[0] + '.py'
else:
current_file = __file__
while frame.f_code.co_filename == current_file:
frame = frame.f_back
frame_filename = frame.f_code.co_filename
path = os.path.dirname(os.path.abspath(frame_filename))
for dirname in _walk_to_root(path):
check_path = os.path.join(dirname, filename)
if os.path.isfile(check_path):
return check_path
if raise_error_if_not_found:
raise IOError('File not found')
return ''
def load_dotenv(dotenv_path=None, stream=None, verbose=False, override=False, **kwargs):
# type: (Union[Text, _PathLike, None], Optional[_StringIO], bool, bool, Union[None, Text]) -> bool
f = dotenv_path or stream or find_dotenv()
return DotEnv(f, verbose=verbose, **kwargs).set_as_environment_variables(override=override)
def dotenv_values(dotenv_path=None, stream=None, verbose=False, **kwargs):
# type: (Union[Text, _PathLike, None], Optional[_StringIO], bool, Union[None, Text]) -> Dict[Text, Text]
f = dotenv_path or stream or find_dotenv()
return DotEnv(f, verbose=verbose, **kwargs).dict()
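# Illustrative usage sketch (not part of the original module), assuming the surrounding package
# re-exports these helpers as the published python-dotenv distribution does; the .env path is
# hypothetical:
#
#   from dotenv import load_dotenv, dotenv_values, get_key, set_key, unset_key
#   load_dotenv("/path/to/.env")              # export entries into os.environ
#   values = dotenv_values("/path/to/.env")   # or read them as a dict without touching os.environ
#   set_key("/path/to/.env", "API_URL", "http://example.com")
#   print(get_key("/path/to/.env", "API_URL"))
#   unset_key("/path/to/.env", "API_URL")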
|
the-stack_106_21512
|
# script gets all Australian BoM weather station observations
# ... and applies an interpolated temperature to all GNAF points in a 100m grid
# TODO:
# 1. remove temperature biases due to altitude differences
# a. Add SRTM altitudes to GNAF
# b. Add interpolated altitude from weather stations to GNAF
# c. adjust where the difference is > 100m
# 2. generate temps outside the weather station network to catch the ~3,100 GNAF points outside the interpolated area
#
import geopandas
import io
import json
import logging
import matplotlib.pyplot as plt
import multiprocessing
import numpy
import os
import pandas
import psycopg2
import requests
import scipy.interpolate
import sqlalchemy
import struct
import urllib.request
import zipfile
from bs4 import BeautifulSoup
from datetime import datetime
from osgeo import gdal
# where to save the files
output_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
# states to include (note: no "ACT" or "OT" state, Antarctica is part of TAS in BoM observations)
states = [{"name": "NSW", "product": "IDN60801"},
{"name": "NT", "product": "IDD60801"},
{"name": "QLD", "product": "IDQ60801"},
{"name": "SA", "product": "IDS60801"},
{"name": "TAS", "product": "IDT60801"},
{"name": "VIC", "product": "IDV60801"},
{"name": "WA", "product": "IDW60801"},
{"name": "ANT", "product": "IDT60801"}]
# urls for each state's weather observations
base_url = "http://www.bom.gov.au/{0}/observations/{0}all.shtml"
# postgres connect strings
pg_connect_string = "dbname='geo' host='localhost' port='5432' user='postgres' password='password'"
sql_alchemy_engine_string = "postgresql+psycopg2://postgres:password@localhost/geo"
def main():
start_time = datetime.now()
# connect to Postgres
try:
pg_conn = psycopg2.connect(pg_connect_string)
pg_conn.autocommit = True
pg_cur = pg_conn.cursor()
except psycopg2.Error:
logger.fatal("Unable to connect to database\nACTION: Check your Postgres parameters and/or database security")
return False
# download weather stations
station_list = get_weather_stations()
logger.info("Downloaded {:,} weather stations : {}".format(len(station_list), datetime.now() - start_time))
obs_list = get_weather_observations(station_list)
logger.info("Downloaded {:,} latest observations : {}".format(len(obs_list), datetime.now() - start_time))
start_time = datetime.now()
# create dataframe of weather stations
station_df = pandas.DataFrame(station_list)
# create dataframe of weather obs
obs_df = pandas.DataFrame(obs_list).drop_duplicates()
# merge data and add points to dataframe
df = (obs_df.merge(station_df, on="wmo")
.drop(["lat", "lon"], axis=1)
)
# gdf = geopandas.GeoDataFrame(df, geometry=geopandas.points_from_xy(df.longitude, df.latitude), crs="EPSG:4283")
# replace all missing values ("-") with NaN and change type of field of interest from string
df2 = df.replace('-', numpy.nan).astype({'rain_trace': 'float64'})
# select rows from the last hour with valid data
rain_trace_df = df2[(df2["utc_time_diff"] < 3600.0) & (df2["rain_trace"].notna())
& (df2["longitude"] > 112.0) & (df2["longitude"] < 162.0)
& (df2["latitude"] > -45.0) & (df2["latitude"] < -8.0)]
# # testing - get histogram of observation time
# rain_trace_df.hist("utc_time")
# plt.savefig(os.path.join(output_path, "histogram.png"), dpi=300, facecolor="w", pad_inches=0.0, metadata=None)
# export dataframe to PostGIS
export_dataframe(pg_cur, rain_trace_df, "testing", "weather_stations", "replace")
logger.info("Exported weather station dataframe to PostGIS: {}".format(datetime.now() - start_time))
start_time = datetime.now()
# # save to disk for debugging
# rain_trace_df.to_feather(os.path.join(output_path "temp_df.ipc"))
# # load from disk if debugging
# temp_df = pandas.read_feather(os.path.join(output_path "temp_df.ipc"))
# extract lat, long and air temp as arrays
x = rain_trace_df["longitude"].to_numpy()
y = rain_trace_df["latitude"].to_numpy()
z = rain_trace_df["rain_trace"].to_numpy()
h = rain_trace_df["altitude"].to_numpy()
logger.info("Filtered observations dataframe with weather station coordinates : {} rows : {}"
.format(len(rain_trace_df.index), datetime.now() - start_time))
start_time = datetime.now()
# # open SRTM 3 second DEM of Australia (ESRI Binary Grid format)
# dem_file_name = "/Users/hugh.saalmans/Downloads/3secSRTM_DEM/DEM_ESRI_GRID_16bit_Integer/dem3s_int/hdr.adf"
# dem_dataset = gdal.Open(dem_file_name, gdal.GA_ReadOnly)
# dem_geotransform = dem_dataset.GetGeoTransform()
#
# # get DEM origin point and pixel size to create numpy arrays from
# dem_num_x, dem_num_y = dem_dataset.RasterXSize, dem_dataset.RasterYSize
# dem_origin_x, dem_origin_y = dem_geotransform[0], dem_geotransform[3]
# dem_origin_delta_x, dem_origin_delta_y = dem_geotransform[1], dem_geotransform[5]
# select GNAF coordinates - group by 3 decimal places to create a ~100m grid of addresses
# sql = """SELECT latitude::numeric(5,3) as latitude, longitude::numeric(6,3) as longitude, count(*) as address_count
# FROM gnaf_202205.address_principals
# GROUP BY latitude::numeric(5,3), longitude::numeric(6,3)"""
# sql = """SELECT * FROM testing.gnaf_points_with_pop_and_height"""
# gnaf_df = pandas.read_sql_query(sql, pg_conn)
#
# # save to feather file for future use (GNAF only changes once every 3 months)
# gnaf_df.to_feather(os.path.join(output_path, "gnaf.ipc"))
# load from feather file
gnaf_df = pandas.read_feather(os.path.join(output_path, "gnaf.ipc"))
gnaf_x = gnaf_df["longitude"].to_numpy()
gnaf_y = gnaf_df["latitude"].to_numpy()
gnaf_counts = gnaf_df["count"].to_numpy()
# gnaf_dem_elevation = gnaf_df["elevation"].to_numpy()
logger.info("Loaded {:,} GNAF points : {}".format(len(gnaf_df.index), datetime.now() - start_time))
start_time = datetime.now()
# interpolate temperatures for GNAF coordinates
gnaf_points = numpy.array((gnaf_x.flatten(), gnaf_y.flatten())).T
gnaf_temps = scipy.interpolate.griddata((x, y), z, gnaf_points, method="linear")
gnaf_weather_elevation = scipy.interpolate.griddata((x, y), h, gnaf_points, method="linear")
# create results dataframe
temperature_df = pandas.DataFrame({"latitude": gnaf_y, "longitude": gnaf_x,
"count": gnaf_counts, "rain_trace": gnaf_temps})
# temperature_df = pandas.DataFrame({"latitude": gnaf_y, "longitude": gnaf_x,
# "count": gnaf_counts, "dem_elevation": gnaf_dem_elevation,
# "weather_elevation": gnaf_weather_elevation, "rain_trace": gnaf_temps})
# # add temperatures adjusted for altitude differences between GNAF point and nearby weather stations
# temperature_df["adjusted_temp"] = temperature_df["rain_trace"] + \
# (temperature_df["weather_elevation"] - temperature_df["dem_elevation"]) / 150.0
# print(temperature_df)
# get count of rows with a temperature
row_count = len(temperature_df[temperature_df["rain_trace"].notna()].index)
logger.info("Got {:,} interpolated temperatures and elevations for GNAF points : {}"
.format(row_count, datetime.now() - start_time))
start_time = datetime.now()
# # plot a map of gnaf points by temperature
# temperature_df.plot.scatter("longitude", "latitude", c="rain_trace", colormap="jet")
# plt.axis("off")
# plt.savefig(os.path.join(output_path, "interpolated.png"), dpi=300, facecolor="w", pad_inches=0.0, metadata=None)
#
# logger.info("Plotted points to PNG file : {}".format(datetime.now() - start_time))
# start_time = datetime.now()
# export dataframe to PostGIS
export_dataframe(pg_cur, temperature_df, "testing", "gnaf_temperature", "replace")
logger.info("Exported GNAF temperature dataframe to PostGIS: {}".format(datetime.now() - start_time))
# start_time = datetime.now()
return True
def export_dataframe(pg_cur, df, schema_name, table_name, export_mode):
# create geodataframe
gdf = geopandas.GeoDataFrame(df, geometry=geopandas.points_from_xy(df.longitude, df.latitude), crs="EPSG:4283")
# export to GeoPackage
# gdf.to_file(os.path.join(output_path, "{}.gpkg".format(table_name)), driver="GPKG")
#
# logger.info("Exported points to GeoPackage : {}".format(datetime.now() - start_time))
# start_time = datetime.now()
# export to PostGIS
engine = sqlalchemy.create_engine(sql_alchemy_engine_string)
gdf.to_postgis(table_name, engine, schema=schema_name, if_exists=export_mode)
pg_cur.execute("ANALYSE {}.{}".format(schema_name, table_name))
# pg_cur.execute("ALTER TABLE testing.weather_stations ADD CONSTRAINT weather_stations_pkey PRIMARY KEY (wmo)"
# .format(schema_name, table_name))
pg_cur.execute("ALTER TABLE {0}.{1} CLUSTER ON idx_{1}_geometry".format(schema_name, table_name))
pg_cur.execute("ALTER TABLE {}.{} RENAME COLUMN geometry TO geom".format(schema_name, table_name))
def get_weather_observations(station_list):
start_time = datetime.now()
obs_urls = list()
obs_list = list()
for state in states:
# get URL for web page to scrape
input_url = base_url.format(state["name"].lower())
# load and parse web page
r = requests.get(input_url)
soup = BeautifulSoup(r.content, features="html.parser")
# get all links
links = soup.find_all("a", href=True)
for link in links:
url = link["href"]
if "/products/" in url:
# only include weather station observations in their home state (border weather obs are duplicated)
for station in station_list:
if station["state"] == state["name"] and station["wmo"] == int(url.split(".")[1]):
# change URL to get JSON file of weather obs and add to list
obs_url = url.replace("/products/", "http://www.bom.gov.au/fwo/").replace(".shtml", ".json")
obs_urls.append(obs_url)
# with open(os.path.join(output_path, "weather_observations_urls.txt"), "w", newline="") as output_file:
# output_file.write("\n".join(obs_urls))
logger.info("\t - {} : got obs file list : {}".format(state["name"], datetime.now() - start_time))
start_time = datetime.now()
# download each obs file using multiprocessing
pool = multiprocessing.Pool(processes=16)
results = pool.imap_unordered(run_multiprocessing, obs_urls)
pool.close()
pool.join()
for result in list(results):
if result.get("error") is not None:
logger.warning("\t- Failed to parse {}".format(result["error"]))
else:
obs_list.append(result)
return obs_list
def get_weather_stations():
# get weather stations - obs have poor coordinates
response = urllib.request.urlopen("ftp://ftp.bom.gov.au/anon2/home/ncc/metadata/sitelists/stations.zip")
data = io.BytesIO(response.read())
station_file = zipfile.ZipFile(data, "r", zipfile.ZIP_DEFLATED).read("stations.txt").decode("utf-8")
stations = station_file.split("\r\n")
station_list = list()
# split fixed width file and get the fields we want
field_widths = (-8, -6, 41, -8, -7, 9, 10, -15, 4, 11, -9, 7) # negative widths represent ignored fields
format_string = " ".join("{}{}".format(abs(fw), "x" if fw < 0 else "s") for fw in field_widths)
field_struct = struct.Struct(format_string)
parser = field_struct.unpack_from
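# Illustrative note (not part of the original script): with the field_widths above, the
# generated struct format string is "8x 6x 41s 8x 7x 9s 10s 15x 4s 11s 9x 7s", i.e. skip 8
# chars, skip 6, read 41 (name), skip 8, skip 7, read 9 (latitude), read 10 (longitude),
# skip 15, read 4 (state), read 11 (altitude), skip 9, read 7 (WMO id).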
# print("fmtstring: {!r}, recsize: {} chars".format(fmtstring, fieldstruct.size))
# skip first 5 rows (lazy coding!)
stations.pop(0)
stations.pop(0)
stations.pop(0)
stations.pop(0)
stations.pop(0)
# add each station to a list of dictionaries
for station in stations:
if len(station) > 128:
fields = parser(bytes(station, "utf-8"))
# convert to list
field_list = list()
for field in fields:
field_list.append(field.decode("utf-8").lstrip().rstrip())
if field_list[5] != "..":
station_dict = dict()
station_dict["name"] = field_list[0]
station_dict["latitude"] = float(field_list[1])
station_dict["longitude"] = float(field_list[2])
station_dict["state"] = field_list[3]
if field_list[4] != "..":
station_dict["altitude"] = float(field_list[4])
station_dict["wmo"] = int(field_list[5])
station_list.append(station_dict)
return station_list
def run_multiprocessing(url):
# file_path = os.path.join(output_path, "obs", url.split("/")[-1])
# try:
obs_text = requests.get(url).text
# with open(file_path, "w", newline="") as output_file:
# output_file.write(obs_text)
obs_json = json.loads(obs_text)
obs_list = obs_json["observations"]["data"]
try:
# default is an error for when there are no observations
result = dict()
result["error"] = "{} : No observations".format(url)
for obs in obs_list:
if obs["sort_order"] == 0:
result = obs
# add utc time
obs["utc_time"] = datetime.strptime(obs["aifstime_utc"], "%Y%m%d%H%M%S")
obs["utc_time_diff"] = (datetime.utcnow() - obs["utc_time"]).total_seconds()
except Exception as ex:
result = dict()
result["error"] = "{} : {}".format(url, ex)
# print(result)
return result
if __name__ == "__main__":
full_start_time = datetime.now()
logger = logging.getLogger()
# set logger
log_file = os.path.abspath(__file__).replace(".py", ".log")
logging.basicConfig(filename=log_file, level=logging.DEBUG, format="%(asctime)s %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p")
# setup logger to write to screen as well as writing to log file
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s")
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger("").addHandler(console)
logger.info("")
logger.info("Start weather obs download")
# geoscape.check_python_version(logger)
if main():
logger.info("Finished successfully! : {}".format(datetime.now() - full_start_time))
else:
logger.fatal("Something bad happened!")
logger.info("")
logger.info("-------------------------------------------------------------------------------")
|
the-stack_106_21514
|
import os
import re
import time
from bs4 import BeautifulSoup
import logging
from gensim.models import word2vec
import gensim
from nltk.corpus import stopwords
import nltk.data
from sklearn.cluster import KMeans
import time
THIS_FILE_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)))
NLTK_SAVE_DIR = os.path.join(os.path.join(THIS_FILE_FOLDER,
"NLTK"))
# Configure logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
# Kaggle default
def train_model(sentences, min_word_count=10, context=10):
"""
Parameters
----------
sentences: list of str
sentences to be analysed
min_word_count: int
minimum number of words to count
context:
window of neighbouring words to determine context
Returns
-------
word2vec model
"""
# Set values for various parameters
num_features = 300 # Word vector dimensionality
num_workers = 8 # Number of threads to run in parallel
downsampling = 1e-3 # Downsample setting for frequent words
return word2vec.Word2Vec(sentences, workers=num_workers,
size=num_features, min_count=min_word_count,
window=context, sample=downsampling)
def review_to_wordlist(review_text, remove_stopwords=False):
# 2. Remove non-letters
review_text = re.sub("[^a-zA-Z]", " ", review_text)
#
# 3. Convert words to lower case and split them
words = review_text.lower().split()
#
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if w not in stops]
#
# 5. Return a list of words
return words
# Get words like new_york
def get_multiwords(sentences):
return gensim.models.Phrases(sentences)
# Downloads nltk tokens if you don't have them
def download_punkt():
if not os.path.exists(NLTK_SAVE_DIR):
os.mkdir(NLTK_SAVE_DIR)
nltk.download('punkt', download_dir=NLTK_SAVE_DIR)
# Get the punctuation tokenizer
def get_punktuation_tokenizer():
punkt_path = os.path.join(NLTK_SAVE_DIR,
'tokenizers',
'punkt',
'english.pickle')
try:
return nltk.data.load(punkt_path)
except LookupError as e:
print("Punkt not found, downloading punkt..")
download_punkt()
return nltk.data.load(punkt_path)
# Define a function to split a review into parsed sentences (lists of words)
def review_to_sentences(review, tokenizer, remove_stopwords=False):
# Function to split a review into parsed sentences. Returns a
# list of sentences, where each sentence is a list of words
#
# 1. Use the NLTK tokenizer to split the paragraph into sentences
raw_sentences = tokenizer.tokenize(review.strip())
#
# 2. Loop over each sentence
sentences = []
for raw_sentence in raw_sentences:
# If a sentence is empty, skip it
if len(raw_sentence) > 0:
# Otherwise, call review_to_wordlist to get a list of words
sentences.append(review_to_wordlist(raw_sentence,
remove_stopwords))
#
# Return the list of sentences (each sentence is a list of words,
# so this returns a list of lists
return sentences
def scan_all_sentences(list_of_sentences, update_frequency=50):
sentences = [] # Initialize an empty list of sentences
tokenizer = get_punktuation_tokenizer()
for i, sentence in enumerate(list_of_sentences):
    # Print progress every `update_frequency` sentences
    if i % update_frequency == 0:
        print(str(i) + " of " + str(len(list_of_sentences)))
    sentences += review_to_sentences(sentence, tokenizer)
return sentences
def make_clusters(model):
start = time.time() # Start time
# Set "k" (num_clusters) to be 1/5th of the vocabulary size, or an
# average of 5 words per cluster
word_vectors = model.syn0
num_clusters = int(word_vectors.shape[0] / 5)
# Initalize a k-means object and use it to extract centroids
kmeans_clustering = KMeans(n_clusters=num_clusters, n_jobs=-1)
idx = kmeans_clustering.fit_predict(word_vectors)
# Get the end time and print how long the process took
end = time.time()
elapsed = end - start
print("Time taken for K Means clustering: " + elapsed + "seconds.")
return idx
def cluster_word_dict(idx, model):
return dict(zip(model.index2word, idx))
def print_clusters(idx, cluster_word_dict, amount=-1):
    if amount == -1:
        amount = max(idx) + 1  # print every cluster; idx holds one cluster id per word
    for cluster in range(amount):
#
# Print the cluster number
print("Cluster: " + str(cluster))
#
# Find all of the words for that cluster number, and print them out
words = []
for i, value in enumerate(cluster_word_dict.values()):
if (value == cluster):
c_keys = list(cluster_word_dict.keys())
words.append(c_keys[i])
print(str(words))
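# End-to-end sketch tying the helpers above together (illustrative only; the
# list of raw review strings is assumed to be provided by the caller):
def _example_cluster_pipeline(raw_reviews):
    sentences = scan_all_sentences(raw_reviews)
    model = train_model(sentences)
    idx = make_clusters(model)
    word_map = cluster_word_dict(idx, model)
    print_clusters(idx, word_map, amount=10)  # show the first 10 clusters
    return word_map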
|
the-stack_106_21517
|
"""
Plotting class to be used by Log.
"""
import time
import numpy as nm
from sfepy.base.base import Output, Struct
def draw_data(ax, xdata, ydata, label, plot_kwargs, swap_axes=False):
"""
Draw log data to a given axes, obeying `swap_axes`.
"""
def _update_plot_kwargs(lines):
plot_kwargs['color'] = lines[0].get_color()
alpha = lines[0].get_alpha()
plot_kwargs['alpha'] = 0.5 if alpha is None else 0.5 * alpha
plot_kwargs = plot_kwargs.copy()
if not swap_axes:
if nm.isrealobj(ydata):
ax.plot(xdata, ydata, label=label,
**plot_kwargs)
else:
lines = ax.plot(xdata, ydata.real,
label='Re ' + label,
**plot_kwargs)
_update_plot_kwargs(lines)
ax.plot(xdata, ydata.imag,
label='Im ' + label,
**plot_kwargs)
else:
if nm.isrealobj(ydata):
ax.plot(ydata, xdata, label=label,
**plot_kwargs)
else:
lines = ax.plot(ydata.real, xdata,
label='Re ' + label,
**plot_kwargs)
_update_plot_kwargs(lines)
ax.plot(ydata.imag, xdata,
label='Im ' + label,
**plot_kwargs)
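# Minimal illustrative use of draw_data (a sketch only; the figure, label and
# data below are invented for demonstration and are not part of sfepy). pyplot
# is imported inside the function, matching the deferred-import convention of
# this module.
def _example_draw_data():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    xdata = nm.linspace(0.0, 1.0, 11)
    ydata = nm.exp(2j * nm.pi * xdata)  # complex data -> Re and Im curves
    draw_data(ax, xdata, ydata, label='demo', plot_kwargs={})
    ax.legend()
    return fig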
class LogPlotter(Struct):
"""
LogPlotter to be used by :class:`sfepy.base.log.Log`.
"""
output = Output('plotter:')
output = staticmethod(output)
def __init__(self, aggregate=100, sleep=1.0):
Struct.__init__(self, aggregate=aggregate, sleep=sleep,
xdata={}, ydata={}, plot_kwargs={},
clear_axes={}, show_legends=False)
def process_command(self, command):
self.output(command[0])
if command[0] == 'plot':
ig, ip, xd, yd = command[1:]
xdata = self.xdata.setdefault((ig, ip), [])
ydata = self.ydata.setdefault((ig, ip), [])
xdata.append(xd)
ydata.append(yd)
elif command[0] == 'vline':
ig, x, kwargs = command[1:]
self.vlines[ig].append((x, kwargs))
elif command[0] == 'clear':
ig = command[1]
self.clear_axes[ig] = True
elif command[0] == 'legends':
self.show_legends = True
elif command[0] == 'add_axis':
ig, names, yscale, xlabel, ylabel, plot_kwargs = command[1:]
self.data_names[ig] = names
self.yscales[ig] = yscale
self.xlabels[ig] = xlabel
self.ylabels[ig] = ylabel
self.plot_kwargs[ig] = plot_kwargs
self.n_gr = len(self.data_names)
self.make_axes()
elif command[0] == 'save':
self.fig.savefig(command[1])
self.pipe.send(True) # Acknowledge save.
def apply_commands(self):
from matplotlib.ticker import LogLocator, AutoLocator
for key in sorted(self.ydata.keys()):
ig, ip = key
xdata = nm.array(self.xdata[(ig, ip)])
ydata = nm.array(self.ydata[(ig, ip)])
ax = self.ax[ig]
if self.clear_axes[ig]:
ax.cla()
self.clear_axes[ig] = False
ax.set_yscale(self.yscales[ig])
ax.yaxis.grid(True)
draw_data(ax, nm.array(xdata), nm.array(ydata),
self.data_names[ig][ip], self.plot_kwargs[ig][ip])
if self.yscales[ig] == 'log':
ymajor_formatter = ax.yaxis.get_major_formatter()
ymajor_formatter.label_minor(True)
yminor_locator = LogLocator()
else:
yminor_locator = AutoLocator()
self.ax[ig].yaxis.set_minor_locator(yminor_locator)
if self.show_legends:
for ig, ax in enumerate(self.ax):
try:
ax.legend()
except:
pass
if self.xlabels[ig]:
ax.set_xlabel(self.xlabels[ig])
if self.ylabels[ig]:
ax.set_ylabel(self.ylabels[ig])
for x, kwargs in self.vlines[ig]:
ax.axvline(x, **kwargs)
try:
self.plt.tight_layout(pad=0.5)
except:
pass
def terminate(self):
if self.ii:
self.output('processed %d commands' % self.ii)
self.output('ended.')
self.plt.close('all')
def poll_draw(self):
while 1:
self.ii = 0
while 1:
if not self.pipe.poll():
break
command = self.pipe.recv()
can_break = False
if command is None:
self.terminate()
return False
elif command[0] == 'continue':
can_break = True
else:
self.process_command(command)
if (self.ii >= self.aggregate) and can_break:
break
self.ii += 1
if self.ii:
self.apply_commands()
self.fig.canvas.draw()
self.output('processed %d commands' % self.ii)
time.sleep(self.sleep)
return True
def make_axes(self):
from sfepy.linalg import cycle
self.fig.clf()
self.ax = []
n_col = min(5.0, nm.fix(nm.sqrt(self.n_gr)))
if int(n_col) == 0:
n_row = 0
else:
n_row = int(nm.ceil(self.n_gr / n_col))
n_col = int(n_col)
for ii, (ir, ic) in enumerate(cycle((n_col, n_row))):
if ii == self.n_gr: break
self.ax.append(self.fig.add_subplot(n_row, n_col, ii + 1))
self.vlines.setdefault(ii, [])
def __call__(self, pipe, log_file, data_names, yscales, xlabels, ylabels,
plot_kwargs):
"""
        Set up the plotting window and start a thread calling self.poll_draw(),
        which does the actual plotting, taking commands out of `pipe`.
        Note that pyplot _must_ be imported here, and not at module level, so
        that the import occurs _after_ the plotting process has been started.
"""
import matplotlib.pyplot as plt
self.plt = plt
self.output.set_output(filename=log_file)
self.output('starting plotter...')
self.pipe = pipe
self.data_names = data_names
self.yscales = yscales
self.xlabels = xlabels
self.ylabels = ylabels
self.plot_kwargs = plot_kwargs
self.n_gr = len(data_names)
self.vlines = {}
self.fig = self.plt.figure()
self.make_axes()
import threading
draw_thread = threading.Thread(target=self.poll_draw)
draw_thread.start()
self.output('...done')
self.plt.show()
draw_thread.join()
|
the-stack_106_21518
|
#!/usr/bin/env python
import cv2 as cv
import numpy as np
import sys
import rospy
from std_msgs.msg import Int16
def servo_move_pub():
freq = 30
pub = rospy.Publisher('stepper', Int16, queue_size=10)
rospy.init_node('stepper_move_pub', anonymous=False)
rate = rospy.Rate(freq) # Frequency in Hz
cap = cv.VideoCapture(2) # Use 0 for built in webcam
if not cap.isOpened():
sys.exit()
speed = 0
k_p = 2
#k_i = 0.005
#k_d = 0.01
deadzone = 30
#past_err = 0
#sum_err = 0
while not rospy.is_shutdown():
ok, frame = cap.read()
scale_percent = 100 # percent of original size
width = int(frame.shape[1] * scale_percent / 100)
height = int(frame.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
resized = cv.resize(frame, dim, interpolation = cv.INTER_AREA)
rows, cols, _ = resized.shape
center_x = int(cols / 2)
center = int(cols / 2)
hsv_frame = cv.cvtColor(resized, cv.COLOR_BGR2HSV)
low_red = np.array([0, 135, 122])
high_red = np.array([255, 255, 255])
red_mask = cv.inRange(hsv_frame, low_red, high_red)
contours, _ = cv.findContours(red_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=lambda x:cv.contourArea(x), reverse=True)
for cont in contours:
(x, y, w, h) = cv.boundingRect(cont)
center_x = int((x + x + w) / 2)
#y_medium = int((y + y + h) / 2)
#boxDim = [w, h]
break
cv.line(resized, (center_x, 0), (center_x, 480), (0, 255, 0), 2)
#cv.rectangle(resized, (center_x - boxDim[0]/2, y_medium - boxDim[1]/2),(center_x-boxDim[0]/2, y_medium + boxDim[1]/2), (center_x + boxDim[0]/2, y_medium + boxDim[1]/2),(center_x + boxDim[0]/2, y_medium - boxDim[1]/2), (0, 255, 0), 2)
err = center - center_x
if(abs(err) > deadzone):
speed = err*k_p
else:
speed = 0
#dt = 1.0/freq
#derr = err - past_err
#past_err = err
#sum_err += err*dt
#if(abs(err) > deadzone):
# speed = int(k_p*err + k_i*sum_err + k_d*(derr/dt))
#else:
# speed = 0
# sum_err = 0
#rospy.loginfo("Error: %d\nspeed: %d", err, speed)
cv.line(resized, (center_x, 0),(center_x, 480), (0,255,0), 2)
cv.imshow("Video", resized)
key = cv.waitKey(1)
if key == 27:
cap.release()
            cv.destroyAllWindows()
break
pub.publish(speed)
rate.sleep()
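# Side note (illustrative only): the speed command above is a plain
# proportional controller with a deadzone. The same law as a standalone helper:
def _example_p_control(center, center_x, k_p=2, deadzone=30):
    err = center - center_x
    return err * k_p if abs(err) > deadzone else 0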
if __name__ == '__main__':
try:
servo_move_pub()
except rospy.ROSInterruptException:
pass
|
the-stack_106_21519
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
create YOLOv3 models with different backbone & head
"""
import warnings
from functools import partial
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from yolo3.models.yolo3_darknet import yolo3_body, custom_tiny_yolo3_body, yolo3lite_body, tiny_yolo3lite_body, custom_yolo3_spp_body
from yolo3.models.yolo3_mobilenet import yolo3_mobilenet_body, tiny_yolo3_mobilenet_body, yolo3lite_mobilenet_body, yolo3lite_spp_mobilenet_body, tiny_yolo3lite_mobilenet_body
from yolo3.models.yolo3_mobilenetv2 import yolo3_mobilenetv2_body, tiny_yolo3_mobilenetv2_body, yolo3lite_mobilenetv2_body, yolo3lite_spp_mobilenetv2_body, tiny_yolo3lite_mobilenetv2_body, yolo3_ultralite_mobilenetv2_body, tiny_yolo3_ultralite_mobilenetv2_body
from yolo3.models.yolo3_shufflenetv2 import yolo3_shufflenetv2_body, tiny_yolo3_shufflenetv2_body, yolo3lite_shufflenetv2_body, yolo3lite_spp_shufflenetv2_body, tiny_yolo3lite_shufflenetv2_body
from yolo3.models.yolo3_vgg16 import yolo3_vgg16_body, tiny_yolo3_vgg16_body
from yolo3.models.yolo3_xception import yolo3_xception_body, yolo3lite_xception_body, tiny_yolo3_xception_body, tiny_yolo3lite_xception_body, yolo3_spp_xception_body
from yolo3.models.yolo3_nano import yolo3_nano_body
from yolo3.models.yolo3_efficientnet import yolo3_efficientnet_body, tiny_yolo3_efficientnet_body, yolo3lite_efficientnet_body, yolo3lite_spp_efficientnet_body, tiny_yolo3lite_efficientnet_body
from yolo3.models.yolo3_mobilenetv3_large import yolo3_mobilenetv3large_body, yolo3lite_mobilenetv3large_body, tiny_yolo3_mobilenetv3large_body, tiny_yolo3lite_mobilenetv3large_body
from yolo3.models.yolo3_mobilenetv3_small import yolo3_mobilenetv3small_body, yolo3lite_mobilenetv3small_body, tiny_yolo3_mobilenetv3small_body, tiny_yolo3lite_mobilenetv3small_body, yolo3_ultralite_mobilenetv3small_body, tiny_yolo3_ultralite_mobilenetv3small_body
from yolo3.models.yolo3_peleenet import yolo3_peleenet_body, yolo3lite_peleenet_body, tiny_yolo3_peleenet_body, tiny_yolo3lite_peleenet_body, yolo3_ultralite_peleenet_body, tiny_yolo3_ultralite_peleenet_body
#from yolo3.models.yolo3_resnet50v2 import yolo3_resnet50v2_body, yolo3lite_resnet50v2_body, yolo3lite_spp_resnet50v2_body, tiny_yolo3_resnet50v2_body, tiny_yolo3lite_resnet50v2_body
from yolo4.models.yolo4_darknet import yolo4_body
from yolo4.models.yolo4_mobilenet import yolo4_mobilenet_body, yolo4lite_mobilenet_body, tiny_yolo4_mobilenet_body, tiny_yolo4lite_mobilenet_body
from yolo4.models.yolo4_mobilenetv2 import yolo4_mobilenetv2_body, yolo4lite_mobilenetv2_body, tiny_yolo4_mobilenetv2_body, tiny_yolo4lite_mobilenetv2_body
from yolo4.models.yolo4_mobilenetv3_large import yolo4_mobilenetv3large_body, yolo4lite_mobilenetv3large_body, tiny_yolo4_mobilenetv3large_body, tiny_yolo4lite_mobilenetv3large_body
from yolo4.models.yolo4_mobilenetv3_small import yolo4_mobilenetv3small_body, yolo4lite_mobilenetv3small_body, tiny_yolo4_mobilenetv3small_body, tiny_yolo4lite_mobilenetv3small_body
from yolo4.models.yolo4_efficientnet import yolo4_efficientnet_body, yolo4lite_efficientnet_body, tiny_yolo4_efficientnet_body, tiny_yolo4lite_efficientnet_body
#from yolo4.models.yolo4_resnet50v2 import yolo4_resnet50v2_body, yolo4lite_resnet50v2_body, tiny_yolo4_resnet50v2_body, tiny_yolo4lite_resnet50v2_body
from yolo3.loss import yolo3_loss
from yolo3.postprocess import batched_yolo3_postprocess, batched_yolo3_prenms, Yolo3PostProcessLayer
from common.model_utils import add_metrics, get_pruning_model
# A map of model type to construction info list for YOLOv3
#
# info list format:
# [model_function, backbone_length, pretrain_weight_path]
#
yolo3_model_map = {
'yolo3_mobilenet': [yolo3_mobilenet_body, 87, None],
'yolo3_mobilenet_lite': [yolo3lite_mobilenet_body, 87, None],
'yolo3_mobilenet_lite_spp': [yolo3lite_spp_mobilenet_body, 87, None],
'yolo3_mobilenetv2': [yolo3_mobilenetv2_body, 155, None],
'yolo3_mobilenetv2_lite': [yolo3lite_mobilenetv2_body, 155, None],
'yolo3_mobilenetv2_lite_spp': [yolo3lite_spp_mobilenetv2_body, 155, None],
'yolo3_mobilenetv2_ultralite': [yolo3_ultralite_mobilenetv2_body, 155, None],
'yolo3_mobilenetv3large': [yolo3_mobilenetv3large_body, 195, None],
'yolo3_mobilenetv3large_lite': [yolo3lite_mobilenetv3large_body, 195, None],
'yolo3_mobilenetv3small': [yolo3_mobilenetv3small_body, 166, None],
'yolo3_mobilenetv3small_lite': [yolo3lite_mobilenetv3small_body, 166, None],
'yolo3_mobilenetv3small_ultralite': [yolo3_ultralite_mobilenetv3small_body, 166, None],
'yolo3_peleenet': [yolo3_peleenet_body, 366, None],
'yolo3_peleenet_lite': [yolo3lite_peleenet_body, 366, None],
'yolo3_peleenet_ultralite': [yolo3_ultralite_peleenet_body, 366, None],
#'yolo3_resnet50v2': [yolo3_resnet50v2_body, 190, None],
#'yolo3_resnet50v2_lite': [yolo3lite_resnet50v2_body, 190, None],
#'yolo3_resnet50v2_lite_spp': [yolo3lite_spp_resnet50v2_body, 190, None],
'yolo3_shufflenetv2': [yolo3_shufflenetv2_body, 205, None],
'yolo3_shufflenetv2_lite': [yolo3lite_shufflenetv2_body, 205, None],
'yolo3_shufflenetv2_lite_spp': [yolo3lite_spp_shufflenetv2_body, 205, None],
    # NOTE: backbone_length is for EfficientNetB3;
    # if you change to another EfficientNet level, you need to modify it
'yolo3_efficientnet': [yolo3_efficientnet_body, 382, None],
'yolo3_efficientnet_lite': [yolo3lite_efficientnet_body, 382, None],
'yolo3_efficientnet_lite_spp': [yolo3lite_spp_efficientnet_body, 382, None],
'yolo3_darknet': [yolo3_body, 185, 'weights/darknet53.h5'],
'yolo3_darknet_spp': [custom_yolo3_spp_body, 185, 'weights/yolov3-spp.h5'],
    # No pretrained weights, so the backbone length is just set to 0
'yolo3_darknet_lite': [yolo3lite_body, 0, None],
'yolo3_vgg16': [yolo3_vgg16_body, 19, None],
'yolo3_xception': [yolo3_xception_body, 132, None],
'yolo3_xception_lite': [yolo3lite_xception_body, 132, None],
'yolo3_xception_spp': [yolo3_spp_xception_body, 132, None],
'yolo3_nano': [yolo3_nano_body, 268, None],
'yolo4_darknet': [yolo4_body, 250, 'weights/cspdarknet53.h5'],
'yolo4_mobilenet': [yolo4_mobilenet_body, 87, None],
'yolo4_mobilenet_lite': [yolo4lite_mobilenet_body, 87, None],
'yolo4_mobilenetv2': [yolo4_mobilenetv2_body, 155, None],
'yolo4_mobilenetv2_lite': [yolo4lite_mobilenetv2_body, 155, None],
'yolo4_mobilenetv3large': [yolo4_mobilenetv3large_body, 195, None],
'yolo4_mobilenetv3large_lite': [yolo4lite_mobilenetv3large_body, 195, None],
'yolo4_mobilenetv3small': [yolo4_mobilenetv3small_body, 166, None],
'yolo4_mobilenetv3small_lite': [yolo4lite_mobilenetv3small_body, 166, None],
#'yolo4_resnet50v2': [yolo4_resnet50v2_body, 190, None],
#'yolo4_resnet50v2_lite': [yolo4lite_resnet50v2_body, 190, None],
    # NOTE: backbone_length is for EfficientNetB1;
    # if you change to another EfficientNet level, you need to modify it
'yolo4_efficientnet': [yolo4_efficientnet_body, 337, None],
'yolo4_efficientnet_lite': [yolo4lite_efficientnet_body, 337, None],
}
# A map of model type to construction info list for Tiny YOLOv3
#
# info list format:
# [model_function, backbone_length, pretrain_weight_file]
#
yolo3_tiny_model_map = {
'tiny_yolo3_mobilenet': [tiny_yolo3_mobilenet_body, 87, None],
'tiny_yolo3_mobilenet_lite': [tiny_yolo3lite_mobilenet_body, 87, None],
'tiny_yolo3_mobilenetv2': [tiny_yolo3_mobilenetv2_body, 155, None],
'tiny_yolo3_mobilenetv2_lite': [tiny_yolo3lite_mobilenetv2_body, 155, None],
'tiny_yolo3_mobilenetv2_ultralite': [tiny_yolo3_ultralite_mobilenetv2_body, 155, None],
'tiny_yolo3_mobilenetv3large': [tiny_yolo3_mobilenetv3large_body, 195, None],
'tiny_yolo3_mobilenetv3large_lite': [tiny_yolo3lite_mobilenetv3large_body, 195, None],
'tiny_yolo3_mobilenetv3small': [tiny_yolo3_mobilenetv3small_body, 166, None],
'tiny_yolo3_mobilenetv3small_lite': [tiny_yolo3lite_mobilenetv3small_body, 166, None],
'tiny_yolo3_mobilenetv3small_ultralite': [tiny_yolo3_ultralite_mobilenetv3small_body, 166, None],
'tiny_yolo3_peleenet': [tiny_yolo3_peleenet_body, 366, None],
'tiny_yolo3_peleenet_lite': [tiny_yolo3lite_peleenet_body, 366, None],
'tiny_yolo3_peleenet_ultralite': [tiny_yolo3_ultralite_peleenet_body, 366, None],
#'tiny_yolo3_resnet50v2': [tiny_yolo3_resnet50v2_body, 190, None],
#'tiny_yolo3_resnet50v2_lite': [tiny_yolo3lite_resnet50v2_body, 190, None],
'tiny_yolo3_shufflenetv2': [tiny_yolo3_shufflenetv2_body, 205, None],
'tiny_yolo3_shufflenetv2_lite': [tiny_yolo3lite_shufflenetv2_body, 205, None],
    # NOTE: backbone_length is for EfficientNetB0;
    # if you change to another EfficientNet level, you need to modify it
'tiny_yolo3_efficientnet': [tiny_yolo3_efficientnet_body, 235, None],
'tiny_yolo3_efficientnet_lite': [tiny_yolo3lite_efficientnet_body, 235, None],
'tiny_yolo3_darknet': [custom_tiny_yolo3_body, 20, 'weights/yolov3-tiny.h5'],
    # No pretrained weights, so the backbone length is just set to 0
'tiny_yolo3_darknet_lite': [tiny_yolo3lite_body, 0, None],
'tiny_yolo3_vgg16': [tiny_yolo3_vgg16_body, 19, None],
'tiny_yolo3_xception': [tiny_yolo3_xception_body, 132, None],
'tiny_yolo3_xception_lite': [tiny_yolo3lite_xception_body, 132, None],
'tiny_yolo4_mobilenet': [tiny_yolo4_mobilenet_body, 87, None],
'tiny_yolo4_mobilenet_lite': [tiny_yolo4lite_mobilenet_body, 87, None],
'tiny_yolo4_mobilenet_lite_nospp': [partial(tiny_yolo4lite_mobilenet_body, use_spp=False), 87, None],
'tiny_yolo4_mobilenetv2': [tiny_yolo4_mobilenetv2_body, 155, None],
'tiny_yolo4_mobilenetv2_lite': [tiny_yolo4lite_mobilenetv2_body, 155, None],
'tiny_yolo4_mobilenetv2_lite_nospp': [partial(tiny_yolo4lite_mobilenetv2_body, use_spp=False), 155, None],
'tiny_yolo4_mobilenetv3large': [tiny_yolo4_mobilenetv3large_body, 195, None],
'tiny_yolo4_mobilenetv3large_lite': [tiny_yolo4lite_mobilenetv3large_body, 195, None],
'tiny_yolo4_mobilenetv3large_lite_nospp': [partial(tiny_yolo4lite_mobilenetv3large_body, use_spp=False), 195, None],
'tiny_yolo4_mobilenetv3small': [tiny_yolo4_mobilenetv3small_body, 166, None],
'tiny_yolo4_mobilenetv3small_lite': [tiny_yolo4lite_mobilenetv3small_body, 166, None],
'tiny_yolo4_mobilenetv3small_lite_nospp': [partial(tiny_yolo4lite_mobilenetv3small_body, use_spp=False), 166, None],
#'tiny_yolo4_resnet50v2': [tiny_yolo4_resnet50v2_body, 190, None],
#'tiny_yolo4_resnet50v2_lite': [tiny_yolo4lite_resnet50v2_body, 190, None],
    # NOTE: backbone_length is for EfficientNetB0;
    # if you change to another EfficientNet level, you need to modify it
'tiny_yolo4_efficientnet': [tiny_yolo4_efficientnet_body, 235, None],
'tiny_yolo4_efficientnet_lite': [tiny_yolo4lite_efficientnet_body, 235, None],
'tiny_yolo4_efficientnet_lite_nospp': [partial(tiny_yolo4lite_efficientnet_body, use_spp=False), 235, None],
}
def get_yolo3_model(model_type, num_feature_layers, num_anchors, num_classes, input_tensor=None, input_shape=None, model_pruning=False, pruning_end_step=10000):
#prepare input tensor
if input_shape:
input_tensor = Input(shape=input_shape, name='image_input')
if input_tensor is None:
input_tensor = Input(shape=(None, None, 3), name='image_input')
#Tiny YOLOv3 model has 6 anchors and 2 feature layers
if num_feature_layers == 2:
if model_type in yolo3_tiny_model_map:
model_function = yolo3_tiny_model_map[model_type][0]
backbone_len = yolo3_tiny_model_map[model_type][1]
weights_path = yolo3_tiny_model_map[model_type][2]
if weights_path:
model_body = model_function(input_tensor, num_anchors//2, num_classes, weights_path=weights_path)
else:
model_body = model_function(input_tensor, num_anchors//2, num_classes)
else:
            raise ValueError('This model type is not currently supported')
#YOLOv3 model has 9 anchors and 3 feature layers
elif num_feature_layers == 3:
if model_type in yolo3_model_map:
model_function = yolo3_model_map[model_type][0]
backbone_len = yolo3_model_map[model_type][1]
weights_path = yolo3_model_map[model_type][2]
if weights_path:
model_body = model_function(input_tensor, num_anchors//3, num_classes, weights_path=weights_path)
else:
model_body = model_function(input_tensor, num_anchors//3, num_classes)
else:
            raise ValueError('This model type is not currently supported')
else:
raise ValueError('model type mismatch anchors')
if model_pruning:
model_body = get_pruning_model(model_body, begin_step=0, end_step=pruning_end_step)
return model_body, backbone_len
def get_yolo3_train_model(model_type, anchors, num_classes, weights_path=None, freeze_level=1, optimizer=Adam(lr=1e-3, decay=0), label_smoothing=0, elim_grid_sense=False, model_pruning=False, pruning_end_step=10000):
'''create the training model, for YOLOv3'''
#K.clear_session() # get a new session
num_anchors = len(anchors)
#YOLOv3 model has 9 anchors and 3 feature layers but
#Tiny YOLOv3 model has 6 anchors and 2 feature layers,
    #so we can derive the number of feature layers from the anchor count
num_feature_layers = num_anchors//3
#feature map target value, so its shape should be like:
# [
# (image_height/32, image_width/32, 3, num_classes+5),
# (image_height/16, image_width/16, 3, num_classes+5),
# (image_height/8, image_width/8, 3, num_classes+5)
# ]
y_true = [Input(shape=(None, None, 3, num_classes+5), name='y_true_{}'.format(l)) for l in range(num_feature_layers)]
model_body, backbone_len = get_yolo3_model(model_type, num_feature_layers, num_anchors, num_classes, model_pruning=model_pruning, pruning_end_step=pruning_end_step)
print('Create {} {} model with {} anchors and {} classes.'.format('Tiny' if num_feature_layers==2 else '', model_type, num_anchors, num_classes))
print('model layer number:', len(model_body.layers))
if weights_path:
model_body.load_weights(weights_path, by_name=True)#, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_level in [1, 2]:
# Freeze the backbone part or freeze all but final feature map & input layers.
num = (backbone_len, len(model_body.layers)-3)[freeze_level-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
elif freeze_level == 0:
# Unfreeze all layers.
for i in range(len(model_body.layers)):
model_body.layers[i].trainable= True
print('Unfreeze all of the layers.')
model_loss, location_loss, confidence_loss, class_loss = Lambda(yolo3_loss, name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5, 'label_smoothing': label_smoothing, 'elim_grid_sense': elim_grid_sense})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
loss_dict = {'location_loss':location_loss, 'confidence_loss':confidence_loss, 'class_loss':class_loss}
add_metrics(model, loss_dict)
model.compile(optimizer=optimizer, loss={
# use custom yolo_loss Lambda layer.
'yolo_loss': lambda y_true, y_pred: y_pred})
return model
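# Illustrative sketch of building a trainable model. The anchors are the widely
# used YOLOv3 defaults and the class count is an example value, not a project
# default:
def _example_build_train_model():
    anchors = [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
               (59, 119), (116, 90), (156, 198), (373, 326)]
    return get_yolo3_train_model('yolo3_mobilenet_lite', anchors, num_classes=20)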
def get_yolo3_inference_model(model_type, anchors, num_classes, weights_path=None, input_shape=None, confidence=0.1, iou_threshold=0.4, elim_grid_sense=False):
'''create the inference model, for YOLOv3'''
#K.clear_session() # get a new session
num_anchors = len(anchors)
#YOLOv3 model has 9 anchors and 3 feature layers but
#Tiny YOLOv3 model has 6 anchors and 2 feature layers,
    #so we can derive the number of feature layers from the anchor count
num_feature_layers = num_anchors//3
image_shape = Input(shape=(2,), dtype='int64', name='image_shape')
model_body, _ = get_yolo3_model(model_type, num_feature_layers, num_anchors, num_classes, input_shape=input_shape)
print('Create {} YOLOv3 {} model with {} anchors and {} classes.'.format('Tiny' if num_feature_layers==2 else '', model_type, num_anchors, num_classes))
if weights_path:
model_body.load_weights(weights_path, by_name=False)#, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
boxes, scores, classes = Lambda(batched_yolo3_postprocess, name='yolo3_postprocess',
arguments={'anchors': anchors, 'num_classes': num_classes, 'confidence': confidence, 'iou_threshold': iou_threshold, 'elim_grid_sense': elim_grid_sense})(
[*model_body.output, image_shape])
model = Model([model_body.input, image_shape], [boxes, scores, classes])
return model
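# Companion sketch for inference (illustrative only; `anchors` is a 9-anchor
# list as in the training sketch above and `weights_path` is a placeholder):
def _example_build_inference_model(anchors, weights_path=None):
    return get_yolo3_inference_model('yolo3_mobilenet_lite', anchors,
                                     num_classes=20, weights_path=weights_path,
                                     input_shape=(416, 416, 3),
                                     confidence=0.1, iou_threshold=0.4)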
|
the-stack_106_21520
|
import numpy as np
import os
from skimage.color import rgba2rgb
from skimage.transform import resize, rescale
LOOKUP = {'overflowed': 'bin_pos',
'bin other': 'bin_other',
'bin': 'bin_neg',
'ahead only': 'traffic_sign_ahead_only',
'caution children': 'traffic_sign_caution_children',
'crosswalk': 'traffic_sign_crosswalk',
'school crosswalk': 'traffic_sign_school_crosswalk',
'dead end': 'traffic_sign_dead_end',
'no parking': 'traffic_sign_no_parking',
'speed limit 25': 'traffic_sign_speed_limit_25',
'speed limit 30': 'traffic_sign_speed_limit_30',
'speed limit 35': 'traffic_sign_speed_limit_35',
'stop': 'traffic_sign_stop',
'stop ahead': 'traffic_sign_stop_ahead',
'lights': 'traffic_sign_traffic_lights',
'yield': 'traffic_sign_yield',
'trash object': 'trash_object',
'waste container': 'waste_container'
}
def class_lookup(cls_name):
inv_map = {v: k for k, v in LOOKUP.items()}
return inv_map.get(cls_name, cls_name)
def get_iou(bb1, bb2):
"""
Calculate the Intersection over Union (IoU) of two bounding boxes.
Parameters
----------
bb1 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
bb2 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
        The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
assert bb1['x1'] < bb1['x2']
assert bb1['y1'] < bb1['y2']
assert bb2['x1'] < bb2['x2']
assert bb2['y1'] < bb2['y2']
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
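# Quick illustrative check of get_iou (the boxes are hand-picked examples):
def _example_iou():
    bb1 = {'x1': 0, 'y1': 0, 'x2': 10, 'y2': 10}
    bb2 = {'x1': 5, 'y1': 5, 'x2': 15, 'y2': 15}
    # intersection is 5 * 5 = 25, union is 100 + 100 - 25 = 175 -> IoU ~ 0.143
    return get_iou(bb1, bb2)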
def crop_box(image, box, new_size):
if image.shape[-1] == 4:
image = rgba2rgb(image)
image = (image * 255).astype(np.uint8)
height, width, _ = image.shape
xmin, xmax, ymin, ymax = box[0], box[1], box[2], box[3]
x_size = xmax - xmin
y_size = ymax - ymin
if x_size < new_size:
xmin -= int((new_size - x_size) / 2)
xmax += int((new_size - x_size) / 2)
else:
# extend box
xmin = int(xmin - 0.01 * x_size)
xmax = int(xmax + 0.01 * x_size)
if y_size < new_size:
ymin -= int((new_size - y_size) / 2)
ymax += int((new_size - y_size) / 2)
else:
ymin = int(ymin - 0.01 * y_size)
ymax = int(ymax + 0.01 * y_size)
xmin = max(0, xmin)
ymin = max(0, ymin)
xmax = min(width, xmax)
ymax = min(height, ymax)
new_image = image.copy()[ymin: ymax, xmin: xmax]
new_image = resize(new_image, (new_size, new_size), preserve_range=True)
return new_image.astype(np.uint8)
def compute_class_weights(path_to_train_data):
weights = {}
for cls in os.listdir(path_to_train_data):
if os.path.isdir(os.path.join(path_to_train_data, cls)):
weights[cls] = len(os.listdir(os.path.join(path_to_train_data, cls)))
ref = max(weights.values())
for k, v in weights.items():
weights[k] = ref / v
return weights
def bifocal_view(img, coords=None):
height, width, _ = img.shape
img_left = img[:, :height]
img_right = img[:, width - height:]
coords_left = []
coords_right = []
if coords is not None:
for coord in coords:
class_, xmin, ymin, xmax, ymax = coord
if xmin < img_left.shape[1]:
coords_left.append([class_, xmin, ymin, min(xmax, height), ymax])
if xmax > width - height:
xmin = max(xmin - (width - height), 1)
xmax = xmax - (width - height)
coords_right.append([class_, xmin, ymin, xmax, ymax])
return img_left, img_right, coords_left, coords_right
def rescale_image(img, dimension):
if img.shape[-1] == 4:
img = rgba2rgb(img)
img = (img * 255).astype(np.uint8)
max_dim = max(img.shape[0], img.shape[1])
scaled_img = rescale(img, float(dimension) / max_dim, preserve_range=True)
shp = scaled_img.shape
left_pad = int(round(float((dimension - shp[0])) / 2))
right_pad = int(round(float(dimension - shp[0]) - left_pad))
top_pad = int(round(float((dimension - shp[1])) / 2))
bottom_pad = int(round(float(dimension - shp[1]) - top_pad))
pads = ((left_pad, right_pad), (top_pad, bottom_pad))
new_image = np.zeros((dimension, dimension, img.shape[-1]), dtype=np.float32)
for i in range(new_image.shape[-1]):
new_image[:, :, i] = np.lib.pad(scaled_img[:, :, i], pads, 'constant', constant_values=((0, 0), (0, 0)))
return new_image
def non_max_suppression(boxes, overlap_threshold):
if len(boxes) == 0:
return []
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
picked_indices = []
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
picked_indices.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap = (w * h) / area[idxs[:last]]
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlap_threshold)[0])))
return picked_indices
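# Illustrative use of non_max_suppression (the boxes are arbitrary examples;
# the two overlapping boxes collapse to one at a 0.5 overlap threshold):
def _example_nms():
    boxes = np.array([[10, 10, 50, 50],
                      [12, 12, 52, 52],    # heavy overlap with the first box
                      [100, 100, 140, 140]])
    keep = non_max_suppression(boxes, overlap_threshold=0.5)
    return boxes[keep]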
|
the-stack_106_21521
|
"""Identify program versions used for analysis, reporting in structured table.
Catalogs the full list of programs used in analysis, enabling reproduction of
results and tracking of provenance in output files.
"""
from __future__ import print_function
import os
import contextlib
import subprocess
import sys
import yaml
import toolz as tz
from bcbio import utils
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.log import logger
_cl_progs = [{"cmd": "bamtofastq", "name": "biobambam",
"args": "--version", "stdout_flag": "This is biobambam2 version"},
{"cmd": "bamtools", "args": "--version", "stdout_flag": "bamtools"},
{"cmd": "bcftools", "stdout_flag": "Version:"},
{"cmd": "bedtools", "args": "--version", "stdout_flag": "bedtools"},
{"cmd": "bowtie2", "args": "--version", "stdout_flag": "bowtie2-align version"},
{"cmd": "bwa", "stdout_flag": "Version:"},
{"cmd": "cutadapt", "args": "--version"},
{"cmd": "fastqc", "args": "--version", "stdout_flag": "FastQC"},
{"cmd": "freebayes", "stdout_flag": "version:"},
{"cmd": "gemini", "args": "--version", "stdout_flag": "gemini "},
{"cmd": "novosort", "args": "--version"},
{"cmd": "novoalign", "stdout_flag": "Novoalign"},
{"cmd": "samtools", "stdout_flag": "Version:"},
{"cmd": "qualimap", "args": "-h", "stdout_flag": "QualiMap"},
{"cmd": "preseq", "stdout_flag": "preseq"},
{"cmd": "vcflib", "has_cl_version": False},
{"cmd": "featureCounts", "args": "-v", "stdout_flag": "featureCounts"}]
_manifest_progs = [
'bcbio-variation', 'bioconductor-bubbletree', 'cufflinks',
'cnvkit', 'fgbio', 'gatk4', 'hisat2', 'sailfish', 'salmon', 'grabix',
'htseq', 'lumpy-sv', 'manta', 'break-point-inspector', 'metasv', 'multiqc',
'seq2c', 'mirdeep2', 'oncofuse', 'picard', 'phylowgs', 'platypus-variant',
'rapmap', 'star', 'rtg-tools', 'sambamba', 'samblaster', 'scalpel',
'seqbuster', 'snpeff', 'vardict', 'vardict-java', 'varscan',
'ensembl-vep', 'vt', 'wham', 'umis']
def _broad_versioner(type):
def get_version(config):
from bcbio import broad
try:
runner = broad.runner_from_config(config)
except ValueError:
return ""
if type == "gatk":
return runner.get_gatk_version()
elif type == "mutect":
try:
runner = broad.runner_from_config(config, "mutect")
return runner.get_mutect_version()
except ValueError:
return ""
else:
raise NotImplementedError(type)
return get_version
def jar_versioner(program_name, jar_name):
"""Retrieve version information based on jar file.
"""
def get_version(config):
try:
pdir = config_utils.get_program(program_name, config, "dir")
# not configured
except ValueError:
return ""
jar = os.path.basename(config_utils.get_jar(jar_name, pdir))
for to_remove in [jar_name, ".jar", "-standalone"]:
jar = jar.replace(to_remove, "")
if jar.startswith(("-", ".")):
jar = jar[1:]
if not jar:
logger.warn("Unable to determine version for program '{}' from jar file {}".format(
program_name, config_utils.get_jar(jar_name, pdir)))
return jar
return get_version
def java_versioner(pname, jar_name, **kwargs):
def get_version(config):
try:
pdir = config_utils.get_program(pname, config, "dir")
except ValueError:
return ""
jar = config_utils.get_jar(jar_name, pdir)
kwargs["cmd"] = "java"
kwargs["args"] = "-Xms128m -Xmx256m -jar %s" % jar
return _get_cl_version(kwargs, config)
return get_version
_alt_progs = [{"name": "gatk", "version_fn": _broad_versioner("gatk")},
{"name": "mutect",
"version_fn": _broad_versioner("mutect")}]
def _parse_from_stdoutflag(stdout, x):
for line in (l.decode() for l in stdout):
if line.find(x) >= 0:
parts = [p for p in line[line.find(x) + len(x):].split() if p.strip()]
return parts[0].strip()
return ""
def _parse_from_parenflag(stdout, x):
for line in (l.decode() for l in stdout):
if line.find(x) >= 0:
return line.split("(")[-1].split(")")[0]
return ""
def _get_cl_version(p, config):
"""Retrieve version of a single commandline program.
"""
if not p.get("has_cl_version", True):
return ""
try:
prog = config_utils.get_program(p["cmd"], config)
except config_utils.CmdNotFound:
localpy_cmd = os.path.join(os.path.dirname(sys.executable), p["cmd"])
if os.path.exists(localpy_cmd):
prog = localpy_cmd
else:
return ""
args = p.get("args", "")
cmd = "{prog} {args}"
subp = subprocess.Popen(cmd.format(**locals()), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
with contextlib.closing(subp.stdout) as stdout:
if p.get("stdout_flag"):
v = _parse_from_stdoutflag(stdout, p["stdout_flag"])
elif p.get("paren_flag"):
v = _parse_from_parenflag(stdout, p["paren_flag"])
else:
lines = [l.strip() for l in stdout.read().decode().split("\n") if l.strip()]
v = lines[-1]
if v.endswith("."):
v = v[:-1]
return v
def _get_versions(config=None):
"""Retrieve details on all programs available on the system.
"""
try:
from bcbio.pipeline import version
if hasattr(version, "__version__"):
bcbio_version = ("%s-%s" % (version.__version__, version.__git_revision__)
if version.__git_revision__ else version.__version__)
else:
bcbio_version = ""
except ImportError:
bcbio_version = ""
out = [{"program": "bcbio-nextgen", "version": bcbio_version}]
# get programs from the conda manifest, if available
manifest_dir = _get_manifest_dir(config)
manifest_vs = _get_versions_manifest(manifest_dir) if manifest_dir else []
if manifest_vs:
out += manifest_vs
programs = {x["program"] for x in out if x["version"]}
# get program versions from command line
for p in _cl_progs:
if p["cmd"] not in programs:
out.append({"program": p["cmd"],
"version": _get_cl_version(p, config)})
programs.add(p["cmd"])
for p in _alt_progs:
if p["name"] not in programs:
out.append({"program": p["name"],
"version": (p["version_fn"](config))})
programs.add(p["name"])
out.sort(key=lambda x: x["program"])
# remove entries with empty version strings
out = [x for x in out if x["version"]]
return out
def _get_manifest_dir(data=None, name=None):
"""
    Get the manifest directory from the data dictionary, falling back on
    alternatives. It prefers, in order:
    1. locating it from the bcbio_system.yaml file
    2. locating it from the galaxy directory
    3. locating it from the python executable.
    It can accept either the data or the config dictionary.
"""
manifest_dir = None
if data:
bcbio_system = tz.get_in(["config", "bcbio_system"], data, None)
bcbio_system = bcbio_system if bcbio_system else data.get("bcbio_system", None)
if bcbio_system:
sibling_dir = os.path.normpath(os.path.dirname(bcbio_system))
else:
sibling_dir = dd.get_galaxy_dir(data)
if sibling_dir:
manifest_dir = os.path.normpath(os.path.join(sibling_dir, os.pardir,
"manifest"))
if not manifest_dir or not os.path.exists(manifest_dir):
manifest_dir = os.path.join(config_utils.get_base_installdir(), "manifest")
if not os.path.exists(manifest_dir) and name:
manifest_dir = os.path.join(config_utils.get_base_installdir(name), "manifest")
return manifest_dir
def _get_versions_manifest(manifest_dir):
"""Retrieve versions from a pre-existing manifest of installed software.
"""
all_pkgs = _manifest_progs + [p.get("name", p["cmd"]) for p in _cl_progs] + [p["name"] for p in _alt_progs]
if os.path.exists(manifest_dir):
out = []
for plist in ["toolplus", "python", "r", "debian", "custom"]:
pkg_file = os.path.join(manifest_dir, "%s-packages.yaml" % plist)
if os.path.exists(pkg_file):
logger.info(f"Retreiving program versions from {pkg_file}.")
with open(pkg_file) as in_handle:
pkg_info = yaml.safe_load(in_handle)
if not pkg_info:
continue
added = []
for pkg in all_pkgs:
if pkg in pkg_info:
added.append(pkg)
out.append({"program": pkg, "version": pkg_info[pkg]["version"]})
for x in added:
all_pkgs.remove(x)
out.sort(key=lambda x: x["program"])
for pkg in all_pkgs:
out.append({"program": pkg, "version": ""})
return out
def _get_program_file(dirs):
if dirs.get("work"):
base_dir = utils.safe_makedir(os.path.join(dirs["work"], "provenance"))
return os.path.join(base_dir, "programs.txt")
def write_versions(dirs, config=None, is_wrapper=False):
"""Write CSV file with versions used in analysis pipeline.
"""
out_file = _get_program_file(dirs)
if is_wrapper:
assert utils.file_exists(out_file), "Failed to create program versions from VM"
elif out_file is None:
for p in _get_versions(config):
print("{program},{version}".format(**p))
else:
with open(out_file, "w") as out_handle:
for p in _get_versions(config):
program = p["program"]
version = p["version"]
out_handle.write(f"{program},{version}\n")
return out_file
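# Illustrative sketch only: record program versions under `work_dir` and read
# one back. A populated bcbio `config` dict is assumed; the names are examples.
def _example_record_and_query_versions(work_dir, config):
    dirs = {"work": work_dir}
    out_file = write_versions(dirs, config=config)
    # get_version reads the CSV written above and raises KeyError if the
    # requested program (here samtools) was not detected on the system.
    return out_file, get_version("samtools", dirs=dirs, config=config)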
def get_version_manifest(name, data=None, required=False):
"""Retrieve a version from the currently installed manifest.
"""
manifest_dir = _get_manifest_dir(data, name)
manifest_vs = _get_versions_manifest(manifest_dir) or []
for x in manifest_vs:
if x["program"] == name:
v = x.get("version", "")
if v:
return v
if required:
raise ValueError("Did not find %s in install manifest. Could not check version." % name)
return ""
def add_subparser(subparsers):
"""Add command line option for exporting version information.
"""
parser = subparsers.add_parser("version",
help="Export versions of used software to stdout or a file ")
parser.add_argument("--workdir", help="Directory export programs to in workdir/provenance/programs.txt",
default=None)
def get_version(name, dirs=None, config=None):
"""Retrieve the current version of the given program from cached names.
"""
if dirs:
p = _get_program_file(dirs)
else:
p = tz.get_in(["resources", "program_versions"], config)
if p:
with open(p) as in_handle:
for line in in_handle:
prog, version = line.rstrip().split(",")
if prog == name and version:
return version
raise KeyError("Version information not found for %s in %s" % (name, p))
|
the-stack_106_21522
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import unittest
import numpy as np
from test.aqua.common import QiskitAquaTestCase
from qiskit.aqua import run_algorithm
from qiskit.aqua.input import LinearSystemInput
from qiskit.aqua.algorithms import ExactLSsolver
class TestExactLSsolver(QiskitAquaTestCase):
def setUp(self):
super().setUp()
self.algo_input = LinearSystemInput()
self.algo_input.matrix = [[1, 2], [2, 1]]
self.algo_input.vector = [1, 2]
def test_els_via_run_algorithm_full_dict(self):
params = {
'algorithm': {
'name': 'ExactLSsolver'
},
'problem': {
'name': 'linear_system'
},
'input': {
'name': 'LinearSystemInput',
'matrix': self.algo_input.matrix,
'vector': self.algo_input.vector
}
}
result = run_algorithm(params)
np.testing.assert_array_almost_equal(result['solution'], [1, 0])
np.testing.assert_array_almost_equal(result['eigvals'], [3, -1])
def test_els_via_run_algorithm(self):
params = {
'algorithm': {
'name': 'ExactLSsolver'
},
'problem': {
'name': 'linear_system'
}
}
result = run_algorithm(params, self.algo_input)
np.testing.assert_array_almost_equal(result['solution'], [1, 0])
np.testing.assert_array_almost_equal(result['eigvals'], [3, -1])
def test_els_direct(self):
algo = ExactLSsolver(self.algo_input.matrix, self.algo_input.vector)
result = algo.run()
np.testing.assert_array_almost_equal(result['solution'], [1, 0])
np.testing.assert_array_almost_equal(result['eigvals'], [3, -1])
if __name__ == '__main__':
unittest.main()
|
the-stack_106_21523
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import sys
import yaml
from ludwig.contrib import contrib_command
from ludwig.data.postprocessing import postprocess
from ludwig.globals import LUDWIG_VERSION, set_on_master, is_on_master
from ludwig.predict import predict
from ludwig.predict import print_prediction_results
from ludwig.predict import save_prediction_outputs
from ludwig.predict import save_prediction_statistics
from ludwig.train import full_train
from ludwig.utils.defaults import default_random_seed
from ludwig.utils.print_utils import logging_level_registry
from ludwig.utils.print_utils import print_ludwig
def experiment(
model_definition,
model_definition_file=None,
data_csv=None,
data_train_csv=None,
data_validation_csv=None,
data_test_csv=None,
data_hdf5=None,
data_train_hdf5=None,
data_validation_hdf5=None,
data_test_hdf5=None,
train_set_metadata_json=None,
experiment_name='experiment',
model_name='run',
model_load_path=None,
model_resume_path=None,
skip_save_model=False,
skip_save_progress=False,
skip_save_log=False,
skip_save_processed_input=False,
skip_save_unprocessed_output=False,
output_directory='results',
gpus=None,
gpu_fraction=1.0,
use_horovod=False,
random_seed=default_random_seed,
debug=False,
**kwargs
):
"""Trains a model on a dataset's training and validation splits and
uses it to predict on the test split.
It saves the trained model and the statistics of training and testing.
:param model_definition: Model definition which defines the different
parameters of the model, features, preprocessing and training.
:type model_definition: Dictionary
:param model_definition_file: The file that specifies the model definition.
It is a yaml file.
:type model_definition_file: filepath (str)
    :param data_csv: A CSV file containing the input data which is used to
           train, validate and test a model. The CSV either contains a
           split column or will be split.
    :type data_csv: filepath (str)
    :param data_train_csv: A CSV file containing the input data which is used
           to train a model.
    :type data_train_csv: filepath (str)
    :param data_validation_csv: A CSV file containing the input data which is
           used to validate a model.
    :type data_validation_csv: filepath (str)
    :param data_test_csv: A CSV file containing the input data which is used
           to test a model.
:type data_test_csv: filepath (str)
:param data_hdf5: If the dataset is in the hdf5 format, this is used instead
of the csv file.
:type data_hdf5: filepath (str)
:param data_train_hdf5: If the training set is in the hdf5 format, this is
used instead of the csv file.
:type data_train_hdf5: filepath (str)
:param data_validation_hdf5: If the validation set is in the hdf5 format,
this is used instead of the csv file.
:type data_validation_hdf5: filepath (str)
:param data_test_hdf5: If the test set is in the hdf5 format, this is
used instead of the csv file.
:type data_test_hdf5: filepath (str)
:param train_set_metadata_json: If the dataset is in hdf5 format, this is
the associated json file containing metadata.
:type train_set_metadata_json: filepath (str)
:param experiment_name: The name for the experiment.
:type experiment_name: Str
:param model_name: Name of the model that is being used.
:type model_name: Str
:param model_load_path: If this is specified the loaded model will be used
as initialization (useful for transfer learning).
:type model_load_path: filepath (str)
:param model_resume_path: Resumes training of the model from the path
specified. The difference with model_load_path is that also training
statistics like the current epoch and the loss and performance so
           far are also resumed, effectively continuing a previously interrupted
training process.
:type model_resume_path: filepath (str)
:param skip_save_model: Disables
saving model weights and hyperparameters each time the model
improves. By default Ludwig saves model weights after each epoch
           the validation measure improves, but if the model is really big
           that can be time consuming. If you do not want to keep
           the weights and just want to find out what performance a model can
           get with a set of hyperparameters, use this parameter to skip it,
but the model will not be loadable later on.
:type skip_save_model: Boolean
:param skip_save_progress: Disables saving
progress each epoch. By default Ludwig saves weights and stats
after each epoch for enabling resuming of training, but if
           the model is really big that can be time consuming and will use
twice as much space, use this parameter to skip it, but training
cannot be resumed later on.
:type skip_save_progress: Boolean
:param skip_save_log: Disables saving TensorBoard
logs. By default Ludwig saves logs for the TensorBoard, but if it
           is not needed, turning it off can slightly increase the
           overall speed.
:type skip_save_log: Boolean
:param skip_save_processed_input: If a CSV dataset is provided it is
preprocessed and then saved as an hdf5 and json to avoid running
the preprocessing again. If this parameter is False,
the hdf5 and json file are not saved.
:type skip_save_processed_input: Boolean
:param skip_save_unprocessed_output: By default predictions and
their probabilities are saved in both raw unprocessed numpy files
           containing tensors and as postprocessed CSV files
(one for each output feature). If this parameter is True,
only the CSV ones are saved and the numpy ones are skipped.
:type skip_save_unprocessed_output: Boolean
    :param output_directory: The directory that will contain the training
           statistics, the saved model and the training progress files.
:type output_directory: filepath (str)
:param gpus: List of GPUs that are available for training.
:type gpus: List
:param gpu_fraction: Fraction of the memory of each GPU to use at
the beginning of the training. The memory may grow elastically.
:type gpu_fraction: Integer
:param use_horovod: Flag for using horovod
:type use_horovod: Boolean
:param random_seed: Random seed used for weights initialization,
splits and any other random function.
:type random_seed: Integer
:param debug: If true turns on tfdbg with inf_or_nan checks.
:type debug: Boolean
"""
(
model,
preprocessed_data,
experiment_dir_name,
_,
model_definition
) = full_train(
model_definition,
model_definition_file=model_definition_file,
data_csv=data_csv,
data_train_csv=data_train_csv,
data_validation_csv=data_validation_csv,
data_test_csv=data_test_csv,
data_hdf5=data_hdf5,
data_train_hdf5=data_train_hdf5,
data_validation_hdf5=data_validation_hdf5,
data_test_hdf5=data_test_hdf5,
train_set_metadata_json=train_set_metadata_json,
experiment_name=experiment_name,
model_name=model_name,
model_load_path=model_load_path,
model_resume_path=model_resume_path,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
output_directory=output_directory,
gpus=gpus,
gpu_fraction=gpu_fraction,
use_horovod=use_horovod,
random_seed=random_seed,
debug=debug,
**kwargs
)
(training_set,
validation_set,
test_set,
train_set_metadata) = preprocessed_data
if test_set is not None:
if model_definition['training']['eval_batch_size'] > 0:
batch_size = model_definition['training']['eval_batch_size']
else:
batch_size = model_definition['training']['batch_size']
# predict
test_results = predict(
test_set,
train_set_metadata,
model,
model_definition,
batch_size,
evaluate_performance=True,
gpus=gpus,
gpu_fraction=gpu_fraction,
debug=debug
)
# postprocess
postprocessed_output = postprocess(
test_results,
model_definition['output_features'],
train_set_metadata,
experiment_dir_name,
skip_save_unprocessed_output or not is_on_master()
)
if is_on_master():
print_prediction_results(test_results)
save_prediction_outputs(postprocessed_output, experiment_dir_name)
save_prediction_statistics(test_results, experiment_dir_name)
model.close_session()
if is_on_master():
logging.info('\nFinished: {0}_{1}'.format(
experiment_name, model_name))
logging.info('Saved to: {}'.format(experiment_dir_name))
contrib_command("experiment_save", experiment_dir_name)
return experiment_dir_name
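# Minimal programmatic call (a sketch only; the feature names and CSV path are
# placeholders and not shipped with this repository):
def _example_experiment():
    model_definition = {
        'input_features': [{'name': 'text', 'type': 'text'}],
        'output_features': [{'name': 'class', 'type': 'category'}],
    }
    return experiment(model_definition, data_csv='my_dataset.csv',
                      experiment_name='demo', model_name='run1')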
def cli(sys_argv):
parser = argparse.ArgumentParser(
description='This script trains and tests a model.',
prog='ludwig experiment',
usage='%(prog)s [options]'
)
# ----------------------------
# Experiment naming parameters
# ----------------------------
parser.add_argument(
'--output_directory',
type=str,
default='results',
help='directory that contains the results'
)
parser.add_argument(
'--experiment_name',
type=str,
default='experiment',
help='experiment name'
)
parser.add_argument(
'--model_name',
type=str,
default='run',
help='name for the model'
)
# ---------------
# Data parameters
# ---------------
parser.add_argument(
'--data_csv',
help='input data CSV file. If it has a split column, it will be used '
'for splitting (0: train, 1: validation, 2: test), otherwise the '
'dataset will be randomly split'
)
parser.add_argument('--data_train_csv', help='input train data CSV file')
parser.add_argument(
'--data_validation_csv',
help='input validation data CSV file'
)
parser.add_argument('--data_test_csv', help='input test data CSV file')
parser.add_argument(
'--data_hdf5',
help='input data HDF5 file. It is an intermediate preprocess version of'
' the input CSV created the first time a CSV file is used in the '
'same directory with the same name and a hdf5 extension'
)
parser.add_argument(
'--data_train_hdf5',
help='input train data HDF5 file. It is an intermediate preprocess '
'version of the input CSV created the first time a CSV file is '
'used in the same directory with the same name and a hdf5 '
'extension'
)
parser.add_argument(
'--data_validation_hdf5',
help='input validation data HDF5 file. It is an intermediate preprocess'
' version of the input CSV created the first time a CSV file is '
'used in the same directory with the same name and a hdf5 '
'extension'
)
parser.add_argument(
'--data_test_hdf5',
help='input test data HDF5 file. It is an intermediate preprocess '
'version of the input CSV created the first time a CSV file is '
'used in the same directory with the same name and a hdf5 '
'extension'
)
parser.add_argument(
'--metadata_json',
help='input metadata JSON file. It is an intermediate preprocess file'
' containing the mappings of the input CSV created the first time '
'a CSV file is used in the same directory with the same name and a'
' json extension'
)
parser.add_argument(
'-sspi',
'--skip_save_processed_input',
help='skips saving intermediate HDF5 and JSON files',
action='store_true',
default=False
)
parser.add_argument(
'-ssuo',
'--skip_save_unprocessed_output',
help='skips saving intermediate NPY output files',
action='store_true',
default=False
)
# ----------------
# Model parameters
# ----------------
model_definition = parser.add_mutually_exclusive_group(required=True)
model_definition.add_argument(
'-md',
'--model_definition',
type=yaml.safe_load,
help='model definition'
)
model_definition.add_argument(
'-mdf',
'--model_definition_file',
help='YAML file describing the model. Ignores --model_hyperparameters'
)
parser.add_argument(
'-mlp',
'--model_load_path',
help='path of a pretrained model to load as initialization'
)
parser.add_argument(
'-mrp',
'--model_resume_path',
help='path of a the model directory to resume training of'
)
parser.add_argument(
'-ssm',
'--skip_save_model',
action='store_true',
default=False,
help='disables saving model weights and hyperparameters each time '
             'the model improves. '
             'By default Ludwig saves model weights after each epoch '
             'the validation measure improves, but if the model is really big '
             'that can be time consuming. If you do not want to keep '
             'the weights and just want to find out what performance a model '
             'can get with a set of hyperparameters, use this parameter to skip it, '
'but the model will not be loadable later on.'
)
parser.add_argument(
'-ssp',
'--skip_save_progress',
action='store_true',
default=False,
help='disables saving progress each epoch. By default Ludwig saves '
'weights and stats after each epoch for enabling resuming '
'of training, but if the model is really big that can be '
             'time consuming and will use twice as much space, use '
'this parameter to skip it, but training cannot be resumed '
'later on. '
)
parser.add_argument(
'-ssl',
'--skip_save_log',
action='store_true',
default=False,
help='disables saving TensorBoard logs. By default Ludwig saves '
             'logs for the TensorBoard, but if it is not needed, turning it off '
'can slightly increase the overall speed.'
)
# ------------------
# Runtime parameters
# ------------------
parser.add_argument(
'-rs',
'--random_seed',
type=int,
default=42,
help='a random seed that is going to be used anywhere there is a call '
'to a random number generator: data splitting, parameter '
'initialization and training set shuffling'
)
parser.add_argument(
'-g',
'--gpus',
nargs='+',
type=int,
default=None,
help='list of GPUs to use'
)
parser.add_argument(
'-gf',
'--gpu_fraction',
type=float,
default=1.0,
help='fraction of gpu memory to initialize the process with'
)
parser.add_argument(
'-uh',
'--use_horovod',
action='store_true',
default=False,
help='uses horovod for distributed training'
)
parser.add_argument(
'-dbg',
'--debug',
action='store_true',
default=False,
help='enables debugging mode'
)
parser.add_argument(
'-l',
'--logging_level',
default='info',
help='the level of logging to use',
choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
)
args = parser.parse_args(sys_argv)
logging.basicConfig(
stream=sys.stdout,
level=logging_level_registry[args.logging_level],
format='%(message)s'
)
set_on_master(args.use_horovod)
if is_on_master():
print_ludwig('Experiment', LUDWIG_VERSION)
experiment(**vars(args))
if __name__ == '__main__':
contrib_command("experiment", *sys.argv)
cli(sys.argv[1:])
|
the-stack_106_21524
|
import sys, os
try:
import Queue as Queue
except ImportError:
import queue as Queue
import multiprocessing
import threading
import zipfile
from xml.dom.minidom import parseString
from xml.sax.saxutils import escape
import datetime, time
import traceback
import inspect
import json
import jam.common as common
import jam.db.db_modules as db_modules
from jam.items import *
from jam.dataset import *
from jam.sql import *
from jam.execute import process_request, execute_sql
from jam.third_party.six import exec_, print_
from werkzeug._compat import iteritems, iterkeys, text_type, string_types, to_bytes, to_unicode
class ServerDataset(Dataset, SQL):
def __init__(self, table_name='', soft_delete=True):
Dataset.__init__(self)
self.ID = None
self.table_name = table_name
self.gen_name = None
self._order_by = []
self.values = None
self.on_open = None
self.on_apply = None
self.on_count = None
self.on_field_get_text = None
self.soft_delete = soft_delete
self.virtual_table = False
def copy(self, filters=True, details=True, handlers=True):
if self.master:
raise DatasetException(u'A detail item can not be copied: %s' % self.item_name)
result = self._copy(filters, details, handlers)
return result
def free(self):
try:
for d in self.details:
d.__dict__ = {}
for f in self.filters:
f.field = None
f.__dict__ = {}
self.filters.__dict__ = {}
self.__dict__ = {}
except:
pass
def _copy(self, filters=True, details=True, handlers=True):
result = super(ServerDataset, self)._copy(filters, details, handlers)
result.table_name = self.table_name
result.gen_name = self.gen_name
result._order_by = self._order_by
result.soft_delete = self.soft_delete
result._primary_key = self._primary_key
result._deleted_flag = self._deleted_flag
result._master_id = self._master_id
result._master_rec_id = self._master_rec_id
result._primary_key_db_field_name = self._primary_key_db_field_name
result._deleted_flag_db_field_name = self._deleted_flag_db_field_name
result._master_id_db_field_name = self._master_id_db_field_name
result._master_rec_id_db_field_name = self._master_rec_id_db_field_name
return result
    def get_event(self, caption):
        # getattr needs the owning object as its first argument
        return getattr(self, caption)
def add_field(self, field_id, field_name, field_caption, data_type, required = False,
item = None, object_field = None, visible = True, index=0, edit_visible = True, edit_index = 0, read_only = False,
expand = False, word_wrap = False, size = 0, default_value=None, default = False, calculated = False, editable = False,
master_field = None, alignment=None, lookup_values=None, enable_typeahead=False, field_help=None,
field_placeholder=None, lookup_field1=None, lookup_field2=None, db_field_name=None, field_mask=None):
        if db_field_name is None:
db_field_name = field_name.upper()
field_def = self.add_field_def(field_id, field_name, field_caption, data_type, required, item, object_field,
lookup_field1, lookup_field2, visible, index, edit_visible, edit_index, read_only, expand, word_wrap, size,
default_value, default, calculated, editable, master_field, alignment, lookup_values, enable_typeahead,
field_help, field_placeholder, field_mask, db_field_name)
field = DBField(self, field_def)
self._fields.append(field)
return field
def add_filter(self, name, caption, field_name, filter_type = common.FILTER_EQ,
multi_select_all=None, data_type = None, visible = True, filter_help=None,
filter_placeholder=None):
filter_def = self.add_filter_def(name, caption, field_name, filter_type,
multi_select_all, data_type, visible, filter_help, filter_placeholder)
fltr = DBFilter(self, filter_def)
self.filters.append(fltr)
return fltr
def do_internal_open(self, params):
return self.select_records(params)
def do_apply(self, params=None, safe=False):
if not self.master and self.log_changes:
changes = {}
self.change_log.get_changes(changes)
if changes['data']:
data, error = self.apply_changes((changes, params), safe)
if error:
raise Exception(error)
else:
self.change_log.update(data)
def add_detail(self, table):
detail = Detail(self.task, self, table.item_name, table.item_caption, table.table_name)
self.details.append(detail)
detail.owner = self
detail.init_fields()
return detail
def detail_by_name(self, caption):
for table in self.details:
if table.item_name == caption:
return table
def get_record_count(self, params, safe=False):
if safe and not self.can_view():
raise Exception(self.task.language('cant_view') % self.item_caption)
result = None
if self.task.on_count:
result = self.task.on_count(self, params)
if result is None and self.on_count:
result = self.on_count(self, params)
elif result is None:
error_mess = ''
count = 0
for sql in self.get_record_count_queries(params):
rows = self.task.execute_select(sql)
count += rows[0][0]
result = count, error_mess
return result
def select_records(self, params, safe=False):
if safe and not self.can_view():
raise Exception(self.task.language('cant_view') % self.item_caption)
result = None
if self.task.on_open:
result = self.task.on_open(self, params)
if result is None and self.on_open:
result = self.on_open(self, params)
elif result is None:
error_mes = ''
limit = params['__limit']
offset = params['__offset']
sqls = self.get_select_queries(params)
if len(sqls) == 1:
rows = self.task.execute_select(sqls[0])
else:
rows = []
cut = False
for sql in sqls:
rows += self.task.execute_select(sql)
if limit or offset:
if len(rows) >= offset + limit:
rows = rows[offset:offset + limit]
cut = True
break
if (limit or offset) and not cut:
rows = rows[offset:offset + limit]
result = rows, error_mes
return result
def apply_delta(self, delta, safe=False):
sql = delta.apply_sql(safe)
return self.task.execute(sql)
def apply_changes(self, data, safe):
result = None
changes, params = data
if not params:
params = {}
delta = self.delta(changes)
if self.task.on_apply:
result = self.task.on_apply(self, delta, params)
if result is None and self.on_apply:
result = self.on_apply(self, delta, params)
if result is None:
result = self.apply_delta(delta, safe)
return result
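    # Editor's note: the next definition shadows the apply_changes method above; in this
    # demo build it replaces the real implementation and blocks writes to the database.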
def apply_changes(self, data, safe):
self.abort(u'This is a demo version with limited functionality. \
You are not allowed to write changes to the database. \
The full-featured version is located in the demo folder of the Jam.py package.')
def update_deleted(self):
if self._is_delta and len(self.details):
rec_no = self.rec_no
try:
for it in self:
if it.rec_deleted():
for detail in self.details:
fields = []
for field in detail.fields:
fields.append(field.field_name)
det = self.task.item_by_name(detail.item_name).copy()
where = {
det._master_id: self.ID,
det._master_rec_id: self._primary_key_field.value
}
det.open(fields=fields, expanded=detail.expanded, where=where)
if det.record_count():
it.edit()
for d in det:
detail.append()
for field in detail.fields:
f = det.field_by_name(field.field_name)
field.set_value(f.value, f.lookup_value)
detail.post()
it.post()
for d in detail:
d.record_status = common.RECORD_DELETED
finally:
self.rec_no = rec_no
def field_by_id(self, id_value, field_name):
return self.get_field_by_id((id_value, field_name))
def get_field_by_id(self, params):
id_value, fields = params
if not (isinstance(fields, tuple) or isinstance(fields, list)):
fields = [fields]
copy = self.copy()
copy.set_where(id=id_value)
copy.open(fields=fields)
if copy.record_count() == 1:
result = []
for field_name in fields:
result.append(copy.field_by_name(field_name).value)
if len(fields) == 1:
return result[0]
else:
return result
return
class Item(AbstrItem, ServerDataset):
def __init__(self, task, owner, name, caption, visible = True,
table_name='', view_template='', js_filename='', soft_delete=True):
AbstrItem.__init__(self, task, owner, name, caption, visible, js_filename=js_filename)
ServerDataset.__init__(self, table_name, soft_delete)
self.item_type_id = None
self.reports = []
def get_reports_info(self):
result = []
for report in self.reports:
result.append(report.ID)
return result
class Param(DBField):
def __init__(self, owner, param_def):
DBField.__init__(self, owner, param_def)
self.field_kind = common.PARAM_FIELD
if self.data_type == common.TEXT:
self.field_size = 1000
else:
self.field_size = 0
self.param_name = self.field_name
self.param_caption = self.field_caption
self._value = None
self._lookup_value = None
setattr(owner, self.param_name, self)
def system_field(self):
return False
def get_data(self):
return self._value
def set_data(self, value):
self._value = value
def get_lookup_data(self):
return self._lookup_value
def set_lookup_data(self, value):
self._lookup_value = value
def _do_before_changed(self):
pass
def _change_lookup_field(self, lookup_value=None, slave_field_values=None):
pass
def copy(self, owner):
result = Param(owner, self.param_caption, self.field_name, self.data_type,
self.lookup_item, self.lookup_field, self.required,
self.edit_visible, self.alignment)
return result
class Report(AbstrReport):
def __init__(self, task, owner, name='', caption='', visible = True,
table_name='', view_template='', js_filename=''):
AbstrReport.__init__(self, task, owner, name, caption, visible, js_filename=js_filename)
self.param_defs = []
self.params = []
self.template = view_template
self.template_name = None
self.template_content = {}
self.ext = 'ods'
self.on_before_generate = None
self.on_generate = None
self.on_after_generate = None
self.on_parsed = None
self.on_before_save_report = None
self.on_before_append = None
self.on_after_append = None
self.on_before_edit = None
self.on_after_edit = None
self.on_before_open = None
self.on_after_open = None
self.on_before_post = None
self.on_after_post = None
self.on_before_delete = None
self.on_after_delete = None
self.on_before_cancel = None
self.on_after_cancel = None
self.on_before_apply = None
self.on_after_apply = None
self.on_before_scroll = None
self.on_after_scroll = None
self.on_filter_record = None
self.on_field_changed = None
self.on_filters_applied = None
self.on_before_field_changed = None
self.on_filter_value_changed = None
self.on_field_validate = None
self.on_field_get_text = None
def add_param(self, caption='', name='', data_type=common.INTEGER,
obj=None, obj_field=None, required=True, visible=True, alignment=None,
multi_select=None, multi_select_all=None, enable_typeahead=None, lookup_values=None,
param_help=None, param_placeholder=None):
param_def = self.add_param_def(caption, name, data_type, obj,
obj_field, required, visible, alignment, multi_select, multi_select_all,
enable_typeahead, lookup_values, param_help, param_placeholder)
param = Param(self, param_def)
self.params.append(param)
def add_param_def(self, param_caption='', param_name='', data_type=common.INTEGER,
lookup_item=None, lookup_field=None, required=True, visible=True,
alignment=0, multi_select=False, multi_select_all=False, enable_typeahead=False,
lookup_values=None, param_help=None,
param_placeholder=None):
param_def = [None for i in range(len(FIELD_DEF))]
param_def[FIELD_NAME] = param_name
param_def[NAME] = param_caption
param_def[FIELD_DATA_TYPE] = data_type
param_def[REQUIRED] = required
param_def[LOOKUP_ITEM] = lookup_item
param_def[LOOKUP_FIELD] = lookup_field
param_def[FIELD_EDIT_VISIBLE] = visible
param_def[FIELD_ALIGNMENT] = alignment
param_def[FIELD_MULTI_SELECT] = multi_select
param_def[FIELD_MULTI_SELECT_ALL] = multi_select_all
param_def[FIELD_ENABLE_TYPEAHEAD] = enable_typeahead
param_def[FIELD_LOOKUP_VALUES] = lookup_values
param_def[FIELD_HELP] = param_help
param_def[FIELD_PLACEHOLDER] = param_placeholder
self.param_defs.append(param_def)
return param_def
def prepare_params(self):
for param in self.params:
if param.lookup_item and type(param.lookup_item) == int:
param.lookup_item = self.task.item_by_ID(param.lookup_item)
if param.lookup_field and type(param.lookup_field) == int:
param.lookup_field = param.lookup_item._field_by_ID(param.lookup_field).field_name
if param.lookup_values and type(param.lookup_values) == int:
try:
param.lookup_values = self.task.lookup_lists[param.lookup_values]
except:
pass
def copy(self):
result = self.__class__(self.task, None, self.item_name, self.item_caption, self.visible,
            '', self.template, '')
result.on_before_generate = self.on_before_generate
result.on_generate = self.on_generate
result.on_after_generate = self.on_after_generate
result.on_before_save_report = self.on_before_save_report
result.on_parsed = self.on_parsed
result.on_convert_report = self.owner.on_convert_report
result.param_defs = self.param_defs
result.template_content = self.template_content.copy()
result.template_name = self.template_name
for param_def in result.param_defs:
param = Param(result, param_def)
result.params.append(param)
result.prepare_params()
return result
def free(self):
for p in self.params:
p.field = None
self.__dict__ = {}
def print_report(self, param_values, url, ext=None, safe=False):
if safe and not self.can_view():
raise Exception(self.task.language('cant_view') % self.item_caption)
copy = self.copy()
copy.ext = ext
result = copy.generate(param_values, url, ext)
copy.free()
return result
def generate_file_name(self, ext=None):
if not ext:
ext = 'ods'
file_name = self.item_name + '_' + datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S.%f') + '.' + ext
file_name = escape(file_name, {':': '-', '/': '_', '\\': '_'})
return os.path.abspath(os.path.join(self.task.work_dir, 'static', 'reports', file_name))
def generate(self, param_values, url, ext):
self.extension = ext
self.url = url
template = self.template
for i, param in enumerate(self.params):
            param.set_data(param_values[i])
if self.on_before_generate:
self.on_before_generate(self)
if template != self.template:
self.template_content = None
if self.template:
if not self.template_content:
self.parse_template()
if self.on_parsed:
self.on_parsed(self)
self.content_name = os.path.join(self.task.work_dir, 'reports', 'content%s.xml' % time.time())
self.content = open(self.content_name, 'wb')
try:
self.report_filename = self.generate_file_name()
file_name = os.path.basename(self.report_filename)
static_dir = os.path.dirname(self.report_filename)
if not os.path.exists(static_dir):
os.makedirs(static_dir)
self.content.write(self.template_content['header'])
self.content.write(self.template_content['columns'])
self.content.write(self.template_content['rows'])
if self.on_generate:
self.on_generate(self)
self.content.write(self.template_content['footer'])
self.save()
finally:
try:
if not self.content.closed:
self.content.close()
if os.path.exists(self.content_name):
os.remove(self.content_name)
except:
pass
if ext and (ext != 'ods'):
converted = False
if self.on_convert_report:
try:
self.on_convert_report(self)
converted = True
except:
pass
if not converted:
converted = self.task.convert_report(self, ext)
converted_file = self.report_filename.replace('.ods', '.' + ext)
if converted and os.path.exists(converted_file):
self.delete_report(self.report_filename)
file_name = file_name.replace('.ods', '.' + ext)
self.report_filename = os.path.join(self.task.work_dir, 'static', 'reports', file_name)
self.report_url = self.report_filename
if self.url:
self.report_url = os.path.join(self.url, 'static', 'reports', file_name)
else:
if self.on_generate:
self.on_generate(self)
if self.on_after_generate:
self.on_after_generate(self)
return self.report_url
def delete_report(self, file_name):
report_name = os.path.join(self.task.work_dir, 'static', 'reports', file_name)
os.remove(report_name)
def find(self, text, search, beg=None, end=None):
return to_bytes(text, 'utf-8').find(to_bytes(search, 'utf-8'), beg, end)
def rfind(self, text, search, beg=None, end=None):
return to_bytes(text, 'utf-8').rfind(to_bytes(search, 'utf-8'), beg, end)
def replace(self, text, find, replace):
return to_bytes(text, 'utf-8').replace(to_bytes(find, 'utf-8'), to_bytes(replace, 'utf-8'))
def parse_template(self):
if not os.path.isabs(self.template):
self.template_name = os.path.join(self.task.work_dir, 'reports', self.template)
else:
self.template_name = self.template
z = zipfile.ZipFile(self.template_name, 'r')
try:
data = z.read('content.xml')
finally:
z.close()
band_tags = []
bands = {}
colum_defs = []
header = ''
columns = ''
rows = ''
footer = ''
repeated_rows = None
if data:
dom = parseString(data)
try:
tables = dom.getElementsByTagName('table:table')
if len(tables) > 0:
table = tables[0]
for child in table.childNodes:
if child.nodeName == 'table:table-column':
repeated = child.getAttribute('table:number-columns-repeated')
if not repeated:
repeated = 1
colum_defs.append(['', repeated])
if child.nodeName == 'table:table-row':
repeated = child.getAttribute('table:number-rows-repeated')
if repeated and repeated.isdigit():
repeated_rows = to_bytes(repeated, 'utf-8')
for row_child in child.childNodes:
if row_child.nodeName == 'table:table-cell':
text = row_child.getElementsByTagName('text:p')
if text.length > 0:
band_tags.append(text[0].childNodes[0].nodeValue)
break
start = 0
columns_start = 0
for col in colum_defs:
start = self.find(data, '<table:table-column', start)
if columns_start == 0:
columns_start = start
end = self.find(data, '/>', start)
col_text = data[start: end + 2]
columns = to_bytes('%s%s' % (columns, col_text), 'utf-8')
col[0] = data[start: end + 2]
start = end + 2
columns_end = start
header = data[0:columns_start]
assert len(band_tags) > 0, 'No bands in the report template'
positions = []
start = 0
for tag in band_tags:
text = '>%s<' % tag
i = self.find(data, text)
i = self.rfind(data, '<table:table-row', start, i)
positions.append(i)
start = i
if repeated_rows and int(repeated_rows) > 1000:
i = self.find(data, repeated_rows)
i = self.rfind(data, '<table:table-row', start, i)
band_tags.append('$$$end_of_report')
positions.append(i)
rows = data[columns_end:positions[0]]
for i, tag in enumerate(band_tags):
start = positions[i]
try:
end = positions[i + 1]
except:
end = self.find(data, '</table:table>', start)
bands[tag] = self.replace(data[start: end], str(tag), '')
footer = data[end:len(data)]
self.template_content = {}
self.template_content['bands'] = bands
self.template_content['colum_defs'] = colum_defs
self.template_content['header'] = header
self.template_content['columns'] = columns
self.template_content['rows'] = rows
self.template_content['footer'] = footer
finally:
dom.unlink()
del(dom)
def hide_columns(self, col_list):
def convert_str_to_int(string):
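            # Editor's note: converts a spreadsheet column label to its 1-based index,
            # e.g. 'A' -> 1, 'Z' -> 26, 'AA' -> 27, so callers can hide columns by letter.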
s = string.upper()
base = ord('A')
mult = ord('Z') - base + 1
result = s
if type(s) == str:
result = 0
chars = []
for i in range(len(s)):
chars.append(s[i])
for i in range(len(chars) - 1, -1, -1):
result += (ord(chars[i]) - base + 1) * (mult ** (len(chars) - i - 1))
return result
def remove_repeated(col, repeated):
result = col
p = self.find(col, 'table:number-columns-repeated')
if p != -1:
r = self.find(col, str(repeated), p)
if r != -1:
for i in range(r, 100):
if col[i] in ("'", '"'):
result = self.replace(col, col[p:i+1], '')
break
return result
if self.template_content:
ints = []
for i in col_list:
ints.append(convert_str_to_int(i))
colum_defs = self.template_content['colum_defs']
columns = ''
index = 1
for col, repeated in colum_defs:
repeated = int(repeated)
if repeated > 1:
col = remove_repeated(col, repeated)
for i in range(repeated):
cur_col = col
if index in ints:
cur_col = cur_col[0:-2] + ' table:visibility="collapse"/>'
columns += cur_col
index += 1
self.template_content['colum_defs'] = colum_defs
self.template_content['columns'] = columns
def print_band(self, band, dic=None, update_band_text=None):
text = self.template_content['bands'][band]
if dic:
d = dic.copy()
for key, value in iteritems(d):
if type(value) in string_types:
d[key] = escape(value)
cell_start = 0
cell_start_tag = to_bytes('<table:table-cell', 'utf-8')
cell_type_tag = to_bytes('office:value-type="string"', 'utf-8')
calcext_type_tag = to_bytes('calcext:value-type="string"', 'utf-8')
start_tag = to_bytes('<text:p>', 'utf-8')
end_tag = to_bytes('</text:p>', 'utf-8')
while True:
cell_start = self.find(text, cell_start_tag, cell_start)
if cell_start == -1:
break
else:
start = self.find(text, start_tag, cell_start)
if start != -1:
end = self.find(text, end_tag, start + len(start_tag))
if end != -1:
text_start = start + len(start_tag)
text_end = end
cell_text = text[text_start:text_end]
cell_text_start = self.find(cell_text, to_bytes('%(', 'utf-8'), 0)
if cell_text_start != -1:
end = self.find(cell_text, to_bytes(')s', 'utf-8'), cell_text_start + 2)
if end != -1:
end += 2
val = cell_text[cell_text_start:end]
key = val[2:-2]
value = d.get(to_unicode(key, 'utf-8'))
if isinstance(value, DBField):
raise Exception('Report: "%s" band: "%s" key "%s" a field object is passed. Specify the value attribute.' % \
(self.item_name, band, key))
elif not value is None:
val = to_unicode(val, 'utf-8')
val = val % d
val = to_bytes(val, 'utf-8')
if type(value) == float:
val = self.replace(val, '.', common.DECIMAL_POINT)
else:
if not key in iterkeys(d):
print('Report: "%s" band: "%s" key "%s" not found in the dictionary' % \
(self.item_name, band, key))
cell_text = to_bytes('%s%s%s', 'utf-8') % (cell_text[:cell_text_start], val, cell_text[end:])
text = to_bytes('', 'utf-8').join([text[:text_start], cell_text, text[text_end:]])
if type(value) in (int, float):
start_text = text[cell_start:start]
office_value = value
start_text = self.replace(start_text, cell_type_tag, 'office:value-type="float" office:value="%s"' % office_value)
start_text = self.replace(start_text, calcext_type_tag, 'calcext:value-type="float"')
text = to_bytes('', 'utf-8').join([text[:cell_start], start_text, text[start:]])
cell_start += 1
if update_band_text:
text = update_band_text(text)
self.content.write(text)
def save(self):
self.content.close()
z = None
self.zip_file = None
try:
self.zip_file = zipfile.ZipFile(self.report_filename, 'w', zipfile.ZIP_DEFLATED)
z = zipfile.ZipFile(self.template_name, 'r')
if self.on_before_save_report:
self.on_before_save_report(self)
for file_name in z.namelist():
data = z.read(file_name)
if file_name == 'content.xml':
self.zip_file.write(self.content_name, file_name)
else:
self.zip_file.writestr(file_name, data)
finally:
if z:
z.close()
if self.zip_file:
self.zip_file.close()
def cur_to_str(self, value):
return common.cur_to_str(value)
def date_to_str(self, value):
return common.date_to_str(value)
def datetime_to_str(self, value):
return common.datetime_to_str(value)
def _set_modified(self, value):
pass
class Consts(object):
def __init__(self):
self.TEXT = common.TEXT
self.INTEGER = common.INTEGER
self.FLOAT = common.FLOAT
self.CURRENCY = common.CURRENCY
self.DATE = common.DATE
self.DATETIME = common.DATETIME
self.BOOLEAN = common.BOOLEAN
self.LONGTEXT = common.LONGTEXT
self.ITEM_FIELD = common.ITEM_FIELD
self.FILTER_FIELD = common.FILTER_FIELD
self.PARAM_FIELD = common.PARAM_FIELD
self.FILTER_EQ = common.FILTER_EQ
self.FILTER_NE = common.FILTER_NE
self.FILTER_LT = common.FILTER_LT
self.FILTER_LE = common.FILTER_LE
self.FILTER_GT = common.FILTER_GT
self.FILTER_GE = common.FILTER_GE
self.FILTER_IN = common.FILTER_IN
self.FILTER_NOT_IN = common.FILTER_NOT_IN
self.FILTER_RANGE = common.FILTER_RANGE
self.FILTER_ISNULL = common.FILTER_ISNULL
self.FILTER_EXACT = common.FILTER_EXACT
self.FILTER_CONTAINS = common.FILTER_CONTAINS
self.FILTER_STARTWITH = common.FILTER_STARTWITH
self.FILTER_ENDWITH = common.FILTER_ENDWITH
self.FILTER_CONTAINS_ALL = common.FILTER_CONTAINS_ALL
self.ALIGN_LEFT = common.ALIGN_LEFT
self.ALIGN_CENTER = common.ALIGN_CENTER
self.ALIGN_RIGHT = common.ALIGN_RIGHT
self.STATE_INACTIVE = common.STATE_INACTIVE
self.STATE_BROWSE = common.STATE_BROWSE
self.STATE_INSERT = common.STATE_INSERT
self.STATE_EDIT = common.STATE_EDIT
self.STATE_DELETE = common.STATE_DELETE
self.RECORD_UNCHANGED = common.RECORD_UNCHANGED
self.RECORD_INSERTED = common.RECORD_INSERTED
self.RECORD_MODIFIED = common.RECORD_MODIFIED
self.RECORD_DETAILS_MODIFIED = common.RECORD_DETAILS_MODIFIED
self.RECORD_DELETED = common.RECORD_DELETED
class ConCounter(object):
def __init__(self):
self.val = 0
class AbstractServerTask(AbstrTask):
def __init__(self, app, name, caption, js_filename, db_type, db_server = '',
db_database = '', db_user = '', db_password = '', host='', port='',
encoding='', con_pool_size=1, mp_pool=False, persist_con=False):
AbstrTask.__init__(self, None, None, None, None)
self.app = app
self.consts = Consts()
self.items = []
self.lookup_lists = {}
self.ID = None
self.item_name = name
self.item_caption = caption
self.js_filename = js_filename
self.db_type = db_type
self.db_server = db_server
self.db_database = db_database
self.db_user = db_user
self.db_password = db_password
self.db_host = host
self.db_port = port
self.db_encoding = encoding
self.db_module = db_modules.get_db_module(self.db_type)
self.on_before_request = None
self.on_after_request = None
self.on_open = None
self.on_apply = None
self.on_count = None
self.work_dir = os.getcwd()
self.con_pool_size = 0
self.mod_count = 0
self.modules = []
self.conversion_lock = threading.Lock()
self.con_pool_size = con_pool_size
self.mp_pool = mp_pool
self.persist_con = persist_con
self.con_counter = ConCounter()
#~ self.persist_con_busy = 0
if self.mp_pool:
if self.persist_con:
self.create_connection_pool(1)
self.create_mp_connection_pool(self.con_pool_size)
else:
self.create_connection_pool(self.con_pool_size)
def get_version(self):
return common.SETTINGS['VERSION']
version = property (get_version)
def create_connection_pool(self, con_count):
self.queue = Queue.Queue()
pid = None
for i in range(con_count):
p = threading.Thread(target=process_request, args=(pid, self.item_name,
self.queue, self.db_type, self.db_server, self.db_database, self.db_user,
self.db_password, self.db_host, self.db_port,
self.db_encoding, self.mod_count))
p.daemon = True
p.start()
def create_mp_connection_pool(self, con_count):
self.mp_queue = multiprocessing.Queue()
self.mp_manager = multiprocessing.Manager()
pid = os.getpid()
for i in range(con_count):
p = multiprocessing.Process(target=process_request, args=(pid, self.item_name,
self.mp_queue, self.db_type, self.db_server, self.db_database, self.db_user,
self.db_password, self.db_host, self.db_port,
self.db_encoding, self.mod_count))
p.daemon = True
p.start()
def create_connection(self):
return self.db_module.connect(self.db_database, self.db_user, \
self.db_password, self.db_host, self.db_port, self.db_encoding, self.db_server)
def send_to_pool(self, queue, result_queue, command, params=None, call_proc=False, select=False):
request = {}
request['queue'] = result_queue
request['command'] = command
request['params'] = params
request['call_proc'] = call_proc
request['select'] = select
request['mod_count'] = self.mod_count
queue.put(request)
return result_queue.get()
def execute_in_pool(self, command, params=None, call_proc=False, select=False):
result_queue = Queue.Queue()
result = self.send_to_pool(self.queue, result_queue, command, params, call_proc, select)
return result
def execute_in_mp_poll(self, command, params=None, call_proc=False, select=False):
result_queue = self.mp_manager.Queue()
result = self.send_to_pool(self.mp_queue, result_queue, command, params, call_proc, select)
return result
def execute(self, command, params=None, call_proc=False, select=False):
if self.mp_pool:
if self.persist_con and not self.con_counter.val:
self.con_counter.val += 1
try:
result = self.execute_in_pool(command, params, call_proc, select)
finally:
self.con_counter.val -= 1
else:
result = self.execute_in_mp_poll(command, params, call_proc, select)
else:
result = self.execute_in_pool(command, params, call_proc, select)
return result
def callproc(self, command, params=None):
result_set, error = self.execute(command, params, call_proc=True)
if not error:
return result_set
def execute_select(self, command, params=None):
result, error = self.execute(command, params, select=True)
if error:
raise Exception(error)
else:
return result
def get_module_name(self):
return str(self.item_name)
def compile_item(self, item):
item.module_name = None
code = item.server_code
item.module_name = item.get_module_name()
item_module = type(sys)(item.module_name)
item_module.__dict__['this'] = item
sys.modules[item.module_name] = item_module
item.task.modules.append(item.module_name)
if item.owner:
sys.modules[item.owner.get_module_name()].__dict__[item.module_name] = item_module
if code:
try:
code = to_bytes(code, 'utf-8')
except Exception as e:
print(e)
comp_code = compile(code, item.module_name, "exec")
exec_(comp_code, item_module.__dict__)
item_module.__dict__['__loader__'] = item._loader
funcs = inspect.getmembers(item_module, inspect.isfunction)
item._events = []
for func_name, func in funcs:
item._events.append((func_name, func))
setattr(item, func_name, func)
del code
def add_item(self, item):
self.items.append(item)
item.owner = self
return item
def find_item(self, g_index, i_index):
return self.items[g_index].items[i_index]
def convert_report(self, report, ext):
converted = False
with self.conversion_lock:
try:
from subprocess import Popen, STDOUT, PIPE
if os.name == "nt":
import _winreg
regpath = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\soffice.exe"
root = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, regpath)
s_office = _winreg.QueryValue(root, "")
else:
s_office = "soffice"
convertion = Popen([s_office, '--headless', '--convert-to', ext,
# convertion = Popen([s_office, '--headless', '--convert-to', '--norestore', ext,
report.report_filename, '--outdir', os.path.join(self.work_dir, 'static', 'reports') ],
stderr=STDOUT,stdout=PIPE)#, shell=True)
out, err = convertion.communicate()
converted = True
except Exception as e:
print(e)
return converted
class DebugException(Exception):
pass
class Task(AbstractServerTask):
def __init__(self, app, name, caption, js_filename,
db_type, db_server = '', db_database = '', db_user = '', db_password = '',
host='', port='', encoding='', con_pool_size=4, mp_pool=True,
persist_con=True):
AbstractServerTask.__init__(self, app, name, caption, js_filename,
db_type, db_server, db_database, db_user, db_password,
host, port, encoding, con_pool_size, mp_pool, persist_con)
self.on_created = None
self.on_login = None
self.on_ext_request = None
self.compress_history = True
self.init_dict = {}
for key, value in iteritems(self.__dict__):
self.init_dict[key] = value
def get_safe_mode(self):
return self.app.admin.safe_mode
safe_mode = property (get_safe_mode)
def drop_indexes(self):
from jam.adm_server import drop_indexes_sql
sqls = drop_indexes_sql(self.app.admin)
for s in sqls:
try:
self.execute(s)
except:
pass
def restore_indexes(self):
from jam.adm_server import restore_indexes_sql
sqls = restore_indexes_sql(self.app.admin)
for s in sqls:
try:
self.execute(s)
except:
pass
def copy_database(self, dbtype, database=None, user=None, password=None,
host=None, port=None, encoding=None, server=None, limit = 4096):
def convert_sql(item, sql, db_module):
new_case = item.task.db_module.identifier_case
old_case = db_module.identifier_case
if old_case('a') != new_case('a'):
if new_case(item.table_name) == item.table_name:
sql = sql.replace(item.table_name, old_case(item.table_name))
for field in item.fields:
if new_case(field.db_field_name) == field.db_field_name and \
not field.db_field_name.upper() in common.SQL_KEYWORDS:
field_name = '"%s"' % field.db_field_name
sql = sql.replace(field_name, old_case(field_name))
return sql
print('copying started')
connection = None
db_module = db_modules.get_db_module(dbtype)
        print('copying dropping indexes')
self.drop_indexes()
if hasattr(self.db_module, 'set_foreign_keys'):
self.execute(self.db_module.set_foreign_keys(False))
try:
for group in self.items:
for it in group.items:
if it.item_type != 'report':
item = it.copy(handlers=False, filters=False, details=False)
if item.table_name and not item.virtual_table:
print('copying table %s' % item.item_name)
params = {'__expanded': False, '__offset': 0, '__limit': 0, '__filters': []}
rec_count, mess = item.get_record_count(params)
sql = item.get_record_count_query(params, db_module)
sql = convert_sql(item, sql, db_module)
connection, (result, error) = \
execute_sql(db_module, server, database, user, password,
host, port, encoding, connection, sql,
params=None, select=True)
record_count = result[0][0]
loaded = 0
max_id = 0
item.open(expanded=False, open_empty=True)
if record_count and rec_count != record_count:
self.execute('DELETE FROM "%s"' % item.table_name)
while True:
params = {'__expanded': False, '__offset': loaded, '__limit': limit, '__fields': [], '__filters': []}
sql = item.get_select_statement(params, db_module)
sql = convert_sql(item, sql, db_module)
connection, (result, error) = \
execute_sql(db_module, server, database, user, password,
host, port, encoding, connection, sql,
params=None, select=True)
if not error:
for i, r in enumerate(result):
item.append()
j = 0
for field in item.fields:
if not field.master_field:
field.set_data(r[j])
j += 1
if item._primary_key and item._primary_key_field.value > max_id:
max_id = item._primary_key_field.value
item.post()
item.apply()
else:
raise Exception(error)
records = len(result)
loaded += records
print('copying table %s: %d%%' % (item.item_name, int(loaded * 100 / record_count)))
if records == 0 or records < limit:
break
if item.gen_name:
sql = self.db_module.restart_sequence_sql(item.gen_name, max_id + 1)
self.execute(sql)
finally:
print('copying restoring indexes')
self.restore_indexes()
if hasattr(self.db_module, 'set_foreign_keys'):
self.execute(self.db_module.set_foreign_keys(True))
print('copying finished')
class AdminTask(AbstractServerTask):
def __init__(self, app, name, caption, js_filename,
db_type, db_server = '', db_database = '', db_user = '', db_password = '',
host='', port='', encoding=''):
AbstractServerTask.__init__(self, app, name, caption, js_filename,
db_type, db_server, db_database, db_user, db_password, host, port, encoding, 2)
filepath, filename = os.path.split(__file__)
self.cur_path = filepath
self.edited_docs = []
def create_task(self):
from jam.adm_server import create_task
return create_task(self.app)
def reload_task(self):
from jam.adm_server import reload_task
reload_task(self)
def update_events_code(self):
from jam.adm_server import update_events_code
update_events_code(self)
class Group(AbstrGroup):
def __init__(self, task, owner, name, caption, view_template=None, js_filename=None, visible=True, item_type_id=0):
AbstrGroup.__init__(self, task, owner, name, caption, visible, item_type_id, js_filename)
self.ID = None
self.view_template = view_template
self.js_filename = js_filename
if item_type_id == common.REPORTS_TYPE:
self.on_convert_report = None
def add_catalog(self, name, caption, table_name, visible=True, view_template='', js_filename='', soft_delete=True):
result = Item(self.task, self, name, caption, visible, table_name, view_template, js_filename, soft_delete)
result.item_type_id = common.ITEM_TYPE
return result
def add_table(self, name, caption, table_name, visible=True, view_template='', js_filename='', soft_delete=True):
result = Item(self.task, self, name, caption, visible, table_name, view_template, js_filename, soft_delete)
result.item_type_id = common.TABLE_TYPE
return result
def add_report(self, name, caption, table_name, visible=True, view_template='', js_filename='', soft_delete=True):
result = Report(self.task, self, name, caption, visible, table_name, view_template, js_filename)
result.item_type_id = common.REPORT_TYPE
return result
class Detail(AbstrDetail, ServerDataset):
def __init__(self, task, owner, name, caption, table_name):
AbstrDetail.__init__(self, task, owner, name, caption, True)
ServerDataset.__init__(self, table_name)
self.prototype = self.task.item_by_name(self.item_name)
self.master = owner
def init_fields(self):
self.field_defs = []
for field_def in self.prototype.field_defs:
self.field_defs.append(list(field_def))
for field_def in self.field_defs:
field = DBField(self, field_def)
self._fields.append(field)
self._primary_key = self.prototype._primary_key
self._deleted_flag = self.prototype._deleted_flag
self._master_id = self.prototype._master_id
self._master_rec_id = self.prototype._master_rec_id
def do_internal_post(self):
return {'success': True, 'id': None, 'message': '', 'detail_ids': None}
def where_clause(self, query, db_module):
master_id = query['__master_id']
master_rec_id = query['__master_rec_id']
if master_id and master_rec_id:
result = super(Detail, self).where_clause(query, db_module)
if self._master_id:
clause = '%s."%s"=%s AND %s."%s"=%s' % \
(self.table_alias(), self._master_id_db_field_name, str(master_id),
self.table_alias(), self._master_rec_id_db_field_name, str(master_rec_id))
else:
clause = '%s."%s"=%s' % \
(self.table_alias(), self._master_rec_id_db_field_name, str(master_rec_id))
if result:
result += ' AND ' + clause
else:
result = ' WHERE ' + clause
return result
else:
raise Exception('Invalid request parameter')
def get_filters(self):
return self.prototype.filters
def get_reports_info(self):
return []
|
the-stack_106_21525
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import os
import sys
from pkg_resources import resource_listdir, resource_string
from trac.loader import load_components
from trac.test import EnvironmentStub, Mock, MockPerm
from trac.util.text import printout
from trac.web.chrome import web_context
from trac.web.href import Href
from trac.wiki.formatter import Formatter
from trac.wiki.model import WikiPage
TURN_ON = '\033[30m\033[41m'
TURN_OFF = '\033[m'
class DefaultWikiChecker(Formatter):
def __init__(self, env, context, name):
Formatter.__init__(self, env, context)
self.__name = name
self.__marks = []
self.__super = super(DefaultWikiChecker, self)
def handle_match(self, fullmatch):
rv = self.__super.handle_match(fullmatch)
if rv:
if not isinstance(rv, basestring):
text = unicode(rv)
else:
text = rv
if text.startswith('<a ') and text.endswith('</a>') and \
'class="missing ' in text:
self.__marks.append((fullmatch.start(0), fullmatch.end(0)))
return rv
def handle_code_block(self, line, startmatch=None):
prev_processor = getattr(self, 'code_processor', None)
try:
return self.__super.handle_code_block(line, startmatch)
finally:
processor = self.code_processor
if startmatch and processor and processor != prev_processor and \
processor.error:
self.__marks.append((startmatch.start(0), startmatch.end(0)))
def format(self, text, out=None):
return self.__super.format(SourceWrapper(self, text), out)
def next_callback(self, line, idx):
marks = self.__marks
if marks:
buf = []
prev = 0
for start, end in self.__marks:
buf.append(line[prev:start])
buf.append(TURN_ON)
buf.append(line[start:end])
buf.append(TURN_OFF)
prev = end
buf.append(line[prev:])
printout('%s:%d:%s' % (self.__name, idx + 1, ''.join(buf)))
self.__marks[:] = ()
class SourceWrapper(object):
def __init__(self, formatter, text):
self.formatter = formatter
self.text = text
def __iter__(self):
return LinesIterator(self.formatter, self.text.splitlines())
class LinesIterator(object):
def __init__(self, formatter, lines):
self.formatter = formatter
self.lines = lines
self.idx = 0
self.current = None
def next(self):
idx = self.idx
if self.current is not None:
self.formatter.next_callback(self.current, idx)
if idx >= len(self.lines):
self.current = None
raise StopIteration
self.idx = idx + 1
self.current = self.lines[idx]
return self.current
class DummyIO(object):
def write(self, data):
pass
def parse_args():
from optparse import OptionParser
parser = OptionParser(usage='Usage: %prog [options] [PAGES...]')
parser.add_option('-d', '--download', dest='download', default=False,
action='store_true',
help='Download default pages from trac.edgewall.org '
'before checking')
parser.add_option('-p', '--prefix', dest='prefix', default='',
help='Prepend "prefix/" to the page when downloading')
return parser.parse_args()
def download_default_pages(names, prefix):
from httplib import HTTPSConnection
host = 'trac.edgewall.org'
if prefix and not prefix.endswith('/'):
prefix += '/'
conn = HTTPSConnection(host)
for name in names:
if name in ('SandBox', 'TitleIndex', 'WikiStart'):
continue
sys.stdout.write('Downloading %s%s' % (prefix, name))
conn.request('GET', '/wiki/%s%s?format=txt' % (prefix, name))
response = conn.getresponse()
content = response.read()
if prefix and (response.status != 200 or not content):
sys.stdout.write(' %s' % name)
conn.request('GET', '/wiki/%s?format=txt' % name)
response = conn.getresponse()
content = response.read()
if response.status == 200 and content:
with open('trac/wiki/default-pages/' + name, 'w') as f:
lines = content.replace('\r\n', '\n').splitlines(True)
f.write(''.join(line for line in lines
if line.strip() != '[[TranslatedPages]]'))
sys.stdout.write('\tdone.\n')
else:
sys.stdout.write('\tmissing or empty.\n')
conn.close()
def main():
options, args = parse_args()
names = sorted(name for name in resource_listdir('trac.wiki',
'default-pages')
if not name.startswith('.'))
if args:
args = sorted(set(names) & set(map(os.path.basename, args)))
else:
args = names
if options.download:
download_default_pages(args, options.prefix)
env = EnvironmentStub(disable=['trac.mimeview.pygments.*'])
load_components(env)
with env.db_transaction:
for name in names:
wiki = WikiPage(env, name)
wiki.text = resource_string('trac.wiki', 'default-pages/' +
name).decode('utf-8')
if wiki.text:
wiki.save('trac', '')
else:
printout('%s: Skipped empty page' % name)
req = Mock(href=Href('/'), abs_href=Href('http://localhost/'),
perm=MockPerm())
for name in args:
wiki = WikiPage(env, name)
if not wiki.exists:
continue
context = web_context(req, wiki.resource)
out = DummyIO()
DefaultWikiChecker(env, context, name).format(wiki.text, out)
if __name__ == '__main__':
main()
|
the-stack_106_21526
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v7.resources.types import ad_group_criterion_simulation
from google.ads.googleads.v7.services.types import ad_group_criterion_simulation_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AdGroupCriterionSimulationServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AdGroupCriterionSimulationService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_ad_group_criterion_simulation: gapic_v1.method.wrap_method(
self.get_ad_group_criterion_simulation,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_ad_group_criterion_simulation(self) -> typing.Callable[
[ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest],
ad_group_criterion_simulation.AdGroupCriterionSimulation]:
raise NotImplementedError
__all__ = (
'AdGroupCriterionSimulationServiceTransport',
)
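# Editor's sketch (an assumption, not part of the generated client): a minimal concrete
# transport satisfying the abstract surface above. A real transport, such as the gRPC one
# shipped with the library, would send the request over a channel instead of raising.
class _SketchAdGroupCriterionSimulationServiceTransport(
        AdGroupCriterionSimulationServiceTransport):
    def close(self):
        pass  # nothing to release in this sketch
    @property
    def get_ad_group_criterion_simulation(self):
        def _call(request, timeout=None, metadata=None):
            raise NotImplementedError('sketch only: no channel is wired up here')
        return _call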
|
the-stack_106_21527
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2018-2020 The Ion Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
wait_until,
set_node_times,
)
class DisconnectBanTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Test setban and listbanned RPCs")
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
self.nodes[1].setban("127.0.0.1", "add")
wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("clearbanned: successfully clear ban list")
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].setban("127.0.0.0/24", "add")
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
self.nodes[1].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.log.info("setban: test persistence across node restart")
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
self.bump_mocktime(2)
set_node_times(self.nodes, self.mocktime)
wait_until(lambda: len(self.nodes[1].listbanned()) == 3, timeout=10)
self.stop_node(1)
self.start_node(1)
listAfterShutdown = self.nodes[1].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
# Clear ban lists
self.nodes[1].clearbanned()
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Test disconnectnode RPCs")
self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
node1 = self.nodes[0].getpeerinfo()[0]['addr']
assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
connect_nodes_bi(self.nodes, 0, 1) # reconnect the node
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(nodeid=id1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()
|
the-stack_106_21532
|
# --------------------------------------------------------
# DaSiamRPN
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
import cv2
import torch
import numpy as np
def to_numpy(tensor):
if torch.is_tensor(tensor):
return tensor.cpu().numpy()
elif type(tensor).__module__ != 'numpy':
raise ValueError("Cannot convert {} to numpy array"
.format(type(tensor)))
return tensor
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
def im_to_numpy(img):
img = to_numpy(img)
img = np.transpose(img, (1, 2, 0)) # H*W*C
return img
def im_to_torch(img):
img = np.transpose(img, (2, 0, 1)) # C*H*W
img = to_torch(img).float()
return img
def torch_to_img(img):
img = to_numpy(torch.squeeze(img, 0))
img = np.transpose(img, (1, 2, 0)) # H*W*C
return img
def get_subwindow_tracking(im, pos, model_sz, original_sz, avg_chans, out_mode='torch', new=False):
"""
获取图像及目标信息
:param im:
:param pos:
:param model_sz:
:param original_sz:
:param avg_chans:
:param out_mode='torch':
:param new=False:
"""
if isinstance(pos, float):
pos = [pos, pos]
sz = original_sz
im_sz = im.shape
c = (original_sz+1) / 2
context_xmin = round(pos[0] - c) # floor(pos(2) - sz(2) / 2);
context_xmax = context_xmin + sz - 1
context_ymin = round(pos[1] - c) # floor(pos(1) - sz(1) / 2);
context_ymax = context_ymin + sz - 1
left_pad = int(max(0., -context_xmin))
top_pad = int(max(0., -context_ymin))
right_pad = int(max(0., context_xmax - im_sz[1] + 1))
bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
    # shift the context coordinates by the padding so the crop indices are valid inside the padded image
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
    # zzp: a simpler and faster version
    # if padding is needed, allocate te_im first, fill the corresponding regions, then slice im_patch_original out of it
r, c, k = im.shape
    if any([top_pad, bottom_pad, left_pad, right_pad]):  # if any side falls outside im, pad those regions with the channel means
te_im = np.zeros((r + top_pad + bottom_pad, c + left_pad + right_pad, k), np.uint8) # 0 is better than 1 initialization
te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
if top_pad:
te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
if bottom_pad:
te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
if left_pad:
te_im[:, 0:left_pad, :] = avg_chans
if right_pad:
te_im[:, c + left_pad:, :] = avg_chans
im_patch_original = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
    else:  # the square of side original_sz centred at pos lies entirely inside im, so crop it directly
im_patch_original = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
    # if the cropped patch size differs from the model input size, resize with OpenCV
if not np.array_equal(model_sz, original_sz):
im_patch = cv2.resize(im_patch_original, (model_sz, model_sz)) # zzp: use cv to get a better speed
else:
im_patch = im_patch_original
return im_to_torch(im_patch) if out_mode in 'torch' else im_patch
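# Editor's sketch (not in the original file): a minimal, self-contained demonstration of
# the crop-and-pad behaviour documented above; the frame size, target centre and crop
# sizes are assumptions chosen for illustration.
def _example_get_subwindow():
    frame = np.zeros((480, 640, 3), dtype=np.uint8)        # dummy frame
    avg_chans = np.mean(frame, axis=(0, 1))                # padding colour for out-of-frame pixels
    patch = get_subwindow_tracking(frame, [320.0, 240.0],  # assumed target centre (x, y)
                                   model_sz=127, original_sz=255,
                                   avg_chans=avg_chans, out_mode='numpy')
    return patch.shape                                     # (127, 127, 3)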
def cxy_wh_2_rect(pos, sz):
return np.array([pos[0]-sz[0]/2, pos[1]-sz[1]/2, sz[0], sz[1]]) # 0-index
def rect_2_cxy_wh(rect):
return np.array([rect[0]+rect[2]/2, rect[1]+rect[3]/2]), np.array([rect[2], rect[3]]) # 0-index
def get_axis_aligned_bbox(region):
try:
region = np.array([region[0][0][0], region[0][0][1], region[0][1][0], region[0][1][1],
region[0][2][0], region[0][2][1], region[0][3][0], region[0][3][1]])
except:
region = np.array(region)
    cx = np.mean(region[0::2])  # region[0::2]: every second element starting at index 0; np.mean: arithmetic mean
cy = np.mean(region[1::2])
x1 = min(region[0::2])
x2 = max(region[0::2])
y1 = min(region[1::2])
y2 = max(region[1::2])
    # linalg = linear algebra; norm defaults to the L2 norm, i.e. the square root of the sum of squares
    # A1 = actual area of the (possibly rotated) bounding polygon
    A1 = np.linalg.norm(region[0:2] - region[2:4]) * np.linalg.norm(region[2:4] - region[4:6])  # region[0:2]: indices 0 and 1, 2 excluded
    A2 = (x2 - x1) * (y2 - y1)  # A2: area of the axis-aligned rectangle that fully encloses the region
s = np.sqrt(A1 / A2)
w = s * (x2 - x1) + 1
h = s * (y2 - y1) + 1
    return cx, cy, w, h  # centre coordinates and width/height of the region
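# Editor's sketch (not in the original file): converting an assumed 4-corner polygon, given
# as 8 flat coordinates in VOT style, into the (cx, cy, w, h) form used above, and then
# into a 0-indexed [x, y, w, h] rectangle.
if __name__ == '__main__':
    region = [100, 100, 140, 100, 140, 180, 100, 180]   # an axis-aligned rectangle as 8 coords
    cx, cy, w, h = get_axis_aligned_bbox(region)
    print(cx, cy, w, h)                                  # 120.0 140.0 41.0 81.0
    print(cxy_wh_2_rect(np.array([cx, cy]), np.array([w, h])))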
|
the-stack_106_21533
|
#%%
#%matplotlib auto
import numpy as np
import matplotlib.pyplot as plt
import sensor_fusion as sf
import robot_n_measurement_functions as rnmf
import pathlib
import seaborn as sns
import matplotlib.patches as mpatches
from scipy.linalg import expm
import lsqSolve as lsqS
import pathlib
sns.set()
#%%
parent_path = pathlib.Path.home()#('/media/muhammad/Data/')
parent_path = parent_path/'Dropbox/09. Aalto Postdoc/DiddyBorg_experiment'
Camera = sf.Sensor('Camera',sf.CAMERA_COLUMNS,meas_record_file=parent_path/'test-run-camera.csv',is_linear=False,start_index=154)
#%%
x_init = np.array([17,60,0])
x = np.zeros((Camera.meas_record.shape[0]//3,3),dtype=np.float)
x[0,:] = x_init
t = np.zeros(x.shape[0])
t[0] = Camera.time[0]
R_one_diag = np.array([2,20])
#R_one_diag = np.array([2])
I_max=50
gamma=1
params_LSQ = {'x_sensors':None,
'R':None,
'LR':None,#cholesky factorization of a matrix (chol(a) in matlab returns an upper triangular matrix, but linalg.cholesky(a) returns a lower triangular matrix)
'Rinv':None,
'gamma':gamma,
'I_max':I_max,
'Line_search':False,
'Line_search_n_points':10,
'Jwls':lsqS.Jwls
}
#%%
Camera.reset_sampling_index()
# for i in range(1,x.shape[0]-1):
i=0
while(Camera.current_sample_index<Camera.time.shape[0] and i<x.shape[0]-1):
i +=1
# print('EKF')
t = Camera.current_time
y_raw = Camera.get_measurement()
n_qr_codes = y_raw.shape[0]
if n_qr_codes < 3:
x[i,:] = x[i-1,:]
continue
# dist = y_raw[:,5]
# direct = y_raw[:,-1]*rnmf.DEG_TO_RAD
# y_raw[:,5] = dist/np.cos(direct)
weight = y_raw[:,3]
height = y_raw[:,4]
c_x = y_raw[:,1]
dist = rnmf.QRCODE_SIDE_LENGTH*rnmf.PERCEIVED_FOCAL_LENGTH/height
direct = np.arctan2(c_x,rnmf.PERCEIVED_FOCAL_LENGTH)
angle_qr = np.arccos(np.minimum(weight,height)/height)
corrected_dist = dist/np.cos(direct) + 0.5*rnmf.QRCODE_SIDE_LENGTH*np.sin(angle_qr)
y_raw[:,5] = corrected_dist#dist/np.cos(direct)
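    # Editor's note: the block above applies the pinhole-camera relation
    # distance = side_length * focal_length_px / perceived_height_px, then corrects for the
    # bearing of the QR code in the camera frame (direct) and its estimated in-plane yaw
    # (angle_qr); `weight` holds the detected bounding-box width.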
y = y_raw[:,5:].flatten()
qr_pos = rnmf.QRCODE_LOCATIONS[y_raw[:,0].astype('int'),1:]
params_LSQ['x_sensors'] = qr_pos
R = np.diag(np.kron(np.ones(n_qr_codes),R_one_diag))
params_LSQ['R'] = R
params_LSQ['LR'] = np.linalg.cholesky(R).T
params_LSQ['Rinv'] = np.diag(1/np.diag(R))
xhat_history_GN, J_history_GN = lsqS.lsqsolve(y,rnmf.h_cam,rnmf.H_cam,x[i-1,:],params_LSQ,method='gauss-newton')
x[i,:] = xhat_history_GN[:,-1]
#%%
# plt.figure()
#plt.plot(x[:,0],x[:,1],'-ok',linewidth=0.5,markersize=2)
skip=5
end_index=(x.shape[0]-1)
fig, ax = plt.subplots()
ax.plot(x[:end_index:skip,0], x[:end_index:skip,1])
q = ax.quiver(x[:end_index:skip,0], x[:end_index:skip,1], -np.sin(x[:end_index:skip,2]), np.cos(x[:end_index:skip,2]),headwidth=1,width=0.0051,alpha=0.8,color='blue')
p = mpatches.Circle((x[0,0], x[0,1]), 1,color='red')
ax.add_patch(p)
p = mpatches.Circle((x[end_index,0], x[end_index,1]), 1,color='black')
ax.add_patch(p)
ax.plot(x[:end_index,0],x[:end_index,1],'-r',linewidth=4,alpha=0.5)
# %%
|
the-stack_106_21535
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MHSMPrivateLinkResourcesOperations:
"""MHSMPrivateLinkResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.keyvault.v2021_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list_by_mhsm_resource(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.MHSMPrivateLinkResourceListResult":
"""Gets the private link resources supported for the managed hsm pool.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: Name of the managed HSM Pool.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MHSMPrivateLinkResourceListResult, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.MHSMPrivateLinkResourceListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MHSMPrivateLinkResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = self.list_by_mhsm_resource.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('MHSMPrivateLinkResourceListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_mhsm_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateLinkResources'} # type: ignore
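# Hedged usage sketch (not part of the generated operations class): callers normally
# reach this operation group through the management client rather than constructing
# MHSMPrivateLinkResourcesOperations directly. The attribute name
# `mhsm_private_link_resources` below is an assumption based on the class name, and the
# subscription/resource names are placeholders.
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.keyvault.aio import KeyVaultManagementClient
#
#     async def main():
#         async with DefaultAzureCredential() as credential:
#             async with KeyVaultManagementClient(credential, "<subscription-id>") as client:
#                 result = await client.mhsm_private_link_resources.list_by_mhsm_resource(
#                     resource_group_name="<resource-group>", name="<hsm-name>")
#                 print([resource.id for resource in result.value])
#
#     asyncio.run(main())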
|
the-stack_106_21536
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from alembic.ddl import base as alembic_ddl
from alembic import script as alembic_script
from contextlib import contextmanager
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_db.sqlalchemy import test_migrations
from oslotest import base as oslotest_base
import six
import sqlalchemy
from sqlalchemy import event
from sqlalchemy.sql import ddl as sqla_ddl
import subprocess
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration import cli as migration
from neutron.db.migration.models import head as head_models
from neutron.tests import base as test_base
from neutron.tests.unit import testlib_api
cfg.CONF.import_opt('core_plugin', 'neutron.conf.common')
CREATION_OPERATIONS = {
'sqla': (sqla_ddl.CreateIndex,
sqla_ddl.CreateTable,
sqla_ddl.CreateColumn,
),
'alembic': (alembic_ddl.AddColumn,
)
}
DROP_OPERATIONS = {
'sqla': (sqla_ddl.DropConstraint,
sqla_ddl.DropIndex,
sqla_ddl.DropTable,
),
'alembic': (alembic_ddl.DropColumn,
)
}
def upgrade(engine, alembic_config, branch_name='heads'):
cfg.CONF.set_override('connection', engine.url, group='database')
migration.do_alembic_command(alembic_config, 'upgrade',
branch_name)
class _TestModelsMigrations(test_migrations.ModelsMigrationsSync):
    '''Test that checks equality of the models' state and the migrations.
For the opportunistic testing you need to set up a db named
'openstack_citest' with user 'openstack_citest' and password
'openstack_citest' on localhost.
The test will then use that db and user/password combo to run the tests.
For PostgreSQL on Ubuntu this can be done with the following commands::
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner
openstack_citest;
For MySQL on Ubuntu this can be done with the following commands::
mysql -u root
>create database openstack_citest;
>grant all privileges on openstack_citest.* to
openstack_citest@localhost identified by 'openstack_citest';
Output is a list that contains information about differences between db and
models. Output example::
[('add_table',
Table('bat', MetaData(bind=None),
Column('info', String(), table=<bat>), schema=None)),
('remove_table',
Table(u'bar', MetaData(bind=None),
Column(u'data', VARCHAR(), table=<bar>), schema=None)),
('add_column',
None,
'foo',
Column('data', Integer(), table=<foo>)),
('remove_column',
None,
'foo',
Column(u'old_data', VARCHAR(), table=None)),
[('modify_nullable',
None,
'foo',
u'x',
{'existing_server_default': None,
'existing_type': INTEGER()},
True,
False)]]
* ``remove_*`` means that there is extra table/column/constraint in db;
* ``add_*`` means that it is missing in db;
    * ``modify_*`` means that a column in the db has the wrong
      type/nullable/server_default set. Each element contains:
- what should be modified,
- schema,
- table,
- column,
- existing correct column parameters,
- right value,
- wrong value.
This class also contains tests for branches, like that correct operations
are used in contract and expand branches.
'''
BUILD_SCHEMA = False
TIMEOUT_SCALING_FACTOR = 4
def setUp(self):
super(_TestModelsMigrations, self).setUp()
self.cfg = self.useFixture(config_fixture.Config())
self.cfg.config(core_plugin='ml2')
self.alembic_config = migration.get_neutron_config()
self.alembic_config.neutron_config = cfg.CONF
# Migration tests can take a long time
self.useFixture(test_base.Timeout(scaling=self.TIMEOUT_SCALING_FACTOR))
def db_sync(self, engine):
upgrade(engine, self.alembic_config)
def get_engine(self):
return self.engine
def get_metadata(self):
return head_models.get_metadata()
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table' and (name == 'alembic_version'
or name in external.TABLES):
return False
return super(_TestModelsMigrations, self).include_object(
object_, name, type_, reflected, compare_to)
def filter_metadata_diff(self, diff):
return list(filter(self.remove_unrelated_errors, diff))
    # Remove some differences that are not mistakes, just dialect-specific
    # quirks, etc.
def remove_unrelated_errors(self, element):
insp = sqlalchemy.engine.reflection.Inspector.from_engine(
self.get_engine())
dialect = self.get_engine().dialect.name
if isinstance(element, tuple):
if dialect == 'mysql' and element[0] == 'remove_index':
table_name = element[1].table.name
for fk in insp.get_foreign_keys(table_name):
if fk['name'] == element[1].name:
return False
cols = [c.name for c in element[1].expressions]
for col in cols:
if col in insp.get_pk_constraint(
table_name)['constrained_columns']:
return False
else:
for modified, _, table, column, _, _, new in element:
if modified == 'modify_default' and dialect == 'mysql':
constrained = insp.get_pk_constraint(table)
if column in constrained['constrained_columns']:
return False
return True
def test_upgrade_expand_branch(self):
# Verify that "command neutron-db-manage upgrade --expand" works
# without errors. Check this for both MySQL and PostgreSQL.
upgrade(self.engine, self.alembic_config,
branch_name='%s@head' % migration.EXPAND_BRANCH)
def test_upgrade_contract_branch(self):
# Verify that "command neutron-db-manage upgrade --contract" works
# without errors. Check this for both MySQL and PostgreSQL.
upgrade(self.engine, self.alembic_config,
branch_name='%s@head' % migration.CONTRACT_BRANCH)
@contextmanager
def _listener(self, engine, listener_func):
try:
event.listen(engine, 'before_execute', listener_func)
yield
finally:
event.remove(engine, 'before_execute',
listener_func)
def test_branches(self):
drop_exceptions = collections.defaultdict(list)
creation_exceptions = collections.defaultdict(list)
def find_migration_exceptions():
# Due to some misunderstandings and some conscious decisions,
# there may be some expand migrations which drop elements and
# some contract migrations which create elements. These excepted
# elements must be returned by a method in the script itself.
# The names of the method must be 'contract_creation_exceptions'
# or 'expand_drop_exceptions'. The methods must have a docstring
# explaining the reason for the exception.
#
# Here we build lists of the excepted elements and verify that
# they are documented.
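            #
            # As a purely illustrative sketch (the table name is made up), a
            # contract script that legitimately creates an index would declare:
            #
            #     def contract_creation_exceptions():
            #         """Explain here why the contract branch needs this creation."""
            #         return {sqlalchemy.Index: ['some_table_name']}
            #
            # and an expand script that drops a column would analogously declare
            # expand_drop_exceptions() returning {sqlalchemy.Column: ['table.column']}.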
script = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
for m in list(script.walk_revisions(base='base', head='heads')):
branches = m.branch_labels or []
if migration.CONTRACT_BRANCH in branches:
method_name = 'contract_creation_exceptions'
exceptions_dict = creation_exceptions
elif migration.EXPAND_BRANCH in branches:
method_name = 'expand_drop_exceptions'
exceptions_dict = drop_exceptions
else:
continue
get_excepted_elements = getattr(m.module, method_name, None)
if not get_excepted_elements:
continue
explanation = getattr(get_excepted_elements, '__doc__', "")
if len(explanation) < 1:
self.fail("%s() requires docstring with explanation" %
'.'.join([m.module.__name__,
get_excepted_elements.__name__]))
for sa_type, elements in get_excepted_elements().items():
exceptions_dict[sa_type].extend(elements)
def is_excepted_sqla(clauseelement, exceptions):
"""Identify excepted operations that are allowed for the branch."""
element = clauseelement.element
element_name = element.name
if isinstance(element, sqlalchemy.Index):
element_name = element.table.name
for sa_type_, excepted_names in exceptions.items():
if isinstance(element, sa_type_):
if element_name in excepted_names:
return True
def is_excepted_alembic(clauseelement, exceptions):
"""Identify excepted operations that are allowed for the branch."""
# For alembic the clause is AddColumn or DropColumn
column = clauseelement.column.name
table = clauseelement.column.table.name
element_name = '.'.join([table, column])
for alembic_type, excepted_names in exceptions.items():
if alembic_type == sqlalchemy.Column:
if element_name in excepted_names:
return True
def is_allowed(clauseelement, exceptions, disallowed_ops):
if (isinstance(clauseelement, disallowed_ops['sqla']) and
hasattr(clauseelement, 'element')):
return is_excepted_sqla(clauseelement, exceptions)
if isinstance(clauseelement, disallowed_ops['alembic']):
return is_excepted_alembic(clauseelement, exceptions)
return True
def check_expand_branch(conn, clauseelement, multiparams, params):
if not is_allowed(clauseelement, drop_exceptions, DROP_OPERATIONS):
self.fail("Migration in expand branch contains drop command")
def check_contract_branch(conn, clauseelement, multiparams, params):
if not is_allowed(clauseelement, creation_exceptions,
CREATION_OPERATIONS):
self.fail("Migration in contract branch contains create "
"command")
find_migration_exceptions()
engine = self.engine
cfg.CONF.set_override('connection', engine.url, group='database')
with engine.begin() as connection:
self.alembic_config.attributes['connection'] = connection
migration.do_alembic_command(self.alembic_config, 'upgrade',
'kilo')
with self._listener(engine, check_expand_branch):
migration.do_alembic_command(
self.alembic_config, 'upgrade',
'%s@head' % migration.EXPAND_BRANCH)
with self._listener(engine, check_contract_branch):
migration.do_alembic_command(
self.alembic_config, 'upgrade',
'%s@head' % migration.CONTRACT_BRANCH)
def _test_has_offline_migrations(self, revision, expected):
engine = self.get_engine()
cfg.CONF.set_override('connection', engine.url, group='database')
migration.do_alembic_command(self.alembic_config, 'upgrade', revision)
self.assertEqual(expected,
migration.has_offline_migrations(self.alembic_config,
'unused'))
def test_has_offline_migrations_pending_contract_scripts(self):
self._test_has_offline_migrations('kilo', True)
def test_has_offline_migrations_all_heads_upgraded(self):
self._test_has_offline_migrations('heads', False)
# NOTE(ihrachys): if this test fails for you, it probably means that you
# attempt to add an unsafe contract migration script, that is in
# contradiction to blueprint online-upgrades
# TODO(ihrachys): revisit later in Pike+ where some contract scripts may be
# safe again
def test_forbid_offline_migrations_starting_newton(self):
engine = self.get_engine()
cfg.CONF.set_override('connection', engine.url, group='database')
# the following revisions are Newton heads
for revision in ('5cd92597d11d', '5c85685d616d'):
migration.do_alembic_command(
self.alembic_config, 'upgrade', revision)
self.assertFalse(migration.has_offline_migrations(
self.alembic_config, 'unused'),
msg='Offline contract migration scripts are forbidden for Ocata+')
class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin,
_TestModelsMigrations,
testlib_api.SqlTestCaseLight):
def test_check_mysql_engine(self):
engine = self.get_engine()
cfg.CONF.set_override('connection', engine.url, group='database')
with engine.begin() as connection:
self.alembic_config.attributes['connection'] = connection
migration.do_alembic_command(self.alembic_config, 'upgrade',
'heads')
insp = sqlalchemy.engine.reflection.Inspector.from_engine(engine)
# Test that table creation on MySQL only builds InnoDB tables
tables = insp.get_table_names()
self.assertGreater(len(tables), 0,
"No tables found. Wrong schema?")
res = [table for table in tables if
insp.get_table_options(table)['mysql_engine'] != 'InnoDB'
and table != 'alembic_version']
self.assertEqual(0, len(res), "%s non InnoDB tables created" % res)
class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin,
_TestModelsMigrations,
testlib_api.SqlTestCaseLight):
pass
class TestSanityCheck(testlib_api.SqlTestCaseLight):
BUILD_SCHEMA = False
def setUp(self):
super(TestSanityCheck, self).setUp()
self.alembic_config = migration.get_neutron_config()
self.alembic_config.neutron_config = cfg.CONF
def _drop_table(self, table):
with self.engine.begin() as conn:
table.drop(conn)
def test_check_sanity_1df244e556f5(self):
ha_router_agent_port_bindings = sqlalchemy.Table(
'ha_router_agent_port_bindings', sqlalchemy.MetaData(),
sqlalchemy.Column('port_id', sqlalchemy.String(36)),
sqlalchemy.Column('router_id', sqlalchemy.String(36)),
sqlalchemy.Column('l3_agent_id', sqlalchemy.String(36)))
with self.engine.connect() as conn:
ha_router_agent_port_bindings.create(conn)
self.addCleanup(self._drop_table, ha_router_agent_port_bindings)
conn.execute(ha_router_agent_port_bindings.insert(), [
{'port_id': '1234', 'router_id': '12345',
'l3_agent_id': '123'},
{'port_id': '12343', 'router_id': '12345',
'l3_agent_id': '123'}
])
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
script = script_dir.get_revision("1df244e556f5").module
self.assertRaises(script.DuplicateL3HARouterAgentPortBinding,
script.check_sanity, conn)
def test_check_sanity_030a959ceafa(self):
routerports = sqlalchemy.Table(
'routerports', sqlalchemy.MetaData(),
sqlalchemy.Column('router_id', sqlalchemy.String(36)),
sqlalchemy.Column('port_id', sqlalchemy.String(36)),
sqlalchemy.Column('port_type', sqlalchemy.String(255)))
with self.engine.connect() as conn:
routerports.create(conn)
self.addCleanup(self._drop_table, routerports)
conn.execute(routerports.insert(), [
{'router_id': '1234', 'port_id': '12345',
'port_type': '123'},
{'router_id': '12343', 'port_id': '12345',
'port_type': '1232'}
])
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
script = script_dir.get_revision("030a959ceafa").module
self.assertRaises(script.DuplicatePortRecordinRouterPortdatabase,
script.check_sanity, conn)
def test_check_sanity_6b461a21bcfc_dup_on_fixed_ip(self):
floatingips = sqlalchemy.Table(
'floatingips', sqlalchemy.MetaData(),
sqlalchemy.Column('floating_network_id', sqlalchemy.String(36)),
sqlalchemy.Column('fixed_port_id', sqlalchemy.String(36)),
sqlalchemy.Column('fixed_ip_address', sqlalchemy.String(64)))
with self.engine.connect() as conn:
floatingips.create(conn)
self.addCleanup(self._drop_table, floatingips)
conn.execute(floatingips.insert(), [
{'floating_network_id': '12345',
'fixed_port_id': '1234567',
'fixed_ip_address': '12345678'},
{'floating_network_id': '12345',
'fixed_port_id': '1234567',
'fixed_ip_address': '12345678'}
])
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
script = script_dir.get_revision("6b461a21bcfc").module
self.assertRaises(script.DuplicateFloatingIPforOneFixedIP,
script.check_sanity, conn)
def test_check_sanity_6b461a21bcfc_dup_on_no_fixed_ip(self):
floatingips = sqlalchemy.Table(
'floatingips', sqlalchemy.MetaData(),
sqlalchemy.Column('floating_network_id', sqlalchemy.String(36)),
sqlalchemy.Column('fixed_port_id', sqlalchemy.String(36)),
sqlalchemy.Column('fixed_ip_address', sqlalchemy.String(64)))
with self.engine.connect() as conn:
floatingips.create(conn)
self.addCleanup(self._drop_table, floatingips)
conn.execute(floatingips.insert(), [
{'floating_network_id': '12345',
'fixed_port_id': '1234567',
'fixed_ip_address': None},
{'floating_network_id': '12345',
'fixed_port_id': '1234567',
'fixed_ip_address': None}
])
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
script = script_dir.get_revision("6b461a21bcfc").module
self.assertIsNone(script.check_sanity(conn))
class TestWalkDowngrade(oslotest_base.BaseTestCase):
def setUp(self):
super(TestWalkDowngrade, self).setUp()
self.alembic_config = migration.get_neutron_config()
self.alembic_config.neutron_config = cfg.CONF
def test_no_downgrade(self):
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
versions = [v for v in script_dir.walk_revisions(base='base',
head='heads')]
failed_revisions = []
for version in versions:
if hasattr(version.module, 'downgrade'):
failed_revisions.append(version.revision)
if failed_revisions:
self.fail('Migrations %s have downgrade' % failed_revisions)
return True
class _TestWalkMigrations(object):
    '''This adds a framework for testing schema migrations
    for different backends.
'''
BUILD_SCHEMA = False
def execute_cmd(self, cmd=None):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
output = proc.communicate()[0]
self.assertEqual(0, proc.returncode, 'Command failed with '
'output:\n%s' % output)
def _get_alembic_config(self, uri):
db_config = migration.get_neutron_config()
self.script_dir = alembic_script.ScriptDirectory.from_config(db_config)
db_config.neutron_config = cfg.CONF
db_config.neutron_config.set_override('connection',
six.text_type(uri),
group='database')
return db_config
def _revisions(self):
"""Provides revisions and its parent revisions.
:return: List of tuples. Every tuple contains revision and its parent
revision.
"""
revisions = list(self.script_dir.walk_revisions("base", "heads"))
revisions = list(reversed(revisions))
for rev in revisions:
# Destination, current
yield rev.revision, rev.down_revision
def _migrate_up(self, config, engine, dest, curr, with_data=False):
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%s" % dest, None)
if pre_upgrade:
data = pre_upgrade(engine)
migration.do_alembic_command(config, 'upgrade', dest)
if with_data:
check = getattr(self, "_check_%s" % dest, None)
if check and data:
check(engine, data)
def test_walk_versions(self):
"""Test migrations ability to upgrade and downgrade.
"""
engine = self.engine
config = self._get_alembic_config(engine.url)
revisions = self._revisions()
for dest, curr in revisions:
self._migrate_up(config, engine, dest, curr, with_data=True)
class TestWalkMigrationsMysql(testlib_api.MySQLTestCaseMixin,
_TestWalkMigrations,
testlib_api.SqlTestCaseLight):
pass
class TestWalkMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin,
_TestWalkMigrations,
testlib_api.SqlTestCaseLight):
pass
|
the-stack_106_21539
|
import sys
import pytest
import logging
from sentry_sdk.integrations.logging import LoggingIntegration
other_logger = logging.getLogger("testfoo")
logger = logging.getLogger(__name__)
@pytest.fixture(autouse=True)
def reset_level():
other_logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
@pytest.mark.parametrize("logger", [logger, other_logger])
def test_logging_works_with_many_loggers(sentry_init, capture_events, logger):
sentry_init(integrations=[LoggingIntegration(event_level="ERROR")])
events = capture_events()
logger.info("bread")
logger.critical("LOL")
event, = events
assert event["level"] == "fatal"
assert not event["logentry"]["params"]
assert event["logentry"]["message"] == "LOL"
assert any(crumb["message"] == "bread" for crumb in event["breadcrumbs"])
@pytest.mark.parametrize("integrations", [None, [], [LoggingIntegration()]])
def test_logging_defaults(integrations, sentry_init, capture_events):
sentry_init(integrations=integrations)
events = capture_events()
logger.info("bread")
logger.critical("LOL")
event, = events
assert event["level"] == "fatal"
assert any(crumb["message"] == "bread" for crumb in event["breadcrumbs"])
assert not any(crumb["message"] == "LOL" for crumb in event["breadcrumbs"])
assert "threads" not in event
def test_logging_extra_data(sentry_init, capture_events):
sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
events = capture_events()
logger.info("bread", extra=dict(foo=42))
logger.critical("lol", extra=dict(bar=69))
event, = events
assert event["level"] == "fatal"
assert event["extra"] == {"bar": 69}
assert any(
crumb["message"] == "bread" and crumb["data"] == {"foo": 42}
for crumb in event["breadcrumbs"]
)
@pytest.mark.xfail(sys.version_info[:2] == (3, 4), reason="buggy logging module")
def test_logging_stack(sentry_init, capture_events):
sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
events = capture_events()
logger.error("first", exc_info=True)
logger.error("second")
event_with, event_without, = events
assert event_with["level"] == "error"
assert event_with["threads"]["values"][0]["stacktrace"]["frames"]
assert event_without["level"] == "error"
assert "threads" not in event_without
def test_logging_level(sentry_init, capture_events):
sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
events = capture_events()
logger.setLevel(logging.WARNING)
logger.error("hi")
event, = events
assert event["level"] == "error"
assert event["logentry"]["message"] == "hi"
del events[:]
logger.setLevel(logging.ERROR)
logger.warn("hi")
assert not events
def test_logging_filters(sentry_init, capture_events):
sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
events = capture_events()
should_log = False
class MyFilter(logging.Filter):
def filter(self, record):
return should_log
logger.addFilter(MyFilter())
logger.error("hi")
assert not events
should_log = True
logger.error("hi")
event, = events
assert event["logentry"]["message"] == "hi"
|
the-stack_106_21542
|
from .test import BaseTest, ValidatorError
import random
class Test_Region_Pixels(BaseTest):
label = 'Region specified by pixels'
level = 1
category = 2
versions = [u'1.0', u'1.1', u'2.0', u'3.0']
validationInfo = None
def run(self, result):
try:
match = 0
for i in range(5):
x = random.randint(0,9)
y = random.randint(0,9)
ix = x*100+13
iy = y*100+13
hw = 74
params = {'region' :'%s,%s,%s,%s' % (ix,iy, hw, hw)}
img = result.get_image(params)
ok = self.validationInfo.do_test_square(img,x,y, result)
if ok:
match += 1
if match >= 4:
return result
else:
raise ValidatorError('color', 1,0, result)
except:
# self.validationInfo.check('status', result.last_status, 200, result)
raise
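# Note on the request above: the IIIF Image API expresses the region parameter as
# "x,y,w,h" in image pixels, so '%s,%s,%s,%s' % (ix, iy, hw, hw) selects a 74x74 px
# window offset 13 px into the chosen 100x100 grid square, keeping clear of its border.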
|
the-stack_106_21544
|
from datetime import datetime
from unittest import mock
from requests_mock import Mocker
import pytest
from pyairtable import Table
from pyairtable.orm import Model
from pyairtable.orm import fields as f
def test_model_missing_meta():
with pytest.raises(ValueError):
class Address(Model):
street = f.TextField("Street")
class Meta:
base_id = "required"
table_name = "required"
# api_key = "required"
def test_model_overlapping():
# Should raise error because conflicts with .exists()
with pytest.raises(ValueError):
class Address(Model):
            exists = f.TextField("Exists")  # clashes with Model.exists()
class Meta:
base_id = "required"
table_name = "required"
api_key = "required"
def test_model():
class Address(Model):
street = f.TextField("Street")
number = f.TextField("Number")
class Meta:
base_id = "address_base_id"
table_name = "Address"
api_key = "fake"
class Contact(Model):
first_name = f.TextField("First Name")
last_name = f.TextField("Last Name")
email = f.EmailField("Email")
is_registered = f.CheckboxField("Registered")
link = f.LinkField("Link", Address, lazy=True)
birthday = f.DateField("Birthday")
class Meta:
base_id = "contact_base_id"
table_name = "Contact"
api_key = "fake"
contact = Contact(
first_name="Gui",
last_name="Talarico",
email="[email protected]",
is_registered=True,
birthday=datetime(2020, 12, 12).date(),
)
# attribute look up
assert contact.first_name == "Gui"
assert not contact.id
    # create
with mock.patch.object(Table, "create") as m_save:
m_save.return_value = {"id": "id", "createdTime": "time"}
contact.save()
assert m_save.called
assert contact.id == "id"
# delete
with mock.patch.object(Table, "delete") as m_delete:
m_delete.return_value = {"deleted": True}
contact.delete()
assert m_delete.called
record = contact.to_record()
assert record["id"] == contact.id
assert record["createdTime"] == contact.created_time
assert record["fields"]["First Name"] == contact.first_name
def test_from_record():
class Contact(Model):
first_name = f.TextField("First Name")
timestamp = f.DatetimeField("Timestamp")
class Meta:
base_id = "contact_base_id"
table_name = "Contact"
api_key = "fake"
# Fetch = True
with mock.patch.object(Table, "get") as m_get:
m_get.return_value = {
"id": "recwnBLPIeQJoYVt4",
"createdTime": "",
"fields": {"First Name": "X", "Timestamp": "2014-09-05T12:34:56.000Z"},
}
contact = Contact.from_id("recwnBLPIeQJoYVt4")
        assert m_get.called
assert contact.id == "recwnBLPIeQJoYVt4"
assert contact.first_name == "X"
assert contact.timestamp.year == 2014
# Fetch = False
with mock.patch.object(Table, "get") as m_get_no_fetch:
contact = Contact.from_id("recwnBLPIeQJoYVt4", fetch=False)
assert not m_get_no_fetch.called
assert not contact.first_name == "X"
def test_linked_record():
class Address(Model):
street = f.TextField("Street")
class Meta:
base_id = "address_base_id"
table_name = "Address"
api_key = "fake"
class Contact(Model):
address = f.LinkField("Link", Address, lazy=True)
class Meta:
base_id = "contact_base_id"
table_name = "Contact"
api_key = "fake"
record = {"id": "recFake", "createdTime": "", "fields": {"Street": "A"}}
address = Address.from_id("recFake", fetch=False)
# Id Reference
contact = Contact(address=[address])
assert contact.address[0].id == address.id
assert not contact.address[0].street
with Mocker() as mock:
url = address.get_table().get_record_url(address.id)
mock.get(url, status_code=200, json=record)
contact.address[0].fetch()
assert contact.address[0].street == "A"
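# Hedged usage sketch outside of the mocked tests: with real credentials the same Model
# API would be used roughly as follows (the base id, table name and api key are
# placeholders):
#
#     class Contact(Model):
#         first_name = f.TextField("First Name")
#
#         class Meta:
#             base_id = "appXXXXXXXXXXXXXX"
#             table_name = "Contact"
#             api_key = "keyXXXXXXXXXXXXXX"
#
#     contact = Contact(first_name="Ada")
#     contact.save()                      # creates the record in Airtable
#     fetched = Contact.from_id(contact.id)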
|
the-stack_106_21546
|
# !/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dump out stats about all the actions that are in use in a set of replays."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import multiprocessing
import os
import signal
import sys
import threading
import time
from future.builtins import range # pylint: disable=redefined-builtin
import six
from six.moves import queue
from pysc2 import run_configs
from pysc2.lib import features
from pysc2.lib import point
from pysc2.lib import protocol
from pysc2.lib import remote_controller
from pysc2.lib import app
import gflags as flags
from pysc2.lib import gfile
from s2clientprotocol import sc2api_pb2 as sc_pb
FLAGS = flags.FLAGS
flags.DEFINE_integer("parallel", 1, "How many instances to run in parallel.")
flags.DEFINE_integer("step_mul", 8, "How many game steps per observation.")
# flags.DEFINE_string("replays", None, "Path to a directory of replays.")
flags.DEFINE_string("replays", "C:\\Users\\chensy\\Desktop\\sc2 project\\replay\\OdysseyLE_1.SC2Replay", "Path to a directory of replays.")
# flags.mark_flag_as_required("replays")
FLAGS(sys.argv)
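# Example invocation from a shell (the script filename and replay directory are
# placeholders; the flags are the ones defined above):
#
#     python replay_actions.py --replays=/path/to/replays --parallel=4 --step_mul=8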
size = point.Point(16, 16)
interface = sc_pb.InterfaceOptions(
raw=True, score=False,
feature_layer=sc_pb.SpatialCameraSetup(width=24))
size.assign_to(interface.feature_layer.resolution)
size.assign_to(interface.feature_layer.minimap_resolution)
def sorted_dict_str(d):
return "{%s}" % ", ".join("%s: %s" % (k, d[k])
for k in sorted(d, key=d.get, reverse=True))
class ReplayStats(object):
"""Summary stats of the replays seen so far."""
def __init__(self):
self.replays = 0
self.steps = 0
self.camera_move = 0
self.select_pt = 0
self.select_rect = 0
self.control_group = 0
self.maps = collections.defaultdict(int)
self.races = collections.defaultdict(int)
self.unit_ids = collections.defaultdict(int)
self.valid_abilities = collections.defaultdict(int)
self.made_abilities = collections.defaultdict(int)
self.valid_actions = collections.defaultdict(int)
self.made_actions = collections.defaultdict(int)
self.crashing_replays = set()
self.invalid_replays = set()
def merge(self, other):
"""Merge another ReplayStats into this one."""
def merge_dict(a, b):
for k, v in six.iteritems(b):
a[k] += v
self.replays += other.replays
self.steps += other.steps
self.camera_move += other.camera_move
self.select_pt += other.select_pt
self.select_rect += other.select_rect
self.control_group += other.control_group
merge_dict(self.maps, other.maps)
merge_dict(self.races, other.races)
merge_dict(self.unit_ids, other.unit_ids)
merge_dict(self.valid_abilities, other.valid_abilities)
merge_dict(self.made_abilities, other.made_abilities)
merge_dict(self.valid_actions, other.valid_actions)
merge_dict(self.made_actions, other.made_actions)
self.crashing_replays |= other.crashing_replays
self.invalid_replays |= other.invalid_replays
def __str__(self):
len_sorted_dict = lambda s: (len(s), sorted_dict_str(s))
len_sorted_list = lambda s: (len(s), sorted(s))
return "\n\n".join((
"Replays: %s, Steps total: %s" % (self.replays, self.steps),
"Camera move: %s, Select pt: %s, Select rect: %s, Control group: %s" % (
self.camera_move, self.select_pt, self.select_rect,
self.control_group),
"Maps: %s\n%s" % len_sorted_dict(self.maps),
"Races: %s\n%s" % len_sorted_dict(self.races),
"Unit ids: %s\n%s" % len_sorted_dict(self.unit_ids),
"Valid abilities: %s\n%s" % len_sorted_dict(self.valid_abilities),
"Made abilities: %s\n%s" % len_sorted_dict(self.made_abilities),
"Valid actions: %s\n%s" % len_sorted_dict(self.valid_actions),
"Made actions: %s\n%s" % len_sorted_dict(self.made_actions),
"Crashing replays: %s\n%s" % len_sorted_list(self.crashing_replays),
"Invalid replays: %s\n%s" % len_sorted_list(self.invalid_replays),
))
class ProcessStats(object):
"""Stats for a worker process."""
def __init__(self, proc_id):
self.proc_id = proc_id
self.time = time.time()
self.stage = ""
self.replay = ""
self.replay_stats = ReplayStats()
def update(self, stage):
self.time = time.time()
self.stage = stage
def __str__(self):
return ("[%2d] replay: %10s, replays: %5d, steps: %7d, game loops: %7s, "
"last: %12s, %3d s ago" % (
self.proc_id, self.replay, self.replay_stats.replays,
self.replay_stats.steps,
self.replay_stats.steps * FLAGS.step_mul, self.stage,
time.time() - self.time))
def valid_replay(info, ping):
"""Make sure the replay isn't corrupt, and is worth looking at."""
if (info.HasField("error") or
info.base_build != ping.base_build or # different game version
info.game_duration_loops < 1000 or
len(info.player_info) != 2):
# Probably corrupt, or just not interesting.
return False
for p in info.player_info:
if p.player_apm < 10 or p.player_mmr < -1000:
# Low APM = player just standing around.
# Low MMR = corrupt replay or player who is weak.
return False
return True
class ReplayProcessor(multiprocessing.Process):
"""A Process that pulls replays and processes them."""
def __init__(self, proc_id, run_config, replay_queue, stats_queue):
super(ReplayProcessor, self).__init__()
self.stats = ProcessStats(proc_id)
self.run_config = run_config
self.replay_queue = replay_queue
self.stats_queue = stats_queue
def run(self):
signal.signal(signal.SIGTERM, lambda a, b: sys.exit()) # Exit quietly.
self._update_stage("spawn")
replay_name = "none"
while True:
self._print("Starting up a new SC2 instance.")
self._update_stage("launch")
try:
with self.run_config.start() as controller:
self._print("SC2 Started successfully.")
ping = controller.ping()
for _ in range(300):
try:
replay_path = self.replay_queue.get()
except queue.Empty:
self._update_stage("done")
self._print("Empty queue, returning")
return
try:
replay_name = os.path.basename(replay_path)[:11]
self.stats.replay = replay_name
self._print("Got replay: %s" % replay_path)
self._update_stage("open replay file")
replay_data = self.run_config.replay_data(replay_path)
self._update_stage("replay_info")
info = controller.replay_info(replay_data)
self._print((" Replay Info %s " % replay_name).center(60, "-"))
self._print(info)
self._print("-" * 60)
if valid_replay(info, ping):
self.stats.replay_stats.maps[info.map_name] += 1
for player_info in info.player_info:
self.stats.replay_stats.races[
sc_pb.Race.Name(player_info.player_info.race_actual)] += 1
map_data = None
if info.local_map_path:
self._update_stage("open map file")
map_data = self.run_config.map_data(info.local_map_path)
for player_id in [1]:
self._print("Starting %s from player %s's perspective" % (
replay_name, player_id))
self.process_replay(controller, replay_data, map_data,
player_id)
else:
self._print("Replay is invalid.")
self.stats.replay_stats.invalid_replays.add(replay_name)
finally:
self.replay_queue.task_done()
self._update_stage("shutdown")
except (protocol.ConnectionError, protocol.ProtocolError,
remote_controller.RequestError):
self.stats.replay_stats.crashing_replays.add(replay_name)
except KeyboardInterrupt:
return
def _print(self, s):
for line in str(s).strip().splitlines():
print("[%s] %s" % (self.stats.proc_id, line))
def _update_stage(self, stage):
self.stats.update(stage)
self.stats_queue.put(self.stats)
def process_replay(self, controller, replay_data, map_data, player_id):
"""Process a single replay, updating the stats."""
self._update_stage("start_replay")
controller.start_replay(sc_pb.RequestStartReplay(
replay_data=replay_data,
map_data=map_data,
options=interface,
observed_player_id=player_id))
feat = features.Features(controller.game_info())
self.stats.replay_stats.replays += 1
self._update_stage("step")
controller.step()
while True:
self.stats.replay_stats.steps += 1
self._update_stage("observe")
obs = controller.observe()
for action in obs.actions:
act_fl = action.action_feature_layer
if act_fl.HasField("unit_command"):
self.stats.replay_stats.made_abilities[
act_fl.unit_command.ability_id] += 1
if act_fl.HasField("camera_move"):
self.stats.replay_stats.camera_move += 1
if act_fl.HasField("unit_selection_point"):
self.stats.replay_stats.select_pt += 1
if act_fl.HasField("unit_selection_rect"):
self.stats.replay_stats.select_rect += 1
if action.action_ui.HasField("control_group"):
self.stats.replay_stats.control_group += 1
try:
func = feat.reverse_action(action).function
except ValueError:
func = -1
self.stats.replay_stats.made_actions[func] += 1
for valid in obs.observation.abilities:
self.stats.replay_stats.valid_abilities[valid.ability_id] += 1
for u in obs.observation.raw_data.units:
self.stats.replay_stats.unit_ids[u.unit_type] += 1
if u.orders:
for u_order in u.orders:
if u_order.ability_id == 881:
self._print("steps: %d, type: %d, order_id: %d, progress: %f\n"
% (self.stats.replay_stats.steps, u.unit_type, u_order.ability_id, u_order.progress))
for ability_id in feat.available_actions(obs.observation):
self.stats.replay_stats.valid_actions[ability_id] += 1
if obs.player_result:
break
self._update_stage("step")
controller.step(FLAGS.step_mul)
def stats_printer(stats_queue):
"""A thread that consumes stats_queue and prints them every 10 seconds."""
proc_stats = [ProcessStats(i) for i in range(FLAGS.parallel)]
print_time = start_time = time.time()
width = 107
running = True
while running:
print_time += 10
while time.time() < print_time:
try:
s = stats_queue.get(True, print_time - time.time())
if s is None: # Signal to print and exit NOW!
running = False
break
proc_stats[s.proc_id] = s
except queue.Empty:
pass
replay_stats = ReplayStats()
for s in proc_stats:
replay_stats.merge(s.replay_stats)
print((" Summary %0d secs " % (print_time - start_time)).center(width, "="))
print(replay_stats)
print(" Process stats ".center(width, "-"))
print("\n".join(str(s) for s in proc_stats))
print("=" * width)
def replay_queue_filler(replay_queue, replay_list):
"""A thread that fills the replay_queue with replay filenames."""
for replay_path in replay_list:
replay_queue.put(replay_path)
def main(unused_argv):
"""Dump stats about all the actions that are in use in a set of replays."""
run_config = run_configs.get()
if not gfile.Exists(FLAGS.replays):
sys.exit("{} doesn't exist.".format(FLAGS.replays))
stats_queue = multiprocessing.Queue()
stats_thread = threading.Thread(target=stats_printer, args=(stats_queue,))
stats_thread.start()
try:
# For some reason buffering everything into a JoinableQueue makes the
# program not exit, so save it into a list then slowly fill it into the
# queue in a separate thread. Grab the list synchronously so we know there
# is work in the queue before the SC2 processes actually run, otherwise
# The replay_queue.join below succeeds without doing any work, and exits.
print("Getting replay list:", FLAGS.replays)
replay_list = sorted(run_config.replay_paths(FLAGS.replays))
print(len(replay_list), "replays found.\n")
replay_queue = multiprocessing.JoinableQueue(FLAGS.parallel * 10)
replay_queue_thread = threading.Thread(target=replay_queue_filler,
args=(replay_queue, replay_list))
replay_queue_thread.daemon = True
replay_queue_thread.start()
for i in range(FLAGS.parallel):
p = ReplayProcessor(i, run_config, replay_queue, stats_queue)
p.daemon = True
p.start()
time.sleep(1) # Stagger startups, otherwise they seem to conflict somehow
replay_queue.join() # Wait for the queue to empty.
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, exiting.")
finally:
stats_queue.put(None) # Tell the stats_thread to print and exit.
stats_thread.join()
if __name__ == "__main__":
app.run(main)
|
the-stack_106_21550
|
import KratosMultiphysics as km
import KratosMultiphysics.KratosUnittest as UnitTest
import KratosMultiphysics.kratos_utilities as kratos_utilities
from KratosMultiphysics.RANSApplication.test_utilities import RunParametricTestCase
class FlowSolverTestCase(UnitTest.TestCase):
@classmethod
def setUpCase(cls, working_folder, parameters_file_name, print_output):
cls.working_folder = working_folder
cls.parameters_file_name = parameters_file_name
cls.print_output = print_output
cls.parameters = {}
def testSteady(self):
self.parameters["<TIME_SCHEME_TYPE>"] = "steady"
self._runTest()
def testBossak(self):
self.parameters["<TIME_SCHEME_TYPE>"] = self.transient_scheme_type
self._runTest()
def _runTest(self):
if (km.IsDistributedRun()):
self.parameters["<PARALLEL_TYPE>"] = "MPI"
else:
self.parameters["<PARALLEL_TYPE>"] = "OpenMP"
self.addCleanup(lambda: kratos_utilities.DeleteTimeFiles("."))
RunParametricTestCase(self.parameters_file_name, self.working_folder,
self.parameters, self.print_output)
|
the-stack_106_21551
|
from flask import Flask, abort, request, redirect, g, render_template, make_response, session, url_for
from datetime import datetime, timedelta, date
from pytz import timezone
import urllib
import urllib.parse
import secrets
import string
import requests
from urllib.parse import urlencode
import json
import base64
from os import environ

# getUserToken() below logs through a module-level Flask app, so one is created here to
# keep the module importable on its own.
app = Flask(__name__)
CLIENT_ID = "MY_CLIENT_ID"
CLIENT_SECRET = "MY_CLIENT_SECRET"
# Spotify URLS
SPOTIFY_AUTH_URL = "https://accounts.spotify.com/authorize"
SPOTIFY_TOKEN_URL = "https://accounts.spotify.com/api/token"
SPOTIFY_API_BASE_URL = "https://api.spotify.com"
API_VERSION = "v1"
SPOTIFY_API_URL = "{}/{}".format(SPOTIFY_API_BASE_URL, API_VERSION)
SCOPE = "user-read-private user-read-email user-top-read playlist-modify-public playlist-modify-private"
SHOW_DIALOG_bool = True
SHOW_DIALOG_str = str(SHOW_DIALOG_bool).lower()
CLIENT_SIDE_URL = "http://127.0.0.1"
PORT = 8080
REDIRECT_URI = "{}:{}/callback".format(CLIENT_SIDE_URL, PORT)
AUTH_URL = 'https://accounts.spotify.com/authorize'
TOKEN_URL = 'https://accounts.spotify.com/api/token'
ME_URL = 'https://api.spotify.com/v1/me'
def getUserAuthorization():
state = ''.join(
secrets.choice(string.ascii_uppercase + string.digits) for _ in range(16)
)
# scope = 'user-read-private user-read-email user-top-read playlist-modify-public playlist-modify-private'
payload = {
'client_id': CLIENT_ID,
'response_type': 'code',
'redirect_uri': REDIRECT_URI,
'state': state,
'scope': SCOPE,
}
res = make_response(redirect(f'{AUTH_URL}/?{urlencode(payload)}'))
res.set_cookie('spotify_auth_state', state)
return res
def getUserToken():
error = request.args.get('error')
code = request.args.get('code')
state = request.args.get('state')
stored_state = request.cookies.get('spotify_auth_state')
if state is None or state != stored_state:
app.logger.error('Error message: %s', repr(error))
app.logger.error('State mismatch: %s != %s', stored_state, state)
abort(400)
payload = {
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': REDIRECT_URI,
}
res = requests.post(TOKEN_URL, auth=(CLIENT_ID, CLIENT_SECRET), data=payload)
return res
def getRecommendationThroughTracks(authorization_header, listOfSeedTracks, listOfAudioFeature):
""" Utilizes a list of seed (5 max) and list of audio feature (currently none FIX) to return json of recommendation """
limit = 20
market = "ES"
recommend_base_endpoint = "{}/recommendations?limit={}&market={}".format(SPOTIFY_API_URL,limit,market)
appended_list_seed = ','.join(listOfSeedTracks)
seed_api_endpoint = "{}&seed_tracks={}&".format(recommend_base_endpoint,appended_list_seed)
raw_audio_feature = '&'.join("%s=%s" % (key, val) for (key, val) in listOfAudioFeature)
audio_feature_api_endpoint = "{}{}".format(seed_api_endpoint,raw_audio_feature)
recommend_response = requests.get(audio_feature_api_endpoint, headers=authorization_header)
recommend_data = json.loads(recommend_response.text)
return recommend_data
def getProfileData(authorization_header):
""" Gets the profile data """
user_profile_api_endpoint = "{}/me".format(SPOTIFY_API_URL)
print("user_profile_api_endpoint:", user_profile_api_endpoint)
profile_response = requests.get(user_profile_api_endpoint, headers=authorization_header)
print("profile_response:", profile_response)
profile_data = json.loads(profile_response.text)
return profile_data
def getTopTrack(authorization_header):
""" Gets the current (4 weeks) top track """
time_range = 'short_term'#'short_term'
limit = 50
type = 'tracks'
top_api_endpoint = "{}/me/top/{}".format(SPOTIFY_API_URL,type)
specific_top_api_endpoint = "{}?time_range={}&limit={}".format(top_api_endpoint,time_range,limit)
top_track_response = requests.get(specific_top_api_endpoint, headers=authorization_header)
top_track_data = json.loads(top_track_response.text)
return top_track_data
def postBlankPlaylist(authorization_header, weather, user_id):
""" Creates a blank playlist """
d = date.today()
user_date = d.strftime("%m/%d/%y")
title = '{} {}'.format(d,weather)
print(title)
playlist_post = {'name': title, 'public': 'true', 'collaborative': 'false', 'description': 'Created at {} for {} weather. Made via SpotifyWeather.'.format(user_date,weather)}
post_playlist_api_endpoint = '{}/users/{}/playlists'.format(SPOTIFY_API_URL,user_id)
print("post_playlist_api_endpoint",post_playlist_api_endpoint)
post_playlist_api_response = requests.post(post_playlist_api_endpoint, headers=authorization_header, data=json.dumps(playlist_post))
print("post_playlist_api_response",post_playlist_api_response)
# ALSO GET THE PLAYLIST ID#
post_playlist_api_json = post_playlist_api_response.json()
playlist_id = post_playlist_api_json.get('id')
return post_playlist_api_response, playlist_id
def postTrackToPlaylist(authorization_header, track_id_list, playlist_id):
""" Puts a list of tracks (track_id_list) to a playlist (playlist_id) """
edited_track_list = ['spotify:track:{}'.format(track_id) for track_id in track_id_list]
# print("edited_track_list",edited_track_list)
post_track_api_endpoint = '{}/playlists/{}/tracks?uris={}'.format(SPOTIFY_API_URL,playlist_id,','.join(edited_track_list))
# print("post_track_api_endpoint",post_track_api_endpoint)
post_track_api_response = requests.post(post_track_api_endpoint, headers=authorization_header)
return post_track_api_response
def getAudioFeatureFromTrack(authorization_header, id):
""" Gets the audio feature from a single track (id) """
audio_feature_api_endpoint = "{}/{}/audio-features/{}".format(SPOTIFY_API_BASE_URL, API_VERSION, id)
audio_feature_response = requests.get(audio_feature_api_endpoint, headers=authorization_header)
audio_feature_data = json.loads(audio_feature_response.text)
return audio_feature_data
def getIframePlaylist(playlist_id):
""" Utilizes playlist_id to create a url for iframe """
open_base_url = 'https://open.spotify.com/embed?uri=spotify'
iframe_playlist_url = '{}:playlist:{}'.format(open_base_url,playlist_id)
return iframe_playlist_url
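# Hedged sketch of how these helpers might be wired into Flask routes. The route names,
# the hard-coded "sunny" weather string and the choice of five seed tracks are
# assumptions for illustration, not part of this module:
#
#     @app.route("/")
#     def login():
#         return getUserAuthorization()
#
#     @app.route("/callback")
#     def callback():
#         token_json = getUserToken().json()
#         authorization_header = {
#             "Authorization": "Bearer {}".format(token_json["access_token"])}
#         profile_data = getProfileData(authorization_header)
#         _, playlist_id = postBlankPlaylist(authorization_header, "sunny",
#                                            profile_data["id"])
#         top_tracks = getTopTrack(authorization_header)
#         track_ids = [track["id"] for track in top_tracks["items"][:5]]
#         postTrackToPlaylist(authorization_header, track_ids, playlist_id)
#         return redirect(getIframePlaylist(playlist_id))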
|
the-stack_106_21552
|
"""Plots class-activation maps (CAM)."""
import numpy
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import error_checking
DEFAULT_CONTOUR_WIDTH = 2
DEFAULT_CONTOUR_STYLE = 'solid'
def plot_2d_grid(
class_activation_matrix_2d, axes_object, colour_map_object,
min_contour_level, max_contour_level, contour_interval,
line_width=DEFAULT_CONTOUR_WIDTH, line_style=DEFAULT_CONTOUR_STYLE):
"""Plots 2-D class-activation map with line contours.
M = number of rows in spatial grid
N = number of columns in spatial grid
:param class_activation_matrix_2d: M-by-N numpy array of class activations.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
Will plot on these axes.
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
:param min_contour_level: Minimum value to plot.
    :param max_contour_level: Maximum value to plot.
:param contour_interval: Interval (in class-activation units) between
successive contours.
:param line_width: Width of contour lines.
:param line_style: Style (e.g., "solid") for contour lines.
"""
error_checking.assert_is_numpy_array_without_nan(class_activation_matrix_2d)
error_checking.assert_is_numpy_array(
class_activation_matrix_2d, num_dimensions=2)
# Determine contour levels.
max_contour_level = max([
min_contour_level + 1e-3, max_contour_level
])
contour_interval = max([contour_interval, 1e-4])
contour_interval = min([
contour_interval, max_contour_level - min_contour_level
])
num_contours = 1 + int(numpy.round(
(max_contour_level - min_contour_level) / contour_interval
))
contour_levels = numpy.linspace(
min_contour_level, max_contour_level, num=num_contours, dtype=float)
# Determine grid coordinates.
num_grid_rows = class_activation_matrix_2d.shape[0]
num_grid_columns = class_activation_matrix_2d.shape[1]
x_coord_spacing = num_grid_columns ** -1
y_coord_spacing = num_grid_rows ** -1
x_coords, y_coords = grids.get_xy_grid_points(
x_min_metres=x_coord_spacing / 2, y_min_metres=y_coord_spacing / 2,
x_spacing_metres=x_coord_spacing, y_spacing_metres=y_coord_spacing,
num_rows=num_grid_rows, num_columns=num_grid_columns)
x_coord_matrix, y_coord_matrix = numpy.meshgrid(x_coords, y_coords)
# Plot.
axes_object.contour(
x_coord_matrix, y_coord_matrix, class_activation_matrix_2d,
contour_levels, cmap=colour_map_object,
vmin=numpy.min(contour_levels), vmax=numpy.max(contour_levels),
linewidths=line_width, linestyles=line_style, zorder=1e6,
transform=axes_object.transAxes)
def plot_many_2d_grids(
class_activation_matrix_3d, axes_object_matrix, colour_map_object,
min_contour_level, max_contour_level, contour_interval,
line_width=DEFAULT_CONTOUR_WIDTH, row_major=True,
line_style=DEFAULT_CONTOUR_STYLE):
"""Plots the same 2-D class-activation map for each predictor.
M = number of rows in spatial grid
N = number of columns in spatial grid
P = number of predictors
:param class_activation_matrix_3d: M-by-N-by-P numpy array of class
activations.
:param axes_object_matrix: See doc for `plotting_utils.init_panels`.
:param colour_map_object: See doc for `plot_2d_grid`.
:param min_contour_level: Same.
:param max_contour_level: Same.
:param contour_interval: Same.
:param line_width: Same.
:param row_major: Boolean flag. If True, panels will be filled along rows
first, then down columns. If False, down columns first, then along
rows.
:param line_style: Style (e.g., "solid") for contour lines.
"""
error_checking.assert_is_numpy_array_without_nan(class_activation_matrix_3d)
error_checking.assert_is_numpy_array(
class_activation_matrix_3d, num_dimensions=3)
error_checking.assert_is_boolean(row_major)
if row_major:
order_string = 'C'
else:
order_string = 'F'
num_predictors = class_activation_matrix_3d.shape[-1]
num_panel_rows = axes_object_matrix.shape[0]
num_panel_columns = axes_object_matrix.shape[1]
for k in range(num_predictors):
this_panel_row, this_panel_column = numpy.unravel_index(
k, (num_panel_rows, num_panel_columns), order=order_string
)
plot_2d_grid(
class_activation_matrix_2d=class_activation_matrix_3d[..., k],
axes_object=axes_object_matrix[this_panel_row, this_panel_column],
colour_map_object=colour_map_object,
min_contour_level=min_contour_level,
max_contour_level=max_contour_level,
contour_interval=contour_interval,
line_width=line_width, line_style=line_style)
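if __name__ == '__main__':
    # Minimal demo, not part of the original module: draws contours for a random 32x32
    # class-activation matrix.  Assumes matplotlib is installed alongside the
    # dependencies imported above.
    import matplotlib.pyplot as pyplot

    demo_cam_matrix = numpy.random.uniform(low=0., high=1., size=(32, 32))
    _, demo_axes_object = pyplot.subplots(1, 1)

    plot_2d_grid(
        class_activation_matrix_2d=demo_cam_matrix,
        axes_object=demo_axes_object,
        colour_map_object=pyplot.cm.viridis,
        min_contour_level=0.1, max_contour_level=0.9, contour_interval=0.1)

    pyplot.show()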
|
the-stack_106_21553
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LFADS architecture and loss functions."""
from __future__ import print_function, division, absolute_import
import jax.numpy as np
from jax import jit, random, vmap
import jax.flatten_util as flatten_util
import lfads_tutorial.distributions as dists
import lfads_tutorial.utils as utils
def sigmoid(x):
return 0.5 * (np.tanh(x / 2.) + 1)
def linear_params(key, o, u, ifactor=1.0):
"""Params for y = w x
Arguments:
key: random.PRNGKey for random bits
o: output size
u: input size
ifactor: scaling factor
Returns:
a dictionary of parameters
"""
key, skeys = utils.keygen(key, 1)
ifactor = ifactor / np.sqrt(u)
return {'w' : random.normal(next(skeys), (o, u)) * ifactor}
def affine_params(key, o, u, ifactor=1.0):
"""Params for y = w x + b
Arguments:
key: random.PRNGKey for random bits
o: output size
u: input size
ifactor: scaling factor
Returns:
a dictionary of parameters
"""
key, skeys = utils.keygen(key, 1)
ifactor = ifactor / np.sqrt(u)
return {'w' : random.normal(next(skeys), (o, u)) * ifactor,
'b' : np.zeros((o,))}
def gru_params(key, n, u, ifactor=1.0, hfactor=1.0, hscale=0.0):
"""Generate GRU parameters
Arguments:
key: random.PRNGKey for random bits
n: hidden state size
u: input size
ifactor: scaling factor for input weights
hfactor: scaling factor for hidden -> hidden weights
hscale: scale on h0 initial condition
Returns:
a dictionary of parameters
"""
key, skeys = utils.keygen(key, 5)
ifactor = ifactor / np.sqrt(u)
hfactor = hfactor / np.sqrt(n)
wRUH = random.normal(next(skeys), (n+n,n)) * hfactor
wRUX = random.normal(next(skeys), (n+n,u)) * ifactor
wRUHX = np.concatenate([wRUH, wRUX], axis=1)
wCH = random.normal(next(skeys), (n,n)) * hfactor
wCX = random.normal(next(skeys), (n,u)) * ifactor
wCHX = np.concatenate([wCH, wCX], axis=1)
return {'h0' : random.normal(next(skeys), (n,)) * hscale,
'wRUHX' : wRUHX,
'wCHX' : wCHX,
'bRU' : np.zeros((n+n,)),
'bC' : np.zeros((n,))}
def affine(params, x):
"""Implement y = w x + b
Arguments:
params: a dictionary of params
x: np array of input
Returns:
np array of output
"""
return np.dot(params['w'], x) + params['b']
# Affine expects n_W_m m_x_1, but passing in t_x_m (has txm dims)
# So map over first dimension to hand t_x_m.
# I.e. if affine yields n_y_1 = dot(n_W_m, m_x_1), then
# batch_affine yields t_y_n.
# And so the vectorization pattern goes for all batch_* functions.
batch_affine = vmap(affine, in_axes=(None, 0))
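# Shape example for the vectorization above: if params['w'] has shape (n, m) and
# params['b'] has shape (n,), then affine(params, x) maps an (m,) input to an (n,)
# output, and batch_affine(params, x_t) maps a (T, m) array to a (T, n) array, i.e.
# one affine map per time step.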
def normed_linear(params, x):
"""Implement y = \hat{w} x, where \hat{w}_ij = w_ij / |w_{i:}|, norm over j
Arguments:
params: a dictionary of params
x: np array of input
Returns:
np array of output
"""
w = params['w']
w_row_norms = np.sqrt(np.sum(w**2, axis=1, keepdims=True))
w = w / w_row_norms
return np.dot(w, x)
# Note not BatchNorm, the neural network regularizer,
# rather just batching the normed linear function above.
batch_normed_linear = vmap(normed_linear, in_axes=(None, 0))
def dropout(x, key, keep_rate):
"""Implement a dropout layer.
Arguments:
x: np array to be dropped out
key: random.PRNGKey for random bits
keep_rate: dropout rate
Returns:
np array of dropped out x
"""
# The shenanigans with np.where are to avoid having to re-jit if
# keep rate changes.
do_keep = random.bernoulli(key, keep_rate, x.shape)
kept_rates = np.where(do_keep, x / keep_rate, 0.0)
return np.where(keep_rate < 1.0, kept_rates, x)
# Note that dropout is a feed-forward routine that requires randomness. Thus,
# the keys argument is also vectorized over, and you'll see the correct
# number of keys allocated by the caller.
batch_dropout = vmap(dropout, in_axes=(0, 0, None))
def run_dropout(x_t, key, keep_rate):
"""Run the dropout layer over additional dimensions, e.g. time.
Arguments:
x_t: np array to be dropped out
key: random.PRNGKey for random bits
keep_rate: dropout rate
Returns:
np array of dropped out x
"""
ntime = x_t.shape[0]
keys = random.split(key, ntime)
return batch_dropout(x_t, keys, keep_rate)
def gru(params, h, x, bfg=0.5):
"""Implement the GRU equations.
Arguments:
params: dictionary of GRU parameters
h: np array of hidden state
x: np array of input
bfg: bias on forget gate (useful for learning if > 0.0)
Returns:
np array of hidden state after GRU update"""
hx = np.concatenate([h, x], axis=0)
ru = sigmoid(np.dot(params['wRUHX'], hx) + params['bRU'])
r, u = np.split(ru, 2, axis=0)
rhx = np.concatenate([r * h, x])
c = np.tanh(np.dot(params['wCHX'], rhx) + params['bC'] + bfg)
return u * h + (1.0 - u) * c
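# In equations (with [a; b] denoting concatenation), the update above is:
#   r, u = split(sigmoid(W_ruhx [h; x] + b_ru))
#   c    = tanh(W_chx [r*h; x] + b_c + bfg)
#   h'   = u*h + (1-u)*c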
def run_rnn(params, rnn, x_t, h0=None):
"""Run an RNN module forward in time.
Arguments:
params: dictionary of RNN parameters
rnn: function for running RNN one step
x_t: np array data for RNN input with leading dim being time
h0: initial condition for running rnn, which overwrites param h0
Returns:
np array of rnn applied to time data with leading dim being time"""
h = h0 if h0 is not None else params['h0']
h_t = []
for x in x_t:
h = rnn(params, h, x)
h_t.append(h)
return np.array(h_t)
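# Design note: the Python loop above keeps run_rnn easy to read; for long
# sequences, jax.lax.scan expresses the same recurrence as a single compiled
# loop and typically traces and compiles much faster.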
def run_bidirectional_rnn(params, fwd_rnn, bwd_rnn, x_t):
"""Run an RNN encoder backwards and forwards over some time series data.
Arguments:
    params: a dictionary of bidirectional RNN encoder parameters
fwd_rnn: function for running forward rnn encoding
bwd_rnn: function for running backward rnn encoding
x_t: np array data for RNN input with leading dim being time
Returns:
tuple of np array concatenated forward, backward encoding, and
np array of concatenation of [forward_enc(T), backward_enc(1)]
"""
fwd_enc_t = run_rnn(params['fwd_rnn'], fwd_rnn, x_t)
bwd_enc_t = np.flipud(run_rnn(params['bwd_rnn'], bwd_rnn, np.flipud(x_t)))
full_enc = np.concatenate([fwd_enc_t, bwd_enc_t], axis=1)
  enc_ends = np.concatenate([bwd_enc_t[0], fwd_enc_t[-1]], axis=0)  # both are 1-D (enc_dim,) vectors
return full_enc, enc_ends
def lfads_params(key, lfads_hps):
"""Instantiate random LFADS parameters.
Arguments:
key: random.PRNGKey for random bits
lfads_hps: a dict of LFADS hyperparameters
Returns:
a dictionary of LFADS parameters
"""
key, skeys = utils.keygen(key, 10)
data_dim = lfads_hps['data_dim']
ntimesteps = lfads_hps['ntimesteps']
enc_dim = lfads_hps['enc_dim']
con_dim = lfads_hps['con_dim']
ii_dim = lfads_hps['ii_dim']
gen_dim = lfads_hps['gen_dim']
factors_dim = lfads_hps['factors_dim']
ic_enc_params = {'fwd_rnn' : gru_params(next(skeys), enc_dim, data_dim),
'bwd_rnn' : gru_params(next(skeys), enc_dim, data_dim)}
gen_ic_params = affine_params(next(skeys), 2*gen_dim, 2*enc_dim) #m,v <- bi
ic_prior_params = dists.diagonal_gaussian_params(next(skeys), gen_dim, 0.0,
lfads_hps['ic_prior_var'])
con_params = gru_params(next(skeys), con_dim, 2*enc_dim + factors_dim)
con_out_params = affine_params(next(skeys), 2*ii_dim, con_dim) #m,v
ii_prior_params = dists.ar1_params(next(skeys), ii_dim,
lfads_hps['ar_mean'],
lfads_hps['ar_autocorrelation_tau'],
lfads_hps['ar_noise_variance'])
gen_params = gru_params(next(skeys), gen_dim, ii_dim)
factors_params = linear_params(next(skeys), factors_dim, gen_dim)
lograte_params = affine_params(next(skeys), data_dim, factors_dim)
return {'ic_enc' : ic_enc_params,
'gen_ic' : gen_ic_params, 'ic_prior' : ic_prior_params,
'con' : con_params, 'con_out' : con_out_params,
'ii_prior' : ii_prior_params,
'gen' : gen_params, 'factors' : factors_params,
'logrates' : lograte_params}
def lfads_encode(params, lfads_hps, key, x_t, keep_rate):
"""Run the LFADS network from input to generator initial condition vars.
Arguments:
params: a dictionary of LFADS parameters
lfads_hps: a dictionary of LFADS hyperparameters
key: random.PRNGKey for random bits
x_t: np array input for lfads with leading dimension being time
keep_rate: dropout keep rate
Returns:
3-tuple of np arrays: generator initial condition mean, log variance
and also bidirectional encoding of x_t, with leading dim being time
"""
key, skeys = utils.keygen(key, 3)
# Encode the input
x_t = run_dropout(x_t, next(skeys), keep_rate)
con_ins_t, gen_pre_ics = run_bidirectional_rnn(params['ic_enc'], gru, gru,
x_t)
# Push through to posterior mean and variance for initial conditions.
xenc_t = dropout(con_ins_t, next(skeys), keep_rate)
gen_pre_ics = dropout(gen_pre_ics, next(skeys), keep_rate)
ic_gauss_params = affine(params['gen_ic'], gen_pre_ics)
ic_mean, ic_logvar = np.split(ic_gauss_params, 2, axis=0)
return ic_mean, ic_logvar, xenc_t
def lfads_decode(params, lfads_hps, key, ic_mean, ic_logvar, xenc_t, keep_rate):
"""Run the LFADS network from latent variables to log rates.
Arguments:
params: a dictionary of LFADS parameters
lfads_hps: a dictionary of LFADS hyperparameters
key: random.PRNGKey for random bits
ic_mean: np array of generator initial condition mean
ic_logvar: np array of generator initial condition log variance
xenc_t: np array bidirectional encoding of input (x_t) with leading dim
being time
keep_rate: dropout keep rate
Returns:
    7-tuple of np arrays, all with leading dim being time:
    controller hidden state, inferred input mean, inferred input log var,
    inferred input sample, generator hidden state, factors, and log rates
"""
ntime = lfads_hps['ntimesteps']
key, skeys = utils.keygen(key, 1+2*ntime)
# Since the factors feed back to the controller,
# factors_{t-1} -> controller_t -> sample_t -> generator_t -> factors_t
  # is really one big loop and therefore one RNN.
c = c0 = params['con']['h0']
g = g0 = dists.diag_gaussian_sample(next(skeys), ic_mean,
ic_logvar, lfads_hps['var_min'])
f = f0 = np.zeros((lfads_hps['factors_dim'],))
c_t = []
ii_mean_t = []
ii_logvar_t = []
ii_t = []
gen_t = []
factor_t = []
for xenc in xenc_t:
cin = np.concatenate([xenc, f], axis=0)
c = gru(params['con'], c, cin)
cout = affine(params['con_out'], c)
ii_mean, ii_logvar = np.split(cout, 2, axis=0) # inferred input params
ii = dists.diag_gaussian_sample(next(skeys), ii_mean,
ii_logvar, lfads_hps['var_min'])
g = gru(params['gen'], g, ii)
g = dropout(g, next(skeys), keep_rate)
f = normed_linear(params['factors'], g)
# Save everything.
c_t.append(c)
ii_t.append(ii)
gen_t.append(g)
ii_mean_t.append(ii_mean)
ii_logvar_t.append(ii_logvar)
factor_t.append(f)
c_t = np.array(c_t)
ii_t = np.array(ii_t)
gen_t = np.array(gen_t)
ii_mean_t = np.array(ii_mean_t)
ii_logvar_t = np.array(ii_logvar_t)
factor_t = np.array(factor_t)
lograte_t = batch_affine(params['logrates'], factor_t)
return c_t, ii_mean_t, ii_logvar_t, ii_t, gen_t, factor_t, lograte_t
def lfads(params, lfads_hps, key, x_t, keep_rate):
"""Run the LFADS network from input to output.
Arguments:
params: a dictionary of LFADS parameters
lfads_hps: a dictionary of LFADS hyperparameters
key: random.PRNGKey for random bits
x_t: np array of input with leading dim being time
keep_rate: dropout keep rate
Returns:
A dictionary of np arrays of all LFADS values of interest.
"""
key, skeys = utils.keygen(key, 2)
ic_mean, ic_logvar, xenc_t = \
lfads_encode(params, lfads_hps, next(skeys), x_t, keep_rate)
c_t, ii_mean_t, ii_logvar_t, ii_t, gen_t, factor_t, lograte_t = \
lfads_decode(params, lfads_hps, next(skeys), ic_mean, ic_logvar,
xenc_t, keep_rate)
# As this is tutorial code, we're passing everything around.
return {'xenc_t' : xenc_t, 'ic_mean' : ic_mean, 'ic_logvar' : ic_logvar,
'ii_t' : ii_t, 'c_t' : c_t, 'ii_mean_t' : ii_mean_t,
'ii_logvar_t' : ii_logvar_t, 'gen_t' : gen_t, 'factor_t' : factor_t,
'lograte_t' : lograte_t}
lfads_encode_jit = jit(lfads_encode)
lfads_decode_jit = jit(lfads_decode, static_argnums=(1,))
# Batching accomplished by vectorized mapping.
# We simultaneously map over random keys for forward-pass randomness
# and inputs for batching.
batch_lfads = vmap(lfads, in_axes=(None, None, 0, 0, None))
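# Shape sketch: for a batch of B trials, keys holds one PRNG key per trial and
# x_bxt has shape (B, T, data_dim); both are mapped over their leading axis
# while params and lfads_hps are shared across the batch (in_axes=None).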
def lfads_losses(params, lfads_hps, key, x_bxt, kl_scale, keep_rate):
"""Compute the training loss of the LFADS autoencoder
Arguments:
params: a dictionary of LFADS parameters
lfads_hps: a dictionary of LFADS hyperparameters
key: random.PRNGKey for random bits
x_bxt: np array of input with leading dims being batch and time
    kl_scale: scale on KL
    keep_rate: dropout keep rate
Returns:
a dictionary of all losses, including the key 'total' used for optimization
"""
B = lfads_hps['batch_size']
key, skeys = utils.keygen(key, 2)
keys = random.split(next(skeys), B)
lfads = batch_lfads(params, lfads_hps, keys, x_bxt, keep_rate)
# Sum over time and state dims, average over batch.
# KL - g0
ic_post_mean_b = lfads['ic_mean']
ic_post_logvar_b = lfads['ic_logvar']
kl_loss_g0_b = dists.batch_kl_gauss_gauss(ic_post_mean_b, ic_post_logvar_b,
params['ic_prior'],
lfads_hps['var_min'])
kl_loss_g0_prescale = np.sum(kl_loss_g0_b) / B
kl_loss_g0 = kl_scale * kl_loss_g0_prescale
# KL - Inferred input
ii_post_mean_bxt = lfads['ii_mean_t']
ii_post_var_bxt = lfads['ii_logvar_t']
keys = random.split(next(skeys), B)
kl_loss_ii_b = dists.batch_kl_gauss_ar1(keys, ii_post_mean_bxt,
ii_post_var_bxt, params['ii_prior'],
lfads_hps['var_min'])
kl_loss_ii_prescale = np.sum(kl_loss_ii_b) / B
kl_loss_ii = kl_scale * kl_loss_ii_prescale
# Log-likelihood of data given latents.
lograte_bxt = lfads['lograte_t']
log_p_xgz = np.sum(dists.poisson_log_likelihood(x_bxt, lograte_bxt)) / B
# L2
l2reg = lfads_hps['l2reg']
flatten_lfads = lambda params: flatten_util.ravel_pytree(params)[0]
l2_loss = l2reg * np.sum(flatten_lfads(params)**2)
loss = -log_p_xgz + kl_loss_g0 + kl_loss_ii + l2_loss
all_losses = {'total' : loss, 'nlog_p_xgz' : -log_p_xgz,
'kl_g0' : kl_loss_g0, 'kl_g0_prescale' : kl_loss_g0_prescale,
'kl_ii' : kl_loss_ii, 'kl_ii_prescale' : kl_loss_ii_prescale,
'l2' : l2_loss}
return all_losses
def lfads_training_loss(params, lfads_hps, key, x_bxt, kl_scale, keep_rate):
"""Pull out the total loss for training.
Arguments:
params: a dictionary of LFADS parameters
lfads_hps: a dictionary of LFADS hyperparameters
key: random.PRNGKey for random bits
x_bxt: np array of input with leading dims being batch and time
    kl_scale: scale on KL
    keep_rate: dropout keep rate
Returns:
return the total loss for optimization
"""
losses = lfads_losses(params, lfads_hps, key, x_bxt, kl_scale, keep_rate)
return losses['total']
def posterior_sample_and_average(params, lfads_hps, key, x_txd):
  """Get the denoised LFADS inferred values by posterior sampling and averaging.
Arguments:
params: dictionary of lfads parameters
lfads_hps: dict of LFADS hyperparameters
key: JAX random state
x_txd: 2d np.array time by dim trial to denoise
Returns:
LFADS dictionary of inferred values, averaged over randomness.
"""
batch_size = lfads_hps['batch_size']
skeys = random.split(key, batch_size)
x_bxtxd = np.repeat(np.expand_dims(x_txd, axis=0), batch_size, axis=0)
lfads_dict = batch_lfads(params, lfads_hps, skeys, x_bxtxd, 1.0)
return utils.average_lfads_batch(lfads_dict)
### JIT
# JIT functions are orders of magnitude faster. The first time you use them,
# they will take a couple of minutes to compile, then the second time you use
# them, they will be blindingly fast.
# The static_argnums tells JAX to treat the lfads_hps dictionary as a static
# (compile-time) argument, which means you'll have to pay attention if you
# change the hyperparameters.
# How does one force a recompile?
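# One way to force a recompile (a sketch): re-wrap the function, e.g.
#   lfads_losses_jit = jit(lfads_losses, static_argnums=(1,))
# since a fresh jit wrapper starts with an empty compilation cache and will
# recompile on its next call.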
batch_lfads_jit = jit(batch_lfads, static_argnums=(1,))
lfads_losses_jit = jit(lfads_losses, static_argnums=(1,))
lfads_training_loss_jit = jit(lfads_training_loss, static_argnums=(1,))
posterior_sample_and_average_jit = jit(posterior_sample_and_average, static_argnums=(1,))
|
the-stack_106_21554
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Ocvcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import unhexlify
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import hashlib
import inspect
import json
import logging
import os
import re
import time
import unittest
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from typing import Callable, Optional
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s OCV too low! (Should be %s OCV)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s OCV too high! (Should be %s OCV)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode: int, output: str, fun: Callable, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode: the process return code.
output: [a substring of] the process output.
fun: the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code: Optional[int], message: Optional[str], fun: Callable, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code: the error code returned by the RPC call (defined in src/rpc/protocol.h).
Set to None if checking the error code is not required.
message: [a substring of] the error string returned by the RPC call.
Set to None if checking the error string is not required.
fun: the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
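# Usage sketch (the node object, error code and message are illustrative, not
# defined in this file):
#   assert_raises_rpc_error(-8, "Block height out of range", node.getblockhash, -1)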
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError("Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError("String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError("String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting OCV values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def EncodeDecimal(o):
if isinstance(o, Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
"""Sleep until the predicate resolves to be True.
Warning: Note that this method is not recommended to be used in tests as it is
not aware of the context of the test framework. Using the `wait_until()` members
from `OcvcoinTestFramework` or `P2PInterface` class ensures the timeout is
properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in
`p2p.py` has a preset lock.
"""
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
timeout = timeout * timeout_factor
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
def sha256sum_file(filename):
h = hashlib.sha256()
with open(filename, 'rb') as f:
d = f.read(4096)
while len(d) > 0:
h.update(d)
d = f.read(4096)
return h.digest()
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
coveragedir (str): Directory
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = int(timeout)
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
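# Worked example (with the defaults above and PortSeed.n == 1):
#   p2p_port(0) == 11000 + 0 + (12 * 1) % 4987 == 11012
#   rpc_port(0) == 11000 + 5000 + 0 + 12       == 16012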
def rpc_url(datadir, i, chain, rpchost):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
write_config(os.path.join(datadir, "ocvcoin.conf"), n=n, chain=chain)
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def write_config(config_path, *, n, chain, extra_config=""):
# Translate chain subdirectory name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
chain_name_conf_section = 'test'
else:
chain_name_conf_arg = chain
chain_name_conf_section = chain
with open(config_path, 'w', encoding='utf8') as f:
if chain_name_conf_arg:
f.write("{}=1\n".format(chain_name_conf_arg))
if chain_name_conf_section:
f.write("[{}]\n".format(chain_name_conf_section))
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("fallbackfee=0.0002\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("fixedseeds=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
f.write("natpmp=0\n")
f.write("shrinkdebugfile=0\n")
# To improve SQLite wallet performance so that the tests don't timeout, use -unsafesqlitesync
f.write("unsafesqlitesync=1\n")
f.write(extra_config)
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "ocvcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir, chain):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "ocvcoin.conf")):
with open(os.path.join(datadir, "ocvcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
try:
with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except OSError:
pass
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
def softfork_active(node, key):
"""Return whether a softfork is active."""
return node.getblockchaininfo()['softforks'][key]['active']
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for _ in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert len(utxos) >= count
return utxos
def chain_transaction(node, parent_txids, vouts, value, fee, num_outputs):
"""Build and send a transaction that spends the given inputs (specified
by lists of parent_txid:vout each), with the desired total value and fee,
equally divided up to the desired number of outputs.
Returns a tuple with the txid and the amount sent per output.
"""
send_value = satoshi_round((value - fee)/num_outputs)
inputs = []
for (txid, vout) in zip(parent_txids, vouts):
inputs.append({'txid' : txid, 'vout' : vout})
outputs = {}
for _ in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs, 0, True)
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert len(fulltx['vout']) == num_outputs # make sure we didn't generate a change output
return (txid, send_value)
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for _ in range(128):
txouts.append(txout)
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
from .messages import tx_from_hex
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
tx = tx_from_hex(rawtx)
for txout in txouts:
tx.vout.append(txout)
newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
    # generate 66kB transactions;
    # 14 of them come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if addr == tx["vout"][i]["scriptPubKey"]["address"]:
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
def modinv(a, n):
"""Compute the modular inverse of a modulo n using the extended Euclidean
Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
"""
# TODO: Change to pow(a, -1, n) available in Python 3.8
t1, t2 = 0, 1
r1, r2 = n, a
while r2 != 0:
q = r1 // r2
t1, t2 = t2, t1 - q * t2
r1, r2 = r2, r1 - q * r2
if r1 > 1:
return None
if t1 < 0:
t1 += n
return t1
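# Example: modinv(7, 11) == 8, since (7 * 8) % 11 == 1; on Python 3.8+,
# pow(7, -1, 11) gives the same result.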
class TestFrameworkUtil(unittest.TestCase):
def test_modinv(self):
test_vectors = [
[7, 11],
[11, 29],
[90, 13],
[1891, 3797],
[6003722857, 77695236973],
]
for a, n in test_vectors:
self.assertEqual(modinv(a, n), pow(a, n-2, n))
|
the-stack_106_21555
|
from django.shortcuts import render, redirect, get_object_or_404
from blog.models import Company, City, Meeting, UserNorm, PhoneCalls, Activity
from django.db.models import Q
from blog.forms import PostForm, PlanPhoneCallForm, ActivityForm
from django.contrib import messages
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from datetime import datetime, date, timedelta
from django.views.decorators.http import require_POST
def scheduler(request):
a_form = ActivityForm()
data = Activity.objects.all().order_by('date')
if request.method == "POST":
form = ActivityForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
print(instance)
instance.employee_id = request.user.id
instance.save()
context = {
'a_form': a_form,
'data': data,
}
return render(request, 'blog/scheduler.html', context)
def calendar(request):
today = datetime.now()
weekday = today.isoweekday()
start = datetime.now()-timedelta(days=weekday-1)
first_week = []
second_week = []
records_first_week = []
records_second_week = []
for i in range(0,14):
x = Meeting.objects.filter(Q(leader__id=request.user.id) | Q(trader_1__id=request.user.id),
date__year=start.year,
date__month=start.month,
date__day=start.day).order_by('date')
if i < 7:
records_first_week.append(x)
else:
records_second_week.append(x)
start = start + timedelta(days=1)
start = datetime.now()-timedelta(days=weekday-1)
for i in range(0,14):
if i < 7:
first_week.append(start+timedelta(days=i))
records_first_week[i].dd = start+timedelta(days=i)
else:
second_week.append(start+timedelta(days=i))
# records_second_week[i].d = start+timedelta(days=i)
form = PostForm()
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
print("is valid")
# city = form.cleaned_data['company']
# print(city)
instance = form.save(commit=False)
instance.city = instance.company.city
instance.leader_id = request.user.id
instance.save()
return redirect("blog-calendar")
context = {
'first_week': first_week,
'second_week': second_week,
'records_first_week': records_first_week,
'records_second_week': records_second_week,
        'today': datetime.now(),
'cities': City.objects.all().order_by('name'),
'employee': User.objects.all().exclude(id = request.user.id).order_by('first_name'),
'form': form,
}
return render(request, 'blog/calendar.html', context)
# @require_POST
# def addTodo(request):
# todoform = PostForm(request.POST)
# print("here")
# if todoform.is_valid():
# new_todo = todoform.save()
# print("form saved")
# return redirect('blog-desktop')
def search(request):
if request.method == "GET":
# if query:
query = request.GET.get("q")
results_company = Company.objects.filter(name__contains=query).order_by('name')
results_person = User.objects.filter(Q(first_name__contains=query) | Q(last_name__contains=query))
print(results_company)
print(results_person)
context = {
'query': query,
'results_person': results_person,
'results_company': results_company,
}
return render(request, "blog/search_results.html", context)
def meeting_edit(request):
if request.method == "GET":
if 'd' in request.GET:
print("dddddddddddddd")
query = request.GET.get("d")
item = Meeting.objects.get(id=query)
print(item)
item.status = 1
item.save()
elif 'p' in request.GET:
pass
elif 'c' in request.GET:
query = request.GET.get("c")
item = Meeting.objects.get(id=query)
item.status = 2
item.save()
return redirect('blog-desktop')
def home(request):
return render(request, 'blog/home.html')
def mystats(request):
def color(pct):
if pct < 50:
return '-danger'
elif pct < 100:
return '-warning'
else:
return '-success'
title = "pulpit"
# All meetings for current user
m_all = Meeting.objects.filter(Q(leader__id=request.user.id) | Q(trader_1__id=request.user.id)).order_by('date')
m_planned = m_all.filter(status_id=1, date__gt=datetime.now())
m_done = m_all.filter(status_id=2)
# Monthly meetings
mm_done = m_all.filter(status_id=2, date__month=datetime.now().month)
mm_norm = UserNorm.objects.get(employee_id=request.user.id).meetings_month_norm
mm_pct = int(100*len(mm_done)/mm_norm)
m_to_acc = m_all.filter(status_id=1, date__lte=datetime.now())
# All phonecalls for current user
pc_all = PhoneCalls.objects.filter(employee_id=request.user.id)
# Monthly phonecalls
pcm_done = pc_all.filter(date__month=datetime.now().month)
pcm_norm = UserNorm.objects.get(employee_id=request.user.id).phonecalls_month_norm
pcm_pct = int(100*len(pcm_done)/pcm_norm)
# Weekly phonecalls
pcw_done = pc_all.filter(date__week=date.today().isocalendar()[1])
pcw_norm = UserNorm.objects.get(employee_id=request.user.id).phonecalls_week_norm
pcw_pct = int(100*len(pcw_done)/pcw_norm)
m_form = PostForm()
pc_form = PlanPhoneCallForm()
if request.method == "POST":
m_form = PostForm(request.POST)
pc_form = PlanPhoneCallForm(request.POST)
if m_form.is_valid():
instance = m_form.save(commit=False)
instance.city = instance.company.city
instance.leader_id = request.user.id
instance.status_id = 1
instance.save()
return redirect('blog-desktop')
if pc_form.is_valid():
print("ok")
instance = pc_form.save(commit=False)
instance.employee_id = request.user.id
instance.save()
return redirect('blog-desktop')
context = {
'title': title,
'm_planned': m_planned,
'm_to_acc': m_to_acc,
'm_done_last_5': m_done[:5],
'mm_done_len': len(mm_done),
'mm_norm': mm_norm,
'mm_pct': mm_pct,
'mm_color': color(mm_pct),
'pcw_norm': pcw_norm,
'pcw_done_len': len(pcw_done),
'pcw_pct': pcw_pct,
'pcw_color': color(pcw_pct),
'pcm_norm': pcm_norm,
'pcm_done_len': len(pcm_done),
'pcm_pct': pcm_pct,
'pcm_color': color(pcm_pct),
'm_form': m_form,
'pc_form': pc_form,
}
return render(request, 'blog/mystats.html', context)
def meetings(request):
if request.user.is_staff:
respond_meetings = Meeting.objects.all().order_by('-date')
else:
respond_meetings = Meeting.objects.filter(Q(leader__id=request.user.id) | Q(trader_1__id=request.user.id))
context = {
'meetings_all': respond_meetings,
'user_list': User.objects.all(),
}
return render(request, 'blog/meetings.html', context)
def phonecalls(request):
if request.user.is_staff:
respond = PhoneCalls.objects.all().order_by('-date')
else:
respond = PhoneCalls.objects.filter(employee=request.user.id).order_by('-date')
context = {
'phonecalls_all': respond,
'user_list': User.objects.all(),
}
return render(request, 'blog/phonecalls.html', context)
def login_request(request):
if request.method == "POST":
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
# username = form.cleaned_data.get('username')
# password = form.cleaned_data.get('password')
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
                messages.info(request, f"Logged in as {username}")
return redirect("blog-home")
else:
                messages.error(request, "Invalid username or password")
        else:
            messages.error(request, "Invalid username or password")
form = AuthenticationForm()
return render(request, "blog/login.html", {'form':form})
def logout_request(request):
logout(request)
    messages.info(request, "Logged out!")
return redirect('blog-home')
def add_meeting(request):
context = {
'cities': City.objects.all().order_by('name'),
'employee': User.objects.all().exclude(id = request.user.id).order_by('first_name'),
}
return render(request, 'blog/add_meetings.html', context)
def add_phonecall(request):
pass
def client_detail(request, id=None):
if id:
client = get_object_or_404(Company, id=id)
phonecalls = PhoneCalls.objects.filter(company_id=id)
meetings = Meeting.objects.filter(company_id=id)
context = {
"name": client.name,
"city": client.city,
"phonecalls": phonecalls,
"meetings": meetings,
}
return render(request, "blog/client.html", context)
else:
context = {
"name": "no context",
"city": 1,
}
return render(request, "blog/client.html", context)
|
the-stack_106_21559
|
import py
from pypy.tool.bench.pypyresult import ResultDB, BenchResult
import pickle
def setup_module(mod):
mod.tmpdir = py.test.ensuretemp(__name__)
def gettestpickle(cache=[]):
if cache:
return cache[0]
pp = tmpdir.join("testpickle")
f = pp.open("wb")
pickle.dump({'./pypy-llvm-39474-O3-c_richards': 5}, f)
pickle.dump({'./pypy-llvm-39474-O3-c_richards': 42.0}, f)
f.close()
cache.append(pp)
return pp
def test_unpickle():
pp = gettestpickle()
db = ResultDB()
db.parsepickle(pp)
assert len(db.benchmarks) == 1
l = db.getbenchmarks(name="richards")
assert len(l) == 1
bench = l[0]
l = db.getbenchmarks(name="xyz")
assert not l
def test_BenchResult_cpython():
res = BenchResult("2.3.5_pystone", besttime=2.0, numruns=3)
assert res.executable == "cpython"
assert res.revision == "2.3.5"
assert res.name == "pystone"
assert res.numruns == 3
assert res.besttime == 2.0
def test_BenchResult_pypy():
res = BenchResult("pypy-llvm-39474-O3-c_richards",
besttime=2.0, numruns=3)
assert res.executable == "pypy-llvm-39474-O3-c"
assert res.revision == 39474
assert res.name == "richards"
assert res.numruns == 3
assert res.besttime == 2.0
|
the-stack_106_21560
|
#
# tests/test_http_request.py
#
import pytest
import growler
from unittest import mock
@pytest.fixture
def rt():
return growler.middleware.ResponseTime()
@pytest.fixture
def req():
return mock.MagicMock()
@pytest.fixture
def res():
m = mock.MagicMock()
m.headers = []
return m
def test_standard_responsetime_format(rt):
assert rt.format_timediff(4.2e-2) == '42.0'
def test_rounding_responsetime_format():
rt = growler.middleware.ResponseTime(digits=5)
assert rt.format_timediff(4.22384132e-2) == '42.23841'
def test_units_responsetime_format():
rt = growler.middleware.ResponseTime(digits=5, units='s')
assert rt.format_timediff(4.22384132e-2) == '0.04224'
def test_response(rt, req, res):
rt(req, res)
assert res.events.on.called
cb = res.events.on.call_args_list[0][0][1]
assert not res.set.called
cb()
assert res.set.called
assert res.set.call_args_list[0][0][0] == 'X-Response-Time'
assert res.set.call_args_list[0][0][1].endswith('ms')
def test_response_noclobber(rt, req, res):
res.headers = ['X-Response-Time']
rt.clobber_header = False
rt(req, res)
assert res.events.on.called
cb = res.events.on.call_args_list[0][0][1]
assert not res.set.called
cb()
assert not res.set.called
def test_response_clobber(rt, req, res):
res.headers = ['X-Response-Time']
rt.clobber_header = True
rt(req, res)
assert res.events.on.called
cb = res.events.on.call_args_list[0][0][1]
assert not res.set.called
cb()
assert res.set.called
def test_response_nosuffix(rt, req, res):
rt.suffix = False
rt.clobber_header = False
rt(req, res)
assert res.events.on.called
cb = res.events.on.call_args_list[0][0][1]
assert not res.set.called
cb()
assert res.set.called
assert not res.set.call_args_list[0][0][1].endswith('ms')
def test_response_set_header(req, res):
header = 'Fooo'
rt = growler.middleware.ResponseTime(header=header)
rt(req, res)
assert res.events.on.called
cb = res.events.on.call_args_list[0][0][1]
assert not res.set.called
cb()
assert res.set.called
assert res.set.call_args_list[0][0][0] == header
def test_response_log_out(req, res):
m = mock.MagicMock()
rt = growler.middleware.ResponseTime(log=m)
rt(req, res)
m.assert_not_called()
cb = res.events.on.call_args_list[0][0][1]
cb()
# print(m.mock_calls)
assert m.info.called
assert isinstance(m.info.call_args_list[0][0][0], str)
|
the-stack_106_21561
|
import scipy.misc
import random
xs = []
ys = []
#points to the end of the last batch
train_batch_pointer = 0
val_batch_pointer = 0
#read data.txt
with open("driving_dataset/data.txt") as f:
for line in f:
xs.append("driving_dataset/" + line.split()[0])
#the paper by Nvidia uses the inverse of the turning radius,
#but steering wheel angle is proportional to the inverse of turning radius
#so the steering wheel angle in radians is used as the output
ys.append(float(line.split()[1]) * scipy.pi / 180)
#get number of images
num_images = len(xs)
#shuffle list of images
c = list(zip(xs, ys))
random.shuffle(c)
xs, ys = zip(*c)
train_xs = xs[:int(len(xs) * 0.8)]
train_ys = ys[:int(len(xs) * 0.8)]
val_xs = xs[-int(len(xs) * 0.2):]
val_ys = ys[-int(len(xs) * 0.2):]
num_train_images = len(train_xs)
num_val_images = len(val_xs)
def LoadTrainBatch(batch_size):
global train_batch_pointer
x_out = []
y_out = []
for i in range(0, batch_size):
x_out.append(scipy.misc.imresize(scipy.misc.imread(train_xs[(train_batch_pointer + i) % num_train_images])[-150:], [66, 200]) / 255.0)
y_out.append([train_ys[(train_batch_pointer + i) % num_train_images]])
train_batch_pointer += batch_size
return x_out, y_out
def LoadValBatch(batch_size):
global val_batch_pointer
x_out = []
y_out = []
for i in range(0, batch_size):
x_out.append(scipy.misc.imresize(scipy.misc.imread(val_xs[(val_batch_pointer + i) % num_val_images])[-150:], [66, 200]) / 255.0)
y_out.append([val_ys[(val_batch_pointer + i) % num_val_images]])
val_batch_pointer += batch_size
return x_out, y_out
|
the-stack_106_21562
|
#!/usr/bin/env python
# -*- no-plot -*-
"""
Calculate Mandelbrot set using OpenCL
"""
import pyopencl as cl
from timeit import default_timer as timer
import numpy as np
import gr
platform = cl.get_platforms()
gpu_devices = platform[0].get_devices(device_type=cl.device_type.GPU)
info_value = gpu_devices[0].get_info(getattr(cl.device_info, 'EXTENSIONS'))
if 'cl_khr_fp64' not in info_value:
print("GPU has no support for double floating-point precision")
exit(-1)
ctx = cl.Context(devices=gpu_devices)
queue = cl.CommandQueue(ctx)
prg = cl.Program(ctx, """
#pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable
__kernel void mandelbrot(__global double2 *q, __global ushort *output,
double const min_x, double const max_x,
double const min_y, double const max_y,
ushort const width, ushort const height,
ushort const iters)
{
int ci = 0, inc = 1;
int gid = get_global_id(0);
double nreal, real = 0;
double imag = 0;
q[gid].x = min_x + (gid % width) * (max_x - min_x) / width;
q[gid].y = min_y + (gid / width) * (max_y - min_y) / height;
output[gid] = iters;
for (int curiter = 0; curiter < iters; curiter++) {
nreal = real * real - imag * imag + q[gid].x;
imag = 2 * real * imag + q[gid].y;
real = nreal;
if (real * real + imag * imag >= 4) {
output[gid] = ci;
return;
}
ci += inc;
if (ci == 0 || ci == 255)
inc = -inc;
}
}
""").build()
def calc_fractal(q, min_x, max_x, min_y, max_y, width, height, iters):
global ctx, queue, prg
output = np.empty(q.shape, dtype=np.uint16)
mf = cl.mem_flags
q_opencl = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=q)
output_opencl = cl.Buffer(ctx, mf.WRITE_ONLY, output.nbytes)
prg.mandelbrot(queue, output.shape, None, q_opencl, output_opencl,
np.double(min_x), np.double(max_x),
np.double(min_y), np.double(max_y),
np.uint16(width), np.uint16(height), np.uint16(iters))
cl.enqueue_copy(queue, output, output_opencl).wait()
return output
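# The OpenCL kernel above colours each point with a "ping-pong" counter: ci
# walks from 0 up to 255 and back (inc flips sign at the ends), so long escape
# times cycle through the palette instead of saturating at the iteration cap.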
def create_fractal(min_x, max_x, min_y, max_y, width, height, iters):
q = np.zeros(width * height).astype(np.complex128)
output = calc_fractal(q, min_x, max_x, min_y, max_y, width, height, iters)
return output
x = -0.9223327810370947027656057193752719757635
y = 0.3102598350874576432708737495917724836010
f = 0.5
for i in range(200):
start = timer()
pixels = create_fractal(x-f, x+f, y-f, y+f, 500, 500, 400)
dt = timer() - start
print("Mandelbrot created in %f s" % dt)
ca = 1000.0 + pixels.ravel()
gr.clearws()
gr.setviewport(0, 1, 0, 1)
gr.setcolormap(13)
gr.cellarray(0, 1, 0, 1, 500, 500, ca)
gr.updatews()
f *= 0.9
|
the-stack_106_21563
|
from random import random
import numpy as np
from math import sqrt
class Blob(object):
"""Blob is the creature that is evolving in this simulation"""
def __init__(self, x, y, speed, size, energy, sense, name='000'):
self.speed = speed
self.size = size
self.energy = energy
self.sense = sense
self.food = 0
self.eps_mutation = 0.3
self.x = x
self.y = y
self.safe = False
self.dead = False
self.can_clone = False
self.is_moving = True
self.nothing_todo = False
self.name = name
self.heritage = {'speed':speed,'size':size,'energy':energy,'sense':sense}
def move(self, map):
if self.energy >= self.energy_cost():
if self.food < 2:
if self.sense_food(map):
print('sense food hmmm')
food_positions = list(filter(self.caneat_food,map.get_foods_positions(self)))
blob_positions = list(filter(self.caneat_food,map.get_blobs_positions(self)))
blob_position = None
food_position = None
if blob_positions : blob_position = self.choice(blob_positions)
if food_positions : food_position = self.choice(food_positions)
self.decrease_energy(self.energy_cost())
if blob_position:
print('\n\t\t\t eating a friend of mine\n')
self.eat_blob(map,blob_position)
elif food_position:
self.eat_food(map,food_position)
else:
print('discovering')
self.discover(map)
else:
print('going home')
self.go_home(map)
else:
self.is_moving = False
self.nothing_todo = True
def go_home(self, map):
        distance_home = min([
            self.distance((0, self.y)),      # distance to the x = 0 edge
            self.distance((map.h, self.y)),  # distance to the x = map.h edge
            self.distance((self.x, 0)),      # distance to the y = 0 edge
            self.distance((self.x, map.w))   # distance to the y = map.w edge
        ])
if self.cango_home(distance_home):
self.safe = True
self.x = 0
if self.food >= 2: self.can_clone = True
self.is_moving = False
self.nothing_todo = True
else:
self.is_moving = False
self.nothing_todo = True
self.dead = True
def discover(self, map):
for s in range(self.speed):
if self.energy >= self.energy_cost():
direction = np.random.randint(8)
self.move_in_direction(direction, map)
self.decrease_energy(self.energy_cost())
self.move(map)
else:
self.dead = True
break
def move_in_direction(self, direction, map):
if direction == 0:
self.x += 1
elif direction == 1:
self.x -= 1
elif direction == 2:
self.y -= 1
elif direction == 3:
self.y += 1
elif direction == 4:
self.x -= 1
self.y -= 1
elif direction == 5:
self.x -= 1
self.y += 1
elif direction == 6:
self.x += 1
self.y -= 1
elif direction == 7:
self.x += 1
self.y += 1
if self.x < 0 : self.x = 0
if self.x >= map.h : self.x = map.h - 1
if self.y < 0 : self.y = 0
if self.y >= map.w : self.y = map.w - 1
def cango_home(self, distance):
while self.energy >= self.energy_cost() and distance > 0:
distance -= 1
self.decrease_energy(self.energy_cost())
if distance <= 0:
return True
else:
return False
def go_to(self, position):
if type(position) != tuple or len(position) != 2:
raise TypeError('position must be a tuple of two elements')
if type(position[0]) != int or type(position[1]) != int:
raise TypeError('elements of the tuple must be integers')
if position[0] < 0 or position[1] < 0:
raise ValueError('The positions can\'t be negativ')
self.x = position[0]
self.y = position[1]
def eat_food(self, map, position):
self.food += 1
self.x = position[0]
self.y = position[1]
map.remove_food(position)
def eat_blob(self, map, position):
self.food += 2
self.x = position[0]
self.y = position[1]
map.kill_blob(position)
def caneat_food(self, p):
return self.senseable(p[0],p[1])
def senseable(self, x, y):
if type(x) != int or type(y) != int:
raise TypeError('the 2nd and 3rd argment must be integers')
if x < 0 or y < 0:
raise ValueError('The positions can\'t be negativ')
max_x = self.x + self.sense
min_x = self.x - self.sense
max_y = self.y + self.sense
min_y = self.y - self.sense
if x in range(min_x,max_x+1) and y in range(min_y,max_y+1):
return True
return False
def decrease_energy(self, energy_cost):
self.energy -= energy_cost
def clone(self):
if self.food < 2:
return None
speed = self.speed
if random() < self.eps_mutation:
speed = self.mutation(self.speed)
size = self.size
if random() < self.eps_mutation:
size = self.mutation(self.size)
sense = self.sense
if random() < self.eps_mutation:
sense = self.mutation(self.sense)
return Blob(self.x, self.y, speed, size, self.heritage['energy'], sense, name=self.name+'1')
def is_possible(self, p):
distance = self.distance(p)
if distance <= self.move_formula():
return True
else:
return False
def sense_food(self, map):
max_x = self.x + self.sense
min_x = self.x - self.sense
max_y = self.y + self.sense
min_y = self.y - self.sense
for x in range(min_x,max_x+1):
for y in range(min_y, max_y+1):
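                # NOTE: the bounds below look hardcoded for a 10x10 board;
                # using map.h and map.w here would generalise the sense check.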
if (x > 0 and x < 10) and (y > 0 and y < 10):
                    if map.food_board[x, y] is not None or map.blob_position(x, y):
return True
return False
def sense_positions(self,map):
max_x = self.x + self.sense
min_x = self.x - self.sense
max_y = self.y + self.sense
min_y = self.y - self.sense
return [ (x, y) for x in range(min_x,max_x+1) for y in range(min_y, max_y+1) if x >= 0 and y >= 0]
@staticmethod
def mutation(trait):
new_trait = trait
if random() > 0.5:
new_trait += np.random.randint(trait + 1)
else:
new_trait -= np.random.randint(trait + 1)
if new_trait == 0: return 1
return new_trait
distance = lambda self, p: sqrt((self.x - p[0])**2+(self.y - p[1])**2)
energy_cost = lambda self: 2 * self.speed + self.size**2 + self.sense
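    # e.g. a blob with speed=2, size=1, sense=2 pays 2*2 + 1**2 + 2 = 7 energy per move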
def reset(self):
self.energy = self.heritage['energy']
self.speed = self.heritage['speed']
self.sense = self.heritage['sense']
self.size = self.heritage['size']
self.food = 0
self.is_moving = True
self.safe = True
self.dead = False
self.nothing_todo = False
def die(self):
self.nothing_todo = True
self.dead = True
self.safe = False
self.is_moving = False
@staticmethod
def choice(tuple_list):
index = np.random.randint(0,len(tuple_list))
return tuple_list[index]
def __str__(self):
return f'blob{self.name} : {(self.x, self.y)}, food : {self.food}, speed {self.speed}, energy {self.energy}, size {self.size}, sense {self.sense} energy_cost : {self.energy_cost()}'
|
the-stack_106_21565
|
import json
import os
from flask import Blueprint
from flask import current_app
from flask import redirect
from flask import render_template
from flask import url_for
from flask_login import login_required
from modules.box__default.settings.helpers import get_setting
from modules.box__default.settings.helpers import set_setting
from shopyo.api.file import get_folders
# from flask import flash
# from flask import request
# from shopyo.api.html import notify_success
# from init import db
# from modules.box__default.settings.models import Settings
# from shopyo.api.forms import flash_errors
dirpath = os.path.dirname(os.path.abspath(__file__))
module_info = {}
with open(dirpath + "/info.json") as f:
module_info = json.load(f)
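# info.json is expected to provide at least "module_name" and "url_prefix";
# both are used below to name the blueprint and mount its routes.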
globals()["{}_blueprint".format(module_info["module_name"])] = Blueprint(
"{}".format(module_info["module_name"]),
__name__,
template_folder="templates",
url_prefix=module_info["url_prefix"],
)
module_settings = {"module_info": module_info}
module_blueprint = globals()["{}_blueprint".format(module_info["module_name"])]
@module_blueprint.route("/")
@login_required
def index():
context = {}
front_themes_path = os.path.join(
current_app.config["BASE_DIR"], "static", "themes", "front"
)
all_front_info = {}
front_theme_folders = get_folders(front_themes_path)
for folder in front_theme_folders:
theme_path = os.path.join(front_themes_path, folder)
info_path = os.path.join(theme_path, "info.json")
with open(info_path) as f:
all_front_info[folder] = json.load(f)
back_themes_path = os.path.join(
current_app.config["BASE_DIR"], "static", "themes", "back"
)
all_back_info = {}
back_theme_folders = get_folders(back_themes_path)
for folder in back_theme_folders:
theme_path = os.path.join(back_themes_path, folder)
info_path = os.path.join(theme_path, "info.json")
with open(info_path) as f:
all_back_info[folder] = json.load(f)
active_front_theme = get_setting("ACTIVE_FRONT_THEME")
active_back_theme = get_setting("ACTIVE_BACK_THEME")
context.update(
{
"all_front_info": all_front_info,
"all_back_info": all_back_info,
"active_front_theme": active_front_theme,
"active_back_theme": active_back_theme,
}
)
context.update(module_settings)
return render_template(
"{}/index.html".format(module_info["module_name"]), **context
)
@module_blueprint.route("/activate/front/<theme_name>")
@login_required
def activate_front_theme(theme_name):
set_setting("ACTIVE_FRONT_THEME", theme_name)
# with app.app_context():
# current_app.jinja_loader,
# print(current_app.jinja_loader.list_templates())
return redirect(url_for("{}.index".format(module_info["module_name"])))
@module_blueprint.route("/activate/back/<theme_name>")
@login_required
def activate_back_theme(theme_name):
set_setting("ACTIVE_BACK_THEME", theme_name)
# with app.app_context():
# current_app.jinja_loader,
# print(current_app.jinja_loader.list_templates())
return redirect(url_for("{}.index".format(module_info["module_name"])))
|
the-stack_106_21567
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import simplejson as json
import re
from functools import wraps
import pgadmin.browser.server_groups.servers as servers
from flask import render_template, request, jsonify, current_app
from flask_babel import gettext
from pgadmin.browser.collection import CollectionNodeModule, PGChildModule
from pgadmin.browser.server_groups.servers.utils import parse_priv_from_db, \
parse_priv_to_db
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone, bad_request
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
"""
This module is responsible for generating two nodes
1) Schema
2) Catalog
We have created a single file because catalog & schema share the same
functionality; the only difference is that we cannot perform DDL/DML operations
on a catalog. It also allows us to share the same submodules for both the
catalog and schema modules.
This modules uses separate template paths for each respective node
- templates/catalog for Catalog node
- templates/schema for Schema node
[Each path contains node specific js files as well as sql template files.]
"""
class SchemaModule(CollectionNodeModule):
"""
class SchemaModule(CollectionNodeModule)
A module class for Schema node derived from CollectionNodeModule.
Methods:
-------
* __init__(*args, **kwargs)
- Method is used to initialize the Schema and it's base module.
* get_nodes(gid, sid, did)
- Method is used to generate the browser collection node.
* node_inode()
      - Method is overridden from its base class to make the node a leaf node.
* script_load()
- Load the module script for schema, when any of the server node is
initialized.
"""
NODE_TYPE = 'schema'
COLLECTION_LABEL = gettext("Schemas")
def __init__(self, *args, **kwargs):
"""
Method is used to initialize the SchemaModule and it's base module.
Args:
*args:
**kwargs:
"""
self.min_ver = None
self.max_ver = None
super(SchemaModule, self).__init__(*args, **kwargs)
def get_nodes(self, gid, sid, did):
"""
Generate the collection node
"""
yield self.generate_browser_collection_node(did)
@property
def script_load(self):
"""
Load the module script for schema, when any of the server node is
initialized.
"""
return servers.ServerModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
class CatalogModule(SchemaModule):
"""
class CatalogModule(SchemaModule)
A module class for the catalog schema node derived from SchemaModule.
"""
NODE_TYPE = 'catalog'
COLLECTION_LABEL = gettext("Catalogs")
schema_blueprint = SchemaModule(__name__)
catalog_blueprint = CatalogModule(__name__)
def check_precondition(f):
"""
This function behaves as a decorator that checks the database
connection before running the view. It also attaches the manager,
conn & template_path properties to the method's instance.
Assumptions:
This function will always be used as a decorator of a class method.
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
kwargs['sid']
)
if not self.manager:
return gone(errormsg="Could not find the server.")
self.conn = self.manager.connection(did=kwargs['did'])
# Set the template path for the SQL scripts
if self.manager.server_type == 'gpdb':
_temp = self.gpdb_template_path(self.manager.version)
elif self.manager.server_type == 'ppas':
_temp = self.ppas_template_path(self.manager.version)
else:
_temp = self.pg_template_path(self.manager.version)
self.template_path = self.template_initial + '/' + _temp
return f(*args, **kwargs)
return wrap
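# Illustrative note (not part of the original source): a view decorated with
# check_precondition can rely on the attributes attached above, e.g.
#
#   @check_precondition
#   def some_view(self, gid, sid, did):
#       # self.manager, self.conn and self.template_path are now populated
#       SQL = render_template("/".join([self.template_path, 'sql/nodes.sql']))
#       status, res = self.conn.execute_dict(SQL)
#
# `some_view` is a hypothetical method name used only for illustration.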
class SchemaView(PGChildNodeView):
"""
This class is responsible for generating routes for schema node.
Methods:
-------
* __init__(**kwargs)
- Method is used to initialize the SchemaView and its base view.
* list()
- This function is used to list all the schema nodes within the
collection.
* nodes()
- This function is used to create all the child nodes within the
collection. Here it will create all the schema nodes.
* properties(gid, sid, did, scid)
- This function will show the properties of the selected schema node.
* create(gid, sid, did, scid)
- This function will create the new schema object.
* update(gid, sid, did, scid)
- This function will update the data for the selected schema node.
* delete(self, gid, sid, scid):
- This function will drop the schema object
* msql(gid, sid, did, scid)
- This function is used to return modified SQL for the selected schema
node.
* get_sql(data, scid)
- This function will generate sql from model data
* sql(gid, sid, did, scid):
- This function will generate sql to show it in sql pane for the schema
node.
* dependency(gid, sid, did, scid):
- This function will generate the dependency list to show it in the
dependency pane for the selected schema node.
* dependent(gid, sid, did, scid):
- This function will generate dependent list to show it in dependent
pane for the selected schema node.
"""
node_type = schema_blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'}
]
ids = [
{'type': 'int', 'id': 'scid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'children': [{'get': 'children'}],
'nodes': [{'get': 'nodes'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'delete': [{'delete': 'delete'}]
})
def __init__(self, *args, **kwargs):
"""
Initialize the variables used by methods of SchemaView.
"""
super(SchemaView, self).__init__(*args, **kwargs)
self.manager = None
self.conn = None
self.template_path = None
self.template_initial = 'schema'
@staticmethod
def ppas_template_path(ver):
"""
Returns the template path for PPAS servers.
"""
return 'ppas/#{0}#'.format(ver)
@staticmethod
def pg_template_path(ver):
"""
Returns the template path for PostgreSQL servers.
"""
return 'pg/#{0}#'.format(ver)
@staticmethod
def gpdb_template_path(ver):
"""
Returns the template path for GreenPlum servers.
"""
return '#gpdb#{0}#'.format(ver)
def format_request_acls(self, data, modified=False, specific=None):
acls = {}
try:
acls = render_template(
"/".join([self.template_path, 'allowed_privs.json'])
)
acls = json.loads(acls, encoding='utf-8')
except Exception as e:
current_app.logger.exception(e)
# Privileges
for aclcol in acls:
if specific is not None:
if aclcol not in specific:
continue
if aclcol in data:
allowedacl = acls[aclcol]
if modified:
for modifier in ['added', 'changed', 'deleted']:
if modifier in data[aclcol]:
data[aclcol][modifier] = parse_priv_to_db(
data[aclcol][modifier], allowedacl['acl']
)
else:
data[aclcol] = parse_priv_to_db(data[aclcol], allowedacl['acl'])
return acls
@staticmethod
def formatdbacl(acl):
"""
Args:
acl: Privileges from ACL query
Returns:
Formatted output required for client side parsing
"""
# Reset any data for that acl if its already present in result set
data = dict()
for row in acl['rows']:
priv = parse_priv_from_db(row)
if row['deftype'] in data:
data[row['deftype']].append(priv)
else:
data[row['deftype']] = [priv]
return data
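# Illustrative sketch (assumed sample values, not taken from the source):
# formatdbacl groups the privileges parsed by parse_priv_from_db under the
# row's 'deftype' key, e.g.
#
#   acl = {'rows': [{'deftype': 'nspacl', ...}, {'deftype': 'defaclacl', ...}]}
#   SchemaView.formatdbacl(acl)
#   # -> {'nspacl': [<parsed priv>], 'defaclacl': [<parsed priv>]}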
def _formatter_no_defacl(self, data, scid=None):
"""
Args:
data: Result of properties query
scid: Schema OID
Returns:
It will return formatted output of collections like
security labels, privileges
"""
# Need to format security labels according to client js collection
seclabels = []
if 'seclabels' in data and data['seclabels'] is not None:
for sec in data['seclabels']:
sec = re.search(r'([^=]+)=(.*$)', sec)
seclabels.append({
'provider': sec.group(1),
'label': sec.group(2)
})
data['seclabels'] = seclabels
# We need to parse & convert ACL coming from database to json format
SQL = render_template(
"/".join([self.template_path, 'sql/acl.sql']),
_=gettext,
scid=scid
)
status, acl = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=acl)
data.update(self.formatdbacl(acl))
return data
def _formatter(self, data, scid=None):
self._formatter_no_defacl(data, scid)
# We need to parse & convert DEFAULT ACL coming from
# database to json format
SQL = render_template(
"/".join([self.template_path, 'sql/defacl.sql']),
_=gettext,
scid=scid
)
status, defacl = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=defacl)
data.update(self.formatdbacl(defacl))
return data
@check_precondition
def list(self, gid, sid, did):
"""
This function is used to list all the schema nodes within the collection.
Args:
gid: Server group ID
sid: Server ID
did: Database ID
Returns:
JSON of available schema nodes
"""
SQL = render_template(
"/".join([self.template_path, 'sql/properties.sql']),
_=gettext,
show_sysobj=self.blueprint.show_system_objects
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid=None):
"""
This function will create all the child nodes within the collection.
Here it will create all the schema nodes.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
Returns:
JSON of available schema child nodes
"""
res = []
SQL = render_template(
"/".join([self.template_path, 'sql/nodes.sql']),
show_sysobj=self.blueprint.show_system_objects,
_=gettext,
scid=scid
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
icon = 'icon-{0}'.format(self.node_type)
if scid is not None:
if len(rset['rows']) == 0:
return gone(gettext("""
Could not find the schema in the database.
It may have been removed by another user.
"""))
row = rset['rows'][0]
return make_json_response(
data=self.blueprint.generate_browser_node(
row['oid'],
did,
row['name'],
icon=icon,
can_create=row['can_create'],
has_usage=row['has_usage']
),
status=200
)
for row in rset['rows']:
res.append(
self.blueprint.generate_browser_node(
row['oid'],
did,
row['name'],
icon=icon,
can_create=row['can_create'],
has_usage=row['has_usage']
)
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def node(self, gid, sid, did, scid):
"""
This function will fetch the properties of the schema node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
Returns:
JSON of given schema child node
"""
SQL = render_template(
"/".join([self.template_path, 'sql/nodes.sql']),
show_sysobj=self.blueprint.show_system_objects,
_=gettext,
scid=scid
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
if scid is not None:
if len(rset['rows']) == 0:
return gone(gettext("""
Could not find the schema in the database.
It may have been removed by another user.
"""))
icon = 'icon-{0}'.format(self.node_type)
for row in rset['rows']:
return make_json_response(
data=self.blueprint.generate_browser_node(
row['oid'],
did,
row['name'],
icon=icon,
can_create=row['can_create'],
has_usage=row['has_usage']
),
status=200
)
@check_precondition
def properties(self, gid, sid, did, scid):
"""
This function will show the properties of the selected schema node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
Returns:
JSON of selected schema node
"""
SQL = render_template(
"/".join([self.template_path, 'sql/properties.sql']),
scid=scid,
_=gettext,
show_sysobj=self.blueprint.show_system_objects
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("Could not find the schema in the database. It may have been removed by another user."
))
# Making copy of output for future use
copy_data = dict(res['rows'][0])
copy_data = self._formatter(copy_data, scid)
return ajax_response(
response=copy_data,
status=200
)
@check_precondition
def create(self, gid, sid, did):
"""
This function will create a schema object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
required_args = {
'name': 'Name'
}
for arg in required_args:
if arg not in data:
return make_json_response(
status=410,
success=0,
errormsg=gettext(
"Could not find the required parameter (%s)." %
required_args[arg]
)
)
try:
self.format_request_acls(data)
SQL = render_template(
"/".join([self.template_path, 'sql/create.sql']),
data=data, conn=self.conn, _=gettext
)
status, res = self.conn.execute_scalar(SQL)
if not status:
return make_json_response(
status=410,
success=0,
errormsg=res + '\n' +
'Operation failed while running create statement'
)
# we need the oid to add the object to the browser tree;
# the sql below fetches it
SQL = render_template(
"/".join([self.template_path, 'sql/oid.sql']),
schema=data['name'], _=gettext
)
status, scid = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=scid)
icon = 'icon-{0}'.format(self.node_type)
return jsonify(
node=self.blueprint.generate_browser_node(
scid,
did,
data['name'],
icon=icon
)
)
except Exception as e:
current_app.logger.exception(e)
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid):
"""
This function will update an existing schema object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
SQL, name = self.get_sql(gid, sid, data, scid)
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return jsonify(
node=self.blueprint.generate_browser_node(
scid,
did,
name,
icon="icon-%s" % self.node_type
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def delete(self, gid, sid, did, scid):
"""
This function will delete an existing schema object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
"""
try:
# Get name for schema from scid
SQL = render_template(
"/".join([self.template_path, 'sql/get_name.sql']),
_=gettext,
scid=scid
)
status, name = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=name)
if name is None:
return make_json_response(
status=410,
success=0,
errormsg=gettext(
'Error: Object not found.'
),
info=gettext(
'The specified schema could not be found.\n'
)
)
# drop schema
SQL = render_template(
"/".join([self.template_path, 'sql/delete.sql']),
_=gettext, name=name, conn=self.conn,
cascade=True if self.cmd == 'delete' else False
)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=gettext("Schema dropped"),
data={
'id': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
current_app.logger.exception(e)
return internal_server_error(errormsg=str(e))
@check_precondition
def msql(self, gid, sid, did, scid=None):
"""
This function will generate modified sql for schema object based on
the input from the user. This route is used by the SQL tab in the
edit/create dialog.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID (When working with existing schema node)
"""
data = dict()
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
try:
SQL, name = self.get_sql(gid, sid, data, scid)
if SQL and SQL.strip('\n') and SQL.strip(' '):
return make_json_response(
data=SQL.strip('\n'),
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid=None):
"""
This function will generate sql from model data received from client
"""
if scid is not None:
SQL = render_template(
"/".join([self.template_path, 'sql/properties.sql']),
_=gettext, scid=scid,
show_sysobj=self.blueprint.show_system_objects
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
old_data = res['rows'][0]
# old_data contains all the existing data for requested schema
old_data = self._formatter(old_data, scid)
# if name is not present in request data then use old name
if 'name' not in data:
data['name'] = old_data['name']
# Privileges and Default privileges
self.format_request_acls(data, True)
SQL = render_template(
"/".join([self.template_path, 'sql/update.sql']),
_=gettext, data=data, o_data=old_data, conn=self.conn
)
return SQL, data['name'] if 'name' in data else old_data['name']
else:
required_args = ['name']
for arg in required_args:
if arg not in data:
return " -- " + gettext("Definition incomplete.")
# Privileges
self.format_request_acls(data)
SQL = render_template(
"/".join([self.template_path, 'sql/create.sql']),
data=data, conn=self.conn, _=gettext
)
return SQL, data['name']
@check_precondition
def sql(self, gid, sid, did, scid):
"""
This function will generate reverse engineered sql for the schema
object.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
"""
SQL = render_template(
"/".join([self.template_path, 'sql/properties.sql']),
scid=scid, _=gettext
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the schema in the database. It may have been removed by another user."""))
data = res['rows'][0]
data = self._formatter(data, scid)
# Privileges and Default privileges
self.format_request_acls(data)
# Render sql from create & alter sql using properties & acl data
SQL = ''
SQL = render_template(
"/".join([self.template_path, 'sql/create.sql']),
_=gettext, data=data, conn=self.conn
)
sql_header = u"-- SCHEMA: {0}\n\n-- ".format(data['name'])
# drop schema
sql_header += render_template(
"/".join([self.template_path, 'sql/delete.sql']),
_=gettext, name=data['name'], conn=self.conn, cascade=False)
SQL = sql_header + '\n\n' + SQL
return ajax_response(response=SQL.strip("\n"))
@check_precondition
def dependents(self, gid, sid, did, scid):
"""
This function gets the dependents and returns an ajax response
for the schema node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
"""
dependents_result = self.get_dependents(self.conn, scid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid):
"""
This function gets the dependencies and returns an ajax response
for the schema node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
"""
dependencies_result = self.get_dependencies(self.conn, scid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def children(self, **kwargs):
"""Build a list of treeview nodes from the child nodes."""
SQL = render_template(
"/".join([self.template_path, 'sql/is_catalog.sql']),
scid=kwargs['scid'], _=gettext
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""
Could not find the schema in the database.
It may have been removed by another user.
"""))
data = res['rows'][0]
backend_support_keywords = kwargs.copy()
backend_support_keywords['is_catalog'] = data['is_catalog']
backend_support_keywords['db_support'] = data['db_support']
backend_support_keywords['schema_name'] = data['schema_name']
nodes = []
for module in self.blueprint.submodules:
if isinstance(module, PGChildModule):
if self.manager is not None and \
module.BackendSupported(
self.manager, **backend_support_keywords
):
nodes.extend(module.get_nodes(**kwargs))
else:
nodes.extend(module.get_nodes(**kwargs))
return make_json_response(data=nodes)
class CatalogView(SchemaView):
"""
This class is responsible for generating routes for catalog schema node.
Methods:
-------
* __init__(**kwargs)
- Method is used to initialize the CatalogView and it's base view.
* create(gid, sid, did, scid)
- Raise an error - we cannot create a catalog.
* update(gid, sid, did, scid)
- This function will update the data for the selected catalog node
* delete(self, gid, sid, scid):
- Raise an error - we cannot delete a catalog.
* get_sql(data, scid)
- This function will generate sql from model data
"""
node_type = catalog_blueprint.node_type
def __init__(self, *args, **kwargs):
"""
Initialize the variables used by methods of SchemaView.
"""
super(CatalogView, self).__init__(*args, **kwargs)
self.template_initial = 'catalog'
def _formatter(self, data, scid=None):
"""
Overriding _formatter because we won't show the Default
privileges with the catalog schema.
"""
self._formatter_no_defacl(data, scid)
return data
def get_sql(self, gid, sid, data, scid=None):
"""
This function will generate sql from model data
"""
if scid is None:
return bad_request('Cannot create a catalog schema!')
return super(CatalogView, self).get_sql(gid, sid, data, scid)
@check_precondition
def sql(self, gid, sid, did, scid):
"""
This function will generate reverse engineered sql for schema object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
"""
SQL = render_template(
"/".join([self.template_path, 'sql/properties.sql']),
scid=scid, _=gettext
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""
Could not find the schema in the database.
It may have been removed by another user.
"""))
old_data = res['rows'][0]
old_data = self._formatter(old_data, scid)
# Privileges
self.format_request_acls(old_data, specific=['nspacl'])
# Render sql from create & alter sql using properties & acl data
SQL = ''
SQL = render_template(
"/".join([self.template_path, 'sql/create.sql']),
_=gettext, data=old_data, conn=self.conn
)
sql_header = u"""
-- CATALOG: {0}
-- DROP SCHEMA {0};
""".format(old_data['name'])
SQL = sql_header + SQL
return ajax_response(response=SQL.strip("\n"))
SchemaView.register_node_view(schema_blueprint)
CatalogView.register_node_view(catalog_blueprint)
|
the-stack_106_21568
|
#!/usr/bin/env python
import os
import re
import fire
pre_release_placeholder = 'SNAPSHOT'
version_filepath = os.path.join('.', 'VERSION.txt')
version_pattern = re.compile(fr'^\d+\.\d+\.\d+(-{pre_release_placeholder})?$')
def get(with_pre_release_placeholder: bool = False):
with open(version_filepath, 'r') as version_file:
version_lines = version_file.readlines()
assert len(version_lines) == 1, 'Version file is malformed'
version = version_lines[0]
assert version_pattern.match(version), 'Version string is malformed'
if with_pre_release_placeholder:
return version
else:
return version.replace(f'-{pre_release_placeholder}', '')
def write_version_file(major: int, minor: int, patch: int):
version = f'{major}.{minor}.{patch}-{pre_release_placeholder}'
with open(version_filepath, 'w') as version_file:
version_file.write(version)
def inc_patch():
version = get()
major, minor, patch = version.split('.')
write_version_file(major, minor, int(patch) + 1)
def inc_minor():
version = get()
major, minor, patch = version.split('.')
write_version_file(major, int(minor) + 1, patch)
def inc_major():
version = get()
major, minor, patch = version.split('.')
write_version_file(int(major) + 1, minor, patch)
if __name__ == "__main__":
fire.Fire({
'get': get,
'inc-patch': inc_patch,
'inc-minor': inc_minor,
'inc-major': inc_major
})
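# Illustrative usage (assumed invocations, not part of the original script):
#
#   $ python version.py get                 # e.g. prints "1.2.3"
#   $ python version.py get --with_pre_release_placeholder=True
#   $ python version.py inc-patch           # rewrites VERSION.txt as 1.2.4-SNAPSHOT
#
# python-fire maps the sub-commands registered in the dict above to the
# functions defined in this file.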
|
the-stack_106_21571
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bandit related tensor spec utilities."""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
import copy
from typing import Optional, Tuple
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import trajectory as traj
from tf_agents.typing import types
GLOBAL_FEATURE_KEY = 'global'
PER_ARM_FEATURE_KEY = 'per_arm'
NUM_ACTIONS_FEATURE_KEY = 'num_actions'
# For constrained optimization, the reward spec is expected to be a dictionary
# with the following keys that split the reward spec and the constraints spec.
REWARD_SPEC_KEY = 'reward'
CONSTRAINTS_SPEC_KEY = 'constraint'
def create_per_arm_observation_spec(
global_dim: int,
per_arm_dim: int,
max_num_actions: Optional[int] = None,
add_num_actions_feature: bool = False) -> types.NestedTensorSpec:
"""Creates an observation spec with per-arm features and possibly action mask.
Args:
global_dim: (int) The global feature dimension.
per_arm_dim: (int) The per-arm feature dimension.
max_num_actions: If specified (int), this is the maximum number of actions
in any sample, and the num_actions dimension of the per-arm features
will be set to this number. The actual number of actions for a given
sample can be lower than this parameter: it can be specified via the
NUM_ACTIONS_FEATURE_KEY, or an action mask.
add_num_actions_feature: (bool) whether to use the `num_actions` feature key
to encode the number of actions per sample.
Returns:
A nested structure of observation spec.
"""
global_obs_spec = tensor_spec.TensorSpec((global_dim,), tf.float32)
arm_obs_spec = tensor_spec.TensorSpec((max_num_actions, per_arm_dim),
tf.float32)
observation_spec = {GLOBAL_FEATURE_KEY: global_obs_spec,
PER_ARM_FEATURE_KEY: arm_obs_spec}
if add_num_actions_feature:
observation_spec.update({
NUM_ACTIONS_FEATURE_KEY:
tensor_spec.BoundedTensorSpec((),
minimum=1,
maximum=max_num_actions,
dtype=tf.int32)
})
return observation_spec
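# Illustrative sketch (arbitrary example dimensions, not from the source):
#
#   spec = create_per_arm_observation_spec(
#       global_dim=4, per_arm_dim=5, max_num_actions=10,
#       add_num_actions_feature=True)
#   # spec[GLOBAL_FEATURE_KEY].shape  == (4,)
#   # spec[PER_ARM_FEATURE_KEY].shape == (10, 5)
#   # spec[NUM_ACTIONS_FEATURE_KEY] is a bounded int32 scalar in [1, 10]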
def get_context_dims_from_spec(
context_spec: types.NestedTensorSpec,
accepts_per_arm_features: bool) -> Tuple[int, int]:
"""Returns the global and per-arm context dimensions.
If the policy accepts per-arm features, this function returns the tuple of
the global and per-arm context dimension. Otherwise, it returns the (global)
context dim and zero.
Args:
context_spec: A nest of tensor specs, containing the observation spec.
accepts_per_arm_features: (bool) Whether the context_spec is for a policy
that accepts per-arm features.
Returns: A 2-tuple of ints, the global and per-arm context dimension. If the
policy does not accept per-arm features, the per-arm context dim is 0.
"""
if accepts_per_arm_features:
global_context_dim = context_spec[GLOBAL_FEATURE_KEY].shape.as_list()[0]
arm_context_dim = context_spec[PER_ARM_FEATURE_KEY].shape.as_list()[1]
else:
assert hasattr(context_spec, 'shape')
spec_shape = context_spec.shape.as_list()
global_context_dim = spec_shape[0] if spec_shape else 1
arm_context_dim = 0
return global_context_dim, arm_context_dim
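# Illustrative note (assumed example values): for the per-arm spec sketched
# above, get_context_dims_from_spec(spec, accepts_per_arm_features=True)
# would return (4, 5); for a flat tensor_spec.TensorSpec((7,), tf.float32)
# with accepts_per_arm_features=False it would return (7, 0).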
def drop_arm_observation(
trajectory: traj.Trajectory) -> traj.Trajectory:
"""Drops the per-arm observation from a given trajectory (or trajectory spec)."""
transformed_trajectory = copy.deepcopy(trajectory)
del transformed_trajectory.observation[PER_ARM_FEATURE_KEY]
return transformed_trajectory
|
the-stack_106_21572
|
from ..check import Check
_REQUIRED_FIELDS = set(['description'])
_OPTIONAL_FIELDS = set([
'author', 'es5id', 'es6id', 'esid', 'features', 'flags', 'includes',
'info', 'locale', 'negative', 'timeout'
])
_VALID_FIELDS = _REQUIRED_FIELDS | _OPTIONAL_FIELDS
class CheckFrontmatter(Check):
'''Ensure tests have the expected YAML-formatted metadata.'''
ID = 'FRONTMATTER'
def run(self, name, meta, source):
if '_FIXTURE' in name:
if meta is not None:
return '"Fixture" files cannot specify metadata'
return
if meta is None:
return 'No valid YAML-formatted frontmatter'
fields = set(meta.keys())
missing = _REQUIRED_FIELDS - fields
if len(missing) > 0:
return 'Required fields missing: %s' % ', '.join(list(missing))
unrecognized = fields - _VALID_FIELDS
if len(unrecognized) > 0:
return 'Unrecognized fields: %s' % ', '.join(list(unrecognized))
|
the-stack_106_21573
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _, _LI, _LW
from keystone.resource import controllers as resource_controllers
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# registry of authentication methods
AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
def load_auth_methods():
global AUTH_PLUGINS_LOADED
if AUTH_PLUGINS_LOADED:
# Only try and load methods a single time.
return
# config.setup_authentication should be idempotent, call it to ensure we
# have setup all the appropriate configuration options we may need.
config.setup_authentication()
for plugin in CONF.auth.methods:
if '.' in plugin:
# NOTE(morganfainberg): if '.' is in the plugin name, it should be
# imported rather than used as a plugin identifier.
plugin_class = plugin
driver = importutils.import_object(plugin)
if not hasattr(driver, 'method'):
raise ValueError(_('Cannot load an auth-plugin by class-name '
'without a "method" attribute defined: %s'),
plugin_class)
LOG.info(_LI('Loading auth-plugins by class-name is deprecated.'))
plugin_name = driver.method
else:
plugin_name = plugin
plugin_class = CONF.auth.get(plugin)
driver = importutils.import_object(plugin_class)
if plugin_name in AUTH_METHODS:
raise ValueError(_('Auth plugin %(plugin)s is requesting '
'previously registered method %(method)s') %
{'plugin': plugin_class, 'method': driver.method})
AUTH_METHODS[plugin_name] = driver
AUTH_PLUGINS_LOADED = True
def get_auth_method(method_name):
global AUTH_METHODS
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
return AUTH_METHODS[method_name]
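# Illustrative note (assumed configuration values): with
#
#   [auth]
#   methods = password,token
#   password = keystone.auth.plugins.password.Password
#
# load_auth_methods() registers AUTH_METHODS['password'] and
# AUTH_METHODS['token']. A dotted class path listed directly in `methods`
# would instead be imported and registered under its `method` attribute.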
class AuthContext(dict):
"""Retrofitting auth_context to reconcile identity attributes.
The identity attributes must not have conflicting values among the
auth plug-ins. The only exception is `expires_at`, which is set to its
earliest value.
"""
# identity attributes need to be reconciled among the auth plugins
IDENTITY_ATTRIBUTES = frozenset(['user_id', 'project_id',
'access_token_id', 'domain_id',
'expires_at'])
def __setitem__(self, key, val):
if key in self.IDENTITY_ATTRIBUTES and key in self:
existing_val = self[key]
if key == 'expires_at':
# special treatment for 'expires_at', we are going to take
# the earliest expiration instead.
if existing_val != val:
LOG.info(_LI('"expires_at" has conflicting values '
'%(existing)s and %(new)s. Will use the '
'earliest value.'),
{'existing': existing_val, 'new': val})
if existing_val is None or val is None:
val = existing_val or val
else:
val = min(existing_val, val)
elif existing_val != val:
msg = _('Unable to reconcile identity attribute %(attribute)s '
'as it has conflicting values %(new)s and %(old)s') % (
{'attribute': key,
'new': val,
'old': existing_val})
raise exception.Unauthorized(msg)
return super(AuthContext, self).__setitem__(key, val)
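# Illustrative sketch (assumed timestamps): reconciling 'expires_at' keeps
# the earliest value, while other conflicting identity attributes raise
# Unauthorized.
#
#   ctx = AuthContext()
#   ctx['expires_at'] = '2015-01-01T00:00:00Z'
#   ctx['expires_at'] = '2014-06-01T00:00:00Z'  # earlier value wins
#   ctx['user_id'] = 'abc'
#   ctx['user_id'] = 'def'                      # raises exception.Unauthorized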
# TODO(blk-u): this class doesn't use identity_api directly, but makes it
# available for consumers. Consumers should probably not be getting
# identity_api from this since it's available in global registry, then
# identity_api should be removed from this list.
@dependency.requires('identity_api', 'resource_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@staticmethod
def create(context, auth=None):
auth_info = AuthInfo(context, auth=auth)
auth_info._validate_and_normalize_auth_data()
return auth_info
def __init__(self, context, auth=None):
self.context = context
self.auth = auth
self._scope_data = (None, None, None, None)
# self._scope_data is (domain_id, project_id, trust_ref, unscoped)
# project scope: (None, project_id, None, None)
# domain scope: (domain_id, None, None, None)
# trust scope: (None, None, trust_ref, None)
# unscoped: (None, None, None, 'unscoped')
def _assert_project_is_enabled(self, project_ref):
# ensure the project is enabled
try:
self.resource_api.assert_project_enabled(
project_id=project_ref['id'],
project=project_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _assert_domain_is_enabled(self, domain_ref):
try:
self.resource_api.assert_domain_enabled(
domain_id=domain_ref['id'],
domain=domain_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _lookup_domain(self, domain_info):
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
domain_ref = None
if not domain_id and not domain_name:
raise exception.ValidationError(attribute='id or name',
target='domain')
try:
if domain_name:
domain_ref = self.resource_api.get_domain_by_name(
domain_name)
else:
domain_ref = self.resource_api.get_domain(domain_id)
except exception.DomainNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
self._assert_domain_is_enabled(domain_ref)
return domain_ref
def _lookup_project(self, project_info):
project_id = project_info.get('id')
project_name = project_info.get('name')
project_ref = None
if not project_id and not project_name:
raise exception.ValidationError(attribute='id or name',
target='project')
try:
if project_name:
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
domain_ref = self._lookup_domain(project_info['domain'])
project_ref = self.resource_api.get_project_by_name(
project_name, domain_ref['id'])
else:
project_ref = self.resource_api.get_project(project_id)
# NOTE(morganfainberg): The _lookup_domain method will raise
# exception.Unauthorized if the domain isn't found or is
# disabled.
self._lookup_domain({'id': project_ref['domain_id']})
except exception.ProjectNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
self._assert_project_is_enabled(project_ref)
return project_ref
def _lookup_trust(self, trust_info):
trust_id = trust_info.get('id')
if not trust_id:
raise exception.ValidationError(attribute='trust_id',
target='trust')
trust = self.trust_api.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
return trust
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
return
if sum(['project' in self.auth['scope'],
'domain' in self.auth['scope'],
'unscoped' in self.auth['scope'],
'OS-TRUST:trust' in self.auth['scope']]) != 1:
raise exception.ValidationError(
attribute='project, domain, OS-TRUST:trust or unscoped',
target='scope')
if 'unscoped' in self.auth['scope']:
self._scope_data = (None, None, None, 'unscoped')
return
if 'project' in self.auth['scope']:
project_ref = self._lookup_project(self.auth['scope']['project'])
self._scope_data = (None, project_ref['id'], None, None)
elif 'domain' in self.auth['scope']:
domain_ref = self._lookup_domain(self.auth['scope']['domain'])
self._scope_data = (domain_ref['id'], None, None, None)
elif 'OS-TRUST:trust' in self.auth['scope']:
if not CONF.trust.enabled:
raise exception.Forbidden('Trusts are disabled.')
trust_ref = self._lookup_trust(
self.auth['scope']['OS-TRUST:trust'])
# TODO(ayoung): when trusts support domains, fill in domain data
if trust_ref.get('project_id') is not None:
project_ref = self._lookup_project(
{'id': trust_ref['project_id']})
self._scope_data = (None, project_ref['id'], trust_ref, None)
else:
self._scope_data = (None, None, trust_ref, None)
def _validate_auth_methods(self):
if 'identity' not in self.auth:
raise exception.ValidationError(attribute='identity',
target='auth')
# make sure auth methods are provided
if 'methods' not in self.auth['identity']:
raise exception.ValidationError(attribute='methods',
target='identity')
# make sure all the method data/payload are provided
for method_name in self.get_method_names():
if method_name not in self.auth['identity']:
raise exception.ValidationError(attribute=method_name,
target='identity')
# make sure auth method is supported
for method_name in self.get_method_names():
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
def _validate_and_normalize_auth_data(self):
"""Make sure "auth" is valid."""
# make sure "auth" exist
if not self.auth:
raise exception.ValidationError(attribute='auth',
target='request body')
self._validate_auth_methods()
self._validate_and_normalize_scope_data()
def get_method_names(self):
"""Returns the identity method names.
:returns: list of auth method names
"""
# Sanitizes methods received in request's body
# Filters out duplicates, while keeping elements' order.
method_names = []
for method in self.auth['identity']['methods']:
if method not in method_names:
method_names.append(method)
return method_names
def get_method_data(self, method):
"""Get the auth method payload.
:returns: auth method payload
"""
if method not in self.auth['identity']['methods']:
raise exception.ValidationError(attribute=method,
target='identity')
return self.auth['identity'][method]
def get_scope(self):
"""Get scope information.
Verify and return the scoping information.
:returns: (domain_id, project_id, trust_ref, unscoped).
If scope to a project, (None, project_id, None, None)
will be returned.
If scoped to a domain, (domain_id, None, None, None)
will be returned.
If scoped to a trust, (None, project_id, trust_ref, None),
will be returned, where the project_id comes from the
trust definition.
If unscoped, (None, None, None, 'unscoped') will be
returned.
"""
return self._scope_data
def set_scope(self, domain_id=None, project_id=None, trust=None,
unscoped=None):
"""Set scope information."""
if domain_id and project_id:
msg = _('Scoping to both domain and project is not allowed')
raise ValueError(msg)
if domain_id and trust:
msg = _('Scoping to both domain and trust is not allowed')
raise ValueError(msg)
if project_id and trust:
msg = _('Scoping to both project and trust is not allowed')
raise ValueError(msg)
self._scope_data = (domain_id, project_id, trust, unscoped)
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'resource_api', 'token_provider_api', 'trust_api')
class Auth(controller.V3Controller):
# Note(atiwari): From V3 auth controller code we are
# calling protection() wrappers, so we need to setup
# the member_name and collection_name attributes of
# auth controller code.
# In the absence of these attributes, default 'entity'
# string will be used to represent the target which is
# generic. Policy can be defined using 'entity' but it
# would not reflect the exact entity that is in context.
# We are defining collection_name = 'tokens' and
# member_name = 'token' to facilitate policy decisions.
collection_name = 'tokens'
member_name = 'token'
def __init__(self, *args, **kw):
super(Auth, self).__init__(*args, **kw)
config.setup_authentication()
def authenticate_for_token(self, context, auth=None):
"""Authenticate user and issue a token."""
include_catalog = 'nocatalog' not in context['query_string']
try:
auth_info = AuthInfo.create(context, auth=auth)
auth_context = AuthContext(extras={},
method_names=[],
bind={})
self.authenticate(context, auth_info, auth_context)
if auth_context.get('access_token_id'):
auth_info.set_scope(None, auth_context['project_id'], None)
self._check_and_set_default_scoping(auth_info, auth_context)
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
method_names = auth_info.get_method_names()
method_names += auth_context.get('method_names', [])
# make sure the list is unique
method_names = list(set(method_names))
expires_at = auth_context.get('expires_at')
# NOTE(morganfainberg): define this here so it is clear what the
# argument is during the issue_v3_token provider call.
metadata_ref = None
token_audit_id = auth_context.get('audit_id')
(token_id, token_data) = self.token_provider_api.issue_v3_token(
auth_context['user_id'], method_names, expires_at, project_id,
domain_id, auth_context, trust, metadata_ref, include_catalog,
parent_audit_id=token_audit_id)
# NOTE(wanghong): We consume a trust use only when we are using
# trusts and have successfully issued a token.
if trust:
self.trust_api.consume_use(trust['id'])
return render_token_data_response(token_id, token_data,
created=True)
except exception.TrustNotFound as e:
raise exception.Unauthorized(e)
def _check_and_set_default_scoping(self, auth_info, auth_context):
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
if trust:
project_id = trust['project_id']
if domain_id or project_id or trust:
# scope is specified
return
# Skip scoping when unscoped federated token is being issued
if federation.IDENTITY_PROVIDER in auth_context:
return
# Do not scope if request is for explicitly unscoped token
if unscoped is not None:
return
# fill in default_project_id if it is available
try:
user_ref = self.identity_api.get_user(auth_context['user_id'])
except exception.UserNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
default_project_id = user_ref.get('default_project_id')
if not default_project_id:
# User has no default project. He shall get an unscoped token.
return
# make sure user's default project is legit before scoping to it
try:
default_project_ref = self.resource_api.get_project(
default_project_id)
default_project_domain_ref = self.resource_api.get_domain(
default_project_ref['domain_id'])
if (default_project_ref.get('enabled', True) and
default_project_domain_ref.get('enabled', True)):
if self.assignment_api.get_roles_for_user_and_project(
user_ref['id'], default_project_id):
auth_info.set_scope(project_id=default_project_id)
else:
msg = _LW("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token"
" will be unscoped rather than scoped to the"
" project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
else:
msg = _LW("User %(user_id)s's default project %(project_id)s"
" is disabled. The token will be unscoped rather"
" than scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
except (exception.ProjectNotFound, exception.DomainNotFound):
# default project or default project domain doesn't exist,
# will issue unscoped token instead
msg = _LW("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg, {'user_id': user_ref['id'],
'project_id': default_project_id})
def authenticate(self, context, auth_info, auth_context):
"""Authenticate user."""
# The 'external' method allows any 'REMOTE_USER' based authentication
# In some cases the server can set REMOTE_USER as '' instead of
# dropping it, so this must be filtered out
if context['environment'].get('REMOTE_USER'):
try:
external = get_auth_method('external')
external.authenticate(context, auth_info, auth_context)
except exception.AuthMethodNotSupported:
# This will happen when there is no 'external' plugin registered
# and the container is performing authentication.
# The 'kerberos' and 'saml' methods will be used this way.
# In those cases, it is correct to not register an
# 'external' plugin; if there is both an 'external' and a
# 'kerberos' plugin, it would run the check on identity twice.
pass
except exception.Unauthorized:
# If external fails then continue and attempt to determine
# user identity using remaining auth methods
pass
# need to aggregate the results in case two or more methods
# are specified
auth_response = {'methods': []}
for method_name in auth_info.get_method_names():
method = get_auth_method(method_name)
resp = method.authenticate(context,
auth_info.get_method_data(method_name),
auth_context)
if resp:
auth_response['methods'].append(method_name)
auth_response[method_name] = resp
if auth_response["methods"]:
# authentication continuation required
raise exception.AdditionalAuthRequired(auth_response)
if 'user_id' not in auth_context:
msg = _('User not found')
raise exception.Unauthorized(msg)
@controller.protected()
def check_token(self, context):
token_id = context.get('subject_token_id')
token_data = self.token_provider_api.validate_v3_token(
token_id)
# NOTE(morganfainberg): The code in
# ``keystone.common.wsgi.render_response`` will remove the content
# body.
return render_token_data_response(token_id, token_data)
@controller.protected()
def revoke_token(self, context):
token_id = context.get('subject_token_id')
return self.token_provider_api.revoke_token(token_id)
@controller.protected()
def validate_token(self, context):
token_id = context.get('subject_token_id')
include_catalog = 'nocatalog' not in context['query_string']
token_data = self.token_provider_api.validate_v3_token(
token_id)
if not include_catalog and 'catalog' in token_data['token']:
del token_data['token']['catalog']
return render_token_data_response(token_id, token_data)
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
def _combine_lists_uniquely(self, a, b):
# it's most likely that only one of these will be filled so avoid
# the combination if possible.
if a and b:
return dict((x['id'], x) for x in a + b).values()
else:
return a or b
@controller.protected()
def get_auth_projects(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_projects_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_projects_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.ProjectV3.wrap_collection(context, refs)
@controller.protected()
def get_auth_domains(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_domains_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_domains_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.DomainV3.wrap_collection(context, refs)
@controller.protected()
def get_auth_catalog(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
project_id = auth_context.get('project_id')
if not project_id:
raise exception.Forbidden(
_('A project-scoped token is required to produce a service '
'catalog.'))
# The V3Controller base methods mostly assume that you're returning
# either a collection or a single element from a collection, neither of
# which apply to the catalog. Because this is a special case, this
# re-implements a tiny bit of work done by the base controller (such as
# self-referential link building) to avoid overriding or refactoring
# several private methods.
return {
'catalog': self.catalog_api.get_v3_catalog(user_id, project_id),
'links': {'self': self.base_url(context, path='auth/catalog')}
}
# FIXME(gyee): not sure if it belongs here or keystone.common. Park it here
# for now.
def render_token_data_response(token_id, token_data, created=False):
"""Render token data HTTP response.
Stash token ID into the X-Subject-Token header.
"""
headers = [('X-Subject-Token', token_id)]
if created:
status = (201, 'Created')
else:
status = (200, 'OK')
return wsgi.render_response(body=token_data,
status=status, headers=headers)
|
the-stack_106_21574
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib import losses
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.session_bundle import exporter
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.training import training
# TODO(zakaria): add functions that creates a head and returns ModelOpFn
def _regression_head(label_name=None,
weight_column_name=None,
target_dimension=1,
enable_centered_bias=False, head_name=None):
"""Creates a _Head for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
target_dimension: dimension of the target for multilabels.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
Returns:
An instance of _Head
"""
return _RegressionHead(train_loss_fn=_mean_squared_loss,
eval_loss_fn=_mean_squared_loss,
label_name=label_name,
weight_column_name=weight_column_name,
target_dimension=target_dimension,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
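# Illustrative sketch (assumed argument values): a weighted two-dimensional
# regression head could be built as
#
#   head = _regression_head(label_name='price',
#                           weight_column_name='sample_weight',
#                           target_dimension=2)
#   head.logits_dimension  # -> 2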
# TODO(zakaria): Add logistic_regression_head
def _multi_class_head(n_classes, label_name=None, weight_column_name=None,
enable_centered_bias=False, head_name=None,
thresholds=None):
"""Creates a _Head for multi class single label classification.
The Head uses softmax cross entropy loss.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
thresholds: thresholds for eval metrics, defaults to [.5]
Returns:
An instance of _MultiClassHead.
Raises:
ValueError: if n_classes is < 2
"""
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
if n_classes == 2:
loss_fn = _log_loss_with_two_classes
else:
loss_fn = _softmax_cross_entropy_loss
return _MultiClassHead(train_loss_fn=loss_fn,
eval_loss_fn=loss_fn,
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
def _binary_svm_head(label_name=None, weight_column_name=None,
enable_centered_bias=False, head_name=None,
thresholds=None,):
"""Creates a _TargetColumn for binary classification with SVMs.
The head uses binary hinge loss.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
thresholds: thresholds for eval metrics, defaults to [.5]
Returns:
An instance of _Head.
"""
return _BinarySvmHead(label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
def _multi_label_head(n_classes, label_name=None, weight_column_name=None,
enable_centered_bias=False, head_name=None,
thresholds=None):
"""Creates a _Head for multi label classification.
The Head uses softmax cross entropy loss.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
thresholds: thresholds for eval metrics, defaults to [.5]
Returns:
An instance of _MultiLabelHead.
Raises:
ValueError: if n_classes is < 2
"""
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
return _MultiLabelHead(n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
# TODO(zakaria): Make the classes public once we are ready for users to subclass
# them.
class _Head(object):
"""Interface for the head/top of a model.
Given logits or output of a hidden layer, a Head knows how to compute
predictions, loss, default metric and export signature.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def logits_dimension(self):
raise NotImplementedError("Calling an abstract method.")
def head_ops(self, features, target, mode, train_op_fn, logits=None,
logits_input=None):
"""Returns ops for a model_fn.
Args:
features: input dict.
target: target dict or tensor.
mode: estimator's ModeKeys
train_op_fn: function that takes a scalar loss and returns an op to
optimize with the loss.
logits: logits to be used for the head.
logits_input: tensor to build logits from.
Returns:
`estimator.ModelFnOps`
Raises:
ValueError: if mode is not recognized.
"""
_check_logits_input_not_supported(logits, logits_input)
if mode == estimator.ModeKeys.TRAIN:
loss, additional_train_op = self._training_loss(features, target,
logits, logits_input)
train_op = train_op_fn(loss)
if additional_train_op:
if train_op:
train_op = control_flow_ops.group(train_op, *additional_train_op)
else:
train_op = control_flow_ops.group(*additional_train_op)
return estimator.ModelFnOps(None, loss, train_op,
self._default_metric(),
self._create_signature_fn(), mode)
if mode == estimator.ModeKeys.INFER:
predictions = self._infer_op(logits, logits_input)
return estimator.ModelFnOps(predictions, None, None,
self._default_metric(),
self._create_signature_fn(), mode)
if mode == estimator.ModeKeys.EVAL:
predictions, loss = self._eval_op(features, target, logits, logits_input)
return estimator.ModelFnOps(predictions, loss, None,
self._default_metric(),
self._create_signature_fn(), mode)
raise ValueError("mode=%s unrecognized" % str(mode))
@abc.abstractmethod
def _training_loss(self, features, target, logits=None, logits_input=None,
name="training_loss"):
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def _infer_op(self, logits=None, logits_input=None, name="infer_op"):
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def _eval_op(self, features, target, logits=None, logits_input=None,
name="eval_op"):
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def _default_metric(self):
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def _create_signature_fn(self):
"""Creates signature function for the Head.
"""
raise NotImplementedError("Calling an abstract method.")
class _RegressionHead(_Head):
"""_Head for regression."""
def __init__(self, train_loss_fn, eval_loss_fn, label_name,
weight_column_name, target_dimension, enable_centered_bias,
head_name):
"""Base type for all single heads.
Args:
train_loss_fn: loss_fn for training.
eval_loss_fn: loss_fn for eval.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
target_dimension: Integer, number of label columns.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
"""
self._train_loss_fn = train_loss_fn
self._eval_loss_fn = eval_loss_fn
self._logits_dimension = target_dimension
self._label_name = label_name
self._weight_column_name = weight_column_name
self._head_name = head_name
self._enable_centered_bias = enable_centered_bias
self._centered_bias_weight_collection = _head_prefixed(head_name,
"centered_bias")
@property
def logits_dimension(self):
return self._logits_dimension
def _training_loss(self, features, target, logits=None,
logits_input=None, name="training_loss"):
"""Returns training loss tensor for this head.
    Training loss is different from the loss reported on TensorBoard because the
    example weights must be respected when computing the gradient:
      L = sum_{i} w_{i} * l_{i} / B
    where B is the number of examples in the batch, and l_{i} and w_{i} are the
    loss and weight of the i-th example.
Args:
features: features dict.
target: either a tensor for labels or in multihead case, a dict of string
to target tensor.
logits: logits, a float tensor.
logits_input: Output of last hidden layer.
name: Op name.
Returns:
      A tuple of the training loss and additional_train_op (possibly None).
"""
target = _check_target(target, self._label_name)
centered_bias_step = None
if self._enable_centered_bias:
logits = nn.bias_add(logits, _centered_bias(
self.logits_dimension,
self._centered_bias_weight_collection))
centered_bias_step = [_centered_bias_step(
self.logits_dimension,
self._centered_bias_weight_collection,
target,
self._train_loss_fn)]
loss_unweighted = self._train_loss_fn(logits, target)
loss, weighted_average_loss = _loss(
loss_unweighted,
_weight_tensor(features, self._weight_column_name),
name=name)
logging_ops.scalar_summary(_head_prefixed(self._head_name, "loss"),
weighted_average_loss)
return loss, centered_bias_step
def _eval_op(self, features, target, logits=None, logits_input=None,
name="eval_op"):
target = _check_target(target, self._label_name)
if self._enable_centered_bias:
logits = nn.bias_add(logits, _centered_bias(
self.logits_dimension,
self._centered_bias_weight_collection))
loss_unweighted = self._eval_loss_fn(logits, target)
loss, _ = _loss(loss_unweighted,
_weight_tensor(features, self._weight_column_name),
name=name)
predictions = self._logits_to_prediction(logits)
return predictions, loss
def _infer_op(self, logits=None, logits_input=None):
if self._enable_centered_bias:
logits = nn.bias_add(logits, _centered_bias(
self.logits_dimension,
self._centered_bias_weight_collection))
return self._logits_to_prediction(logits)
def _logits_to_prediction(self, logits=None):
predictions = {}
if self.logits_dimension == 1:
predictions[PredictionKey.SCORES] = array_ops.squeeze(
logits, squeeze_dims=[1])
else:
predictions[PredictionKey.SCORES] = logits
return predictions
# pylint: disable=undefined-variable
def _create_signature_fn(self):
def _regression_signature_fn(examples, unused_features, predictions):
if isinstance(predictions, dict):
score = predictions[PredictionKey.SCORES]
else:
score = predictions
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=score)
# TODO(zakaria): add validation
return default_signature, {}
return _regression_signature_fn
def _default_metric(self):
return {_head_prefixed(self._head_name, MetricKey.LOSS):
_weighted_average_loss_metric_spec(self._eval_loss_fn,
PredictionKey.SCORES,
self._label_name,
self._weight_column_name)}
class _MultiClassHead(_Head):
"""_Head for classification."""
def __init__(self, train_loss_fn, eval_loss_fn, n_classes, label_name,
weight_column_name, enable_centered_bias, head_name,
thresholds=None):
"""Base type for all single heads.
Args:
train_loss_fn: loss_fn for training.
eval_loss_fn: loss_fn for eval.
n_classes: number of classes.
      label_name: String, name of the key in label dict. Can be None if label
        is a tensor (single-headed models).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down-weight or boost examples during training.
        It will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
thresholds: thresholds for eval.
Raises:
ValueError: if n_classes is invalid.
"""
if n_classes < 2:
raise ValueError("n_classes must be >= 2")
self._thresholds = thresholds if thresholds else [.5]
self._train_loss_fn = train_loss_fn
self._eval_loss_fn = eval_loss_fn
self._logits_dimension = 1 if n_classes == 2 else n_classes
self._label_name = label_name
self._weight_column_name = weight_column_name
self._head_name = head_name
self._enable_centered_bias = enable_centered_bias
self._centered_bias_weight_collection = _head_prefixed(head_name,
"centered_bias")
@property
def logits_dimension(self):
return self._logits_dimension
def _training_loss(self, features, target, logits=None,
logits_input=None, name="training_loss"):
"""Returns training loss tensor for this head.
    Training loss is different from the loss reported on TensorBoard because the
    example weights must be respected when computing the gradient:
      L = sum_{i} w_{i} * l_{i} / B
    where B is the number of examples in the batch, and l_{i} and w_{i} are the
    loss and weight of the i-th example.
Args:
features: features dict.
target: either a tensor for labels or in multihead case, a dict of string
to target tensor.
logits: logits, a float tensor.
logits_input: Output of last hidden layer.
name: Op name.
Returns:
      A tuple of the training loss and additional_train_op (possibly None).
"""
target = _check_target(target, self._label_name)
centered_bias_step = None
if self._enable_centered_bias:
logits = nn.bias_add(logits, _centered_bias(
self.logits_dimension,
self._centered_bias_weight_collection))
centered_bias_step = [_centered_bias_step(
self.logits_dimension,
self._centered_bias_weight_collection,
target,
self._train_loss_fn)]
loss_unweighted = self._train_loss_fn(logits, target)
loss, weighted_average_loss = _loss(
loss_unweighted,
_weight_tensor(features, self._weight_column_name),
name=name)
logging_ops.scalar_summary(_head_prefixed(self._head_name, "loss"),
weighted_average_loss)
return loss, centered_bias_step
def _eval_op(self, features, target, logits=None, logits_input=None,
name="eval_op"):
target = _check_target(target, self._label_name)
if self._enable_centered_bias:
logits = nn.bias_add(logits, _centered_bias(
self.logits_dimension,
self._centered_bias_weight_collection))
loss_unweighted = self._eval_loss_fn(logits, target)
loss, _ = _loss(loss_unweighted,
_weight_tensor(features, self._weight_column_name),
name=name)
predictions = self._logits_to_prediction(logits)
return predictions, loss
def _infer_op(self, logits=None, logits_input=None):
if self._enable_centered_bias:
logits = nn.bias_add(logits, _centered_bias(
self.logits_dimension,
self._centered_bias_weight_collection))
return self._logits_to_prediction(logits)
def _logits_to_prediction(self, logits=None):
predictions = {PredictionKey.LOGITS: logits}
if self.logits_dimension == 1:
predictions[PredictionKey.LOGISTIC] = math_ops.sigmoid(logits)
logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
predictions[PredictionKey.PROBABILITIES] = nn.softmax(logits)
predictions[PredictionKey.CLASSES] = math_ops.argmax(logits, 1)
return predictions
def _create_signature_fn(self):
"""See superclass."""
def _classification_signature_fn(examples, unused_features, predictions):
"""Servo signature function."""
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
input_tensor=examples,
classes_tensor=predictions[PredictionKey.CLASSES],
scores_tensor=predictions[PredictionKey.PROBABILITIES])
else:
default_signature = exporter.classification_signature(
input_tensor=examples,
scores_tensor=predictions)
# TODO(zakaria): add validation
return default_signature, {}
return _classification_signature_fn
def _default_metric(self):
metrics = {_head_prefixed(self._head_name, MetricKey.LOSS):
_weighted_average_loss_metric_spec(self._eval_loss_fn,
PredictionKey.LOGITS,
self._label_name,
self._weight_column_name)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics[_head_prefixed(self._head_name, MetricKey.ACCURACY)] = (
metric_spec.MetricSpec(metrics_lib.streaming_accuracy,
PredictionKey.CLASSES, self._label_name,
self._weight_column_name))
if self.logits_dimension == 1:
def _add_binary_metric(metric_key, metric_fn):
metrics[_head_prefixed(self._head_name, metric_key)] = (
metric_spec.MetricSpec(metric_fn,
PredictionKey.LOGISTIC,
self._label_name,
self._weight_column_name))
_add_binary_metric(MetricKey.PREDICTION_MEAN, _predictions_streaming_mean)
_add_binary_metric(MetricKey.TARGET_MEAN, _target_streaming_mean)
# Also include the streaming mean of the label as an accuracy baseline, as
# a reminder to users.
_add_binary_metric(MetricKey.ACCURACY_BASELINE, _target_streaming_mean)
_add_binary_metric(MetricKey.AUC, _streaming_auc)
for threshold in self._thresholds:
_add_binary_metric(MetricKey.ACCURACY_MEAN % threshold,
_accuracy_at_threshold(threshold))
# Precision for positive examples.
_add_binary_metric(MetricKey.PRECISION_MEAN % threshold,
_streaming_at_threshold(
metrics_lib.streaming_precision_at_thresholds,
threshold),)
# Recall for positive examples.
_add_binary_metric(MetricKey.RECALL_MEAN % threshold,
_streaming_at_threshold(
metrics_lib.streaming_recall_at_thresholds,
threshold))
return metrics
def _check_target(target, label_name):
target = target[label_name] if isinstance(target, dict) else target
if isinstance(target, ops.SparseTensor):
raise ValueError("SparseTensor is not supported as a target/label.")
return target
class _BinarySvmHead(_MultiClassHead):
"""_Head for binary classification using SVMs."""
def __init__(self, label_name, weight_column_name, enable_centered_bias,
head_name, thresholds):
def loss_fn(logits, target):
check_shape_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(target), 2),
["target's shape should be either [batch_size, 1] or [batch_size]"])
with ops.control_dependencies([check_shape_op]):
target = array_ops.reshape(
target, shape=[array_ops.shape(target)[0], 1])
return losses.hinge_loss(logits, target)
super(_BinarySvmHead, self).__init__(
train_loss_fn=loss_fn,
eval_loss_fn=loss_fn,
n_classes=2,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
def _logits_to_prediction(self, logits=None):
predictions = {}
predictions[PredictionKey.LOGITS] = logits
logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
predictions[PredictionKey.CLASSES] = math_ops.argmax(logits, 1)
return predictions
def _default_metric(self):
metrics = {_head_prefixed(self._head_name, MetricKey.LOSS):
_weighted_average_loss_metric_spec(self._eval_loss_fn,
PredictionKey.LOGITS,
self._label_name,
self._weight_column_name)}
metrics[_head_prefixed(self._head_name, MetricKey.ACCURACY)] = (
metric_spec.MetricSpec(metrics_lib.streaming_accuracy,
PredictionKey.CLASSES, self._label_name,
self._weight_column_name))
# TODO(sibyl-vie3Poto): add more metrics relevant for svms.
return metrics
class _MultiLabelHead(_MultiClassHead):
"""_Head for multlabel classification."""
# TODO(zakaria): add signature and metric for multilabel.
def __init__(self, n_classes, label_name,
weight_column_name, enable_centered_bias, head_name,
thresholds):
super(_MultiLabelHead, self).__init__(
train_loss_fn=_sigmoid_cross_entropy_loss,
eval_loss_fn=_sigmoid_cross_entropy_loss,
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
def _logits_to_prediction(self, logits=None):
predictions = {PredictionKey.LOGITS: logits}
if self.logits_dimension == 1:
predictions[PredictionKey.LOGISTIC] = math_ops.sigmoid(logits)
logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
predictions[PredictionKey.PROBABILITIES] = math_ops.sigmoid(logits)
predictions[PredictionKey.CLASSES] = math_ops.to_int64(
math_ops.greater(logits, 0))
return predictions
def _weighted_loss(loss, weight):
"""Returns cumulative weighted loss."""
unweighted_loss = array_ops.reshape(loss, shape=(-1,))
weighted_loss = math_ops.mul(unweighted_loss,
array_ops.reshape(
weight, shape=(-1,)))
return weighted_loss
def _weight_tensor(features, weight_column_name):
if not weight_column_name:
return None
else:
return array_ops.reshape(
math_ops.to_float(features[weight_column_name]),
shape=(-1,))
def _loss(loss_unweighted, weight, name):
"""Returns loss."""
if weight is None:
loss = math_ops.reduce_mean(loss_unweighted, name=name)
return loss, loss
else:
loss_weighted = _weighted_loss(loss_unweighted, weight)
weighted_average_loss = math_ops.div(
math_ops.reduce_sum(loss_weighted),
math_ops.to_float(math_ops.reduce_sum(weight)),
name="weighted_average_loss")
loss = math_ops.reduce_mean(loss_weighted, name=name)
return loss, weighted_average_loss
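# Illustrative sketch (not part of the original module): a plain-Python view of
# how `_loss` above combines per-example losses and weights. The numbers are
# made up for demonstration only.
def _example_weighted_loss_sketch():
  losses = [0.5, 1.0, 2.0]   # l_i, one unweighted loss per example
  weights = [1.0, 0.0, 3.0]  # w_i from the weight column
  weighted = [l * w for l, w in zip(losses, weights)]
  # Training loss respects the weights but still averages over the batch size.
  training_loss = sum(weighted) / len(losses)            # 6.5 / 3
  # The reported loss is the weighted average, sum(w*l) / sum(w).
  weighted_average_loss = sum(weighted) / sum(weights)   # 6.5 / 4
  return training_loss, weighted_average_loss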
def _check_logits_input_not_supported(logits, logits_input):
if logits_input is not None or logits is None:
raise NotImplementedError("logits_input is not supported yet, "
"must pass logits")
def _centered_bias(logits_dimension, weight_collection):
"""Creates and returns centered bias."""
centered_bias = variables.Variable(
array_ops.zeros([logits_dimension]),
collections=[weight_collection, ops.GraphKeys.VARIABLES],
name="centered_bias_weight")
logging_ops.scalar_summary(
["centered_bias_%d" % cb for cb in range(logits_dimension)],
array_ops.reshape(centered_bias, [-1]))
return centered_bias
def _centered_bias_step(logits_dimension, weight_collection, target,
train_loss_fn):
"""Creates and returns training op for centered bias."""
centered_bias = ops.get_collection(weight_collection)
batch_size = array_ops.shape(target)[0]
logits = array_ops.reshape(
array_ops.tile(centered_bias[0], [batch_size]),
[batch_size, logits_dimension])
with ops.name_scope(None, "centered_bias", (target, logits)):
centered_bias_loss = math_ops.reduce_mean(
train_loss_fn(logits, target), name="training_loss")
    # Learn the centered bias with an optimizer. 0.1 is a conservative lr for a
    # single variable.
return training.AdagradOptimizer(0.1).minimize(
centered_bias_loss, var_list=centered_bias)
def _head_prefixed(head_name, val):
return "%s_%s" % (head_name, val) if head_name else val
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(logits, target):
# To prevent broadcasting inside "-".
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, dim=[1])
# TODO(zakaria): make sure it does not recreate the broadcast bug.
if len(logits.get_shape()) == 1:
logits = array_ops.expand_dims(logits, dim=[1])
logits.get_shape().assert_is_compatible_with(target.get_shape())
return math_ops.square(logits - math_ops.to_float(target))
def _log_loss_with_two_classes(logits, target):
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, dim=[1])
loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
math_ops.to_float(target))
return loss_vec
def _softmax_cross_entropy_loss(logits, target):
# Check that we got integer for classification.
if not target.dtype.is_integer:
raise ValueError("Target's dtype should be integer "
"Instead got %s." % target.dtype)
# sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
if len(target.get_shape()) == 2:
target = array_ops.squeeze(target, squeeze_dims=[1])
loss_vec = nn.sparse_softmax_cross_entropy_with_logits(logits, target)
return loss_vec
def _sigmoid_cross_entropy_loss(logits, target):
# sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] target.
return nn.sigmoid_cross_entropy_with_logits(logits, math_ops.to_float(target))
def _float_weights_or_none(weights):
if weights is None:
return None
return math_ops.to_float(weights)
def _weighted_average_loss_metric_spec(loss_fn, prediction_key,
                                       label_key, weight_key):
def _streaming_weighted_average_loss(predictions, target, weights=None):
loss_unweighted = loss_fn(predictions, target)
if weights is not None:
weights = math_ops.to_float(weights)
_, weighted_average_loss = _loss(loss_unweighted,
weights,
name="eval_loss")
return metrics_lib.streaming_mean(weighted_average_loss)
  return metric_spec.MetricSpec(_streaming_weighted_average_loss,
                                prediction_key, label_key, weight_key)
def _target_streaming_mean(unused_predictions, target, weights=None):
return metrics_lib.streaming_mean(target, weights=weights)
def _predictions_streaming_mean(predictions, unused_target, weights=None):
return metrics_lib.streaming_mean(predictions, weights=weights)
def _streaming_auc(predictions, target, weights=None):
return metrics_lib.streaming_auc(predictions, target,
weights=_float_weights_or_none(weights))
def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, target, weights=None):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
labels=target,
weights=weights)
return _accuracy_metric
def _streaming_at_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, target, weights=None):
precision_tensor, update_op = streaming_metrics_fn(
predictions, labels=target, thresholds=[threshold],
weights=_float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), update_op
return _streaming_metrics
class PredictionKey(object):
CLASSES = "classes"
PROBABILITIES = "probabilities"
LOGITS = "logits"
LOGISTIC = "logistic"
SCORES = "scores"
class MetricKey(object):
LOSS = "loss"
AUC = "auc"
PREDICTION_MEAN = "labels/prediction_mean"
TARGET_MEAN = "labels/actual_target_mean"
ACCURACY = "accuracy"
ACCURACY_BASELINE = "accuracy/baseline_target_mean"
ACCURACY_MEAN = "accuracy/threshold_%f_mean"
PRECISION_MEAN = "precision/positive_threshold_%f_mean"
RECALL_MEAN = "recall/positive_threshold_%f_mean"
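# Illustrative sketch (not part of the original module): mirrors, with plain
# Python math, what `_MultiClassHead._logits_to_prediction` computes in the
# binary case (logits_dimension == 1). The logit value is made up.
def _example_binary_prediction_sketch(logit=0.8):
  import math
  logistic = 1.0 / (1.0 + math.exp(-logit))        # sigmoid(logit)
  # A zero column is concatenated, then softmax is taken over [0, logit];
  # the second entry equals the sigmoid above.
  exps = [math.exp(0.0), math.exp(logit)]
  probabilities = [e / sum(exps) for e in exps]
  predicted_class = 1 if logit > 0 else 0          # argmax over [0, logit]
  return logistic, probabilities, predicted_class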
|
the-stack_106_21575
|
#!/usr/bin/python3
import pygame
import os
from Target import *
class Tracks(pygame.sprite.Sprite, Target):
def __init__(self,x,y):
pygame.sprite.Sprite.__init__(self)
Target.__init__(self, False,'tracks')
self.image= pygame.image.load(os.path.join('Images','tracks.png')).convert_alpha()
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def display(self, display):
display.blit(self.image, (self.rect.x, self.rect.y))
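# Hypothetical usage sketch: assumes a pygame display has already been created
# (convert_alpha() in __init__ requires one) and that Images/tracks.png exists.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    tracks = Tracks(100, 200)
    tracks.display(screen)
    pygame.display.flip()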
|
the-stack_106_21576
|
class CollectionFinder:
def __init__(self, tokens, intents, filters_intent):
self.tokens = tokens
self.intents = intents
self.filters_intent = filters_intent
self.score = 0
def collection(self):
query_collection = None
        # Look for an explicit collection intent in the parsed intents
collection = next(
(intent["name"] for intent in self.intents if intent["type"] == "collection"), None)
# if collection is None then find collection from filters
if collection is not None:
query_collection = collection
# Calculate confidence
self.score += 100
else:
# Filter collections based on frequency
filter_collections = {}
            # find filter collections and their frequencies
for filter_intent in self.filters_intent:
                # if the collection is not present yet, add it; otherwise increment its frequency
if filter_intent["collection"] not in filter_collections:
filter_collections[filter_intent["collection"]] = 1
else:
filter_collections[filter_intent["collection"]] += 1
# Find collection with high frequency
try:
query_collection = max(
filter_collections, key=filter_collections.get)
# Calculate confidence
self.score += 10
except ValueError:
query_collection = None
# Check if still collection is None
if query_collection is None:
            # if there is only one token, e.g. (2:1) or (2-1),
            # then assume Quran as the collection
if len(self.tokens) == 1:
if ":" in self.tokens[0] or "-" in self.tokens[0]:
query_collection = "quran"
# Calculate confidence
self.score += 10
return query_collection
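# Hypothetical usage sketch: the tokens, intents and filters below are invented
# for illustration; only the dict keys match what CollectionFinder reads.
if __name__ == "__main__":
    tokens = ["verses", "about", "patience"]
    intents = [{"type": "collection", "name": "quran"}]
    filters_intent = [{"collection": "quran"}, {"collection": "hadith"}]
    finder = CollectionFinder(tokens, intents, filters_intent)
    print(finder.collection(), finder.score)  # -> quran 100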
|
the-stack_106_21577
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# Based on MS-OXMSG protocol specification
# ref: https://blogs.msdn.microsoft.com/openspecification/2010/06/20/msg-file-format-rights-managed-email-message-part-2/
# ref: https://msdn.microsoft.com/en-us/library/cc463912(v=EXCHG.80).aspx
import email
import json
import os
import re
from struct import unpack
from olefile import OleFileIO, isOleFile
from .data_models import DataModel
from .email_builder import EmailFormatter
from .properties.ms_props_id_map import PROPS_ID_MAP
TOP_LEVEL_HEADER_SIZE = 32
RECIPIENT_HEADER_SIZE = 8
ATTACHMENT_HEADER_SIZE = 8
EMBEDDED_MSG_HEADER_SIZE = 24
CONTROL_CHARS = re.compile(r'[\n\r\t]')
class Message(object):
"""
Class to store Message properties
"""
def __init__(self, directory_entries):
self._streams = self._process_directory_entries(directory_entries)
self._data_model = DataModel()
self._nested_attachments_depth = 0
self.properties = self._get_properties()
self.attachments = self._get_attachments()
self.recipients = self._get_recipients()
def as_dict(self):
"""
returns message attributes as a python dictionary.
:return: dict
"""
message_dict = {
"attachments": self.attachments,
"recipients": self.recipients
}
message_dict.update(self.properties)
return message_dict
def _set_property_stream_info(self, ole_file, header_size):
property_dir_entry = ole_file.openstream('__properties_version1.0')
version_stream_data = property_dir_entry.read()
if not version_stream_data:
raise Exception("Invalid MSG file provided, 'properties_version1.0' stream data is empty.")
if version_stream_data:
if header_size >= EMBEDDED_MSG_HEADER_SIZE:
properties_metadata = unpack('8sIIII', version_stream_data[:24])
if not properties_metadata or not len(properties_metadata) >= 5:
raise Exception("'properties_version1.0' stream data is corrupted.")
self.next_recipient_id = properties_metadata[1]
self.next_attachment_id = properties_metadata[2]
self.recipient_count = properties_metadata[3]
self.attachment_count = properties_metadata[4]
if (len(version_stream_data) - header_size) % 16 != 0:
                raise Exception('Property stream size, less the header, is not exactly divisible by 16.')
self.property_entries_count = (len(version_stream_data) - header_size) / 16
@staticmethod
def _process_directory_entries(directory_entries):
streams = {
"properties": {},
"recipients": {},
"attachments": {}
}
for name, stream in directory_entries.items():
# collect properties
if "__substg1.0_" in name:
streams["properties"][name] = stream
# collect attachments
elif "__attach_" in name:
streams["attachments"][name] = stream.kids
# collect recipients
elif "__recip_" in name:
streams["recipients"][name] = stream.kids
# unknown stream name
else:
continue
return streams
def _get_properties(self):
directory_entries = self._streams.get("properties")
directory_name_filter = "__substg1.0_"
property_entries = {}
for directory_name, directory_entry in directory_entries.items():
if directory_name_filter not in directory_name:
continue
if not directory_entry:
continue
if isinstance(directory_entry, list):
directory_values = {}
for property_entry in directory_entry:
property_data = self._get_property_data(directory_name, property_entry, is_list=True)
if property_data:
directory_values.update(property_data)
property_entries[directory_name] = directory_values
else:
property_data = self._get_property_data(directory_name, directory_entry)
if property_data:
property_entries.update(property_data)
return property_entries
def _get_recipients(self):
directory_entries = self._streams.get("recipients")
directory_name_filter = "__recip_version1.0_"
recipient_entries = {}
for directory_name, directory_entry in directory_entries.items():
if directory_name_filter not in directory_name:
continue
if not directory_entry:
continue
if isinstance(directory_entry, list):
directory_values = {}
for property_entry in directory_entry:
property_data = self._get_property_data(directory_name, property_entry, is_list=True)
if property_data:
directory_values.update(property_data)
recipient_address = directory_values.get(
'EmailAddress', directory_values.get('SmtpAddress', directory_name)
)
recipient_entries[recipient_address] = directory_values
else:
property_data = self._get_property_data(directory_name, directory_entry)
if property_data:
recipient_entries.update(property_data)
return recipient_entries
def _get_attachments(self):
directory_entries = self._streams.get("attachments")
directory_name_filter = "__attach_version1.0_"
attachment_entries = {}
for directory_name, directory_entry in directory_entries.items():
if directory_name_filter not in directory_name:
continue
if not directory_entry:
continue
if isinstance(directory_entry, list):
directory_values = {}
for property_entry in directory_entry:
kids = property_entry.kids
if kids:
embedded_message = Message(property_entry.kids_dict)
directory_values["EmbeddedMessage"] = {
"properties": embedded_message.properties,
"recipients": embedded_message.recipients,
"attachments": embedded_message.attachments
}
property_data = self._get_property_data(directory_name, property_entry, is_list=True)
if property_data:
directory_values.update(property_data)
attachment_entries[directory_name] = directory_values
else:
property_data = self._get_property_data(directory_name, directory_entry)
if property_data:
attachment_entries.update(property_data)
return attachment_entries
def _get_property_data(self, directory_name, directory_entry, is_list=False):
directory_entry_name = directory_entry.name
if is_list:
stream_name = [directory_name, directory_entry_name]
else:
stream_name = [directory_entry_name]
ole_file = directory_entry.olefile
property_details = self._get_canonical_property_name(directory_entry_name)
if not property_details:
return None
property_name = property_details.get("name")
property_type = property_details.get("data_type")
if not property_type:
return None
try:
raw_content = ole_file.openstream(stream_name).read()
except IOError:
raw_content = None
property_value = self._data_model.get_value(raw_content, data_type=property_type)
if property_value:
property_detail = {property_name: property_value}
else:
property_detail = None
return property_detail
@staticmethod
def _get_canonical_property_name(dir_entry_name):
if not dir_entry_name:
return None
if "__substg1.0_" in dir_entry_name:
name = dir_entry_name.replace("__substg1.0_", "")
prop_name_id = "0x" + name[0:4]
prop_details = PROPS_ID_MAP.get(prop_name_id)
return prop_details
return None
def __repr__(self):
return u'Message [%s]' % self.properties.get('InternetMessageId', self.properties.get("Subject"))
class Recipient(object):
"""
class to store recipient attributes
"""
def __init__(self, recipients_properties):
self.AddressType = recipients_properties.get("AddressType")
self.Account = recipients_properties.get("Account")
self.EmailAddress = recipients_properties.get("SmtpAddress")
self.DisplayName = recipients_properties.get("DisplayName")
self.ObjectType = recipients_properties.get("ObjectType")
self.RecipientType = recipients_properties.get("RecipientType")
def __repr__(self):
return '%s (%s)' % (self.DisplayName, self.EmailAddress)
class Attachment(object):
"""
class to store attachment attributes
"""
def __init__(self, attachment_properties):
self.DisplayName = attachment_properties.get("DisplayName")
self.AttachEncoding = attachment_properties.get("AttachEncoding")
self.AttachContentId = attachment_properties.get("AttachContentId")
self.AttachMethod = attachment_properties.get("AttachMethod")
self.AttachmentSize = format_size(attachment_properties.get("AttachmentSize"))
self.AttachFilename = attachment_properties.get("AttachFilename")
self.AttachLongFilename = attachment_properties.get("AttachLongFilename")
if self.AttachLongFilename:
self.Filename = self.AttachLongFilename
else:
self.Filename = self.AttachFilename
if self.Filename:
self.Filename = os.path.basename(self.Filename)
else:
self.Filename = '[NoFilename_Method%s]' % self.AttachMethod
self.data = attachment_properties.get("AttachDataObject")
self.AttachMimeTag = attachment_properties.get("AttachMimeTag", "application/octet-stream")
self.AttachExtension = attachment_properties.get("AttachExtension")
def __repr__(self):
return '%s (%s / %s)' % (self.Filename, self.AttachmentSize, len(self.data or []))
class MsOxMessage(object):
"""
Base class for Microsoft Message Object
"""
def __init__(self, msg_file_path):
self.msg_file_path = msg_file_path
self.include_attachment_data = False
if not self.is_valid_msg_file():
raise Exception("Invalid file provided, please provide valid Microsoft’s Outlook MSG file.")
with OleFileIO(msg_file_path) as ole_file:
# process directory entries
ole_root = ole_file.root
kids_dict = ole_root.kids_dict
self._message = Message(kids_dict)
self._message_dict = self._message.as_dict()
# process msg properties
self._set_properties()
# process msg recipients
self._set_recipients()
# process attachments
self._set_attachments()
def get_properties(self):
properties = {}
for key, value in self._message_dict.items():
if key == "attachments" and value:
properties["attachments"] = self.attachments
elif key == "recipients" and value:
properties["recipients"] = self.recipients
else:
properties[key] = value
return properties
def get_properties_as_dict(self):
        return self._message_dict
def get_message_as_json(self):
try:
if not self.include_attachment_data:
for _, attachment in self._message_dict.get("attachments", []).items():
if not isinstance(attachment, dict):
continue
attachment["AttachDataObject"] = {}
json_string = json.dumps(
self._message_dict, skipkeys=True,
ensure_ascii=False, encoding="latin-1",
indent=4)
return json_string
except ValueError:
return None
def get_email_mime_content(self):
email_obj = EmailFormatter(self)
return email_obj.build_email()
def save_email_file(self, file_path):
email_obj = EmailFormatter(self)
email_obj.save_file(file_path)
return True
def _set_properties(self):
property_values = self._message.properties
# setting generally required properties to easily access using MsOxMessage instance.
self.subject = property_values.get("Subject")
header = property_values.get("TransportMessageHeaders")
self.header = parse_email_headers(header, True)
self.header_dict = parse_email_headers(header) or {}
self.created_date = property_values.get("CreationTime")
self.received_date = property_values.get("ReceiptTime")
sent_date = property_values.get("DeliverTime")
if not sent_date:
sent_date = self.header_dict.get("Date")
self.sent_date = sent_date
sender_address = self.header_dict.get("From")
if not sender_address:
sender_address = property_values.get("SenderRepresentingSmtpAddress")
self.sender = sender_address
reply_to_address = self.header_dict.get("Reply-To")
if not reply_to_address:
reply_to_address = property_values.get("ReplyRecipientNames")
self.reply_to = reply_to_address
self.message_id = property_values.get("InternetMessageId")
to_address = self.header_dict.get("TO")
if not to_address:
to_address = property_values.get("DisplayTo")
if not to_address:
to_address = property_values.get("ReceivedRepresentingSmtpAddress")
self.to = to_address
cc_address = self.header_dict.get("CC")
# if cc_address:
# cc_address = [CONTROL_CHARS.sub(" ", cc_add) for cc_add in cc_address.split(",")]
self.cc = cc_address
bcc_address = self.header_dict.get("BCC")
self.bcc = bcc_address
        # prefer HTML over plain text
if "Html" in property_values:
self.body = property_values.get("Html")
else:
self.body = property_values.get("Body")
if not self.body and "RtfCompressed" in property_values:
try:
import compressed_rtf
except ImportError:
compressed_rtf = None
if compressed_rtf:
compressed_rtf_body = property_values['RtfCompressed']
self.body = compressed_rtf.decompress(compressed_rtf_body)
def _set_recipients(self):
recipients = self._message.recipients
self.recipients = []
for recipient_name, recipient in recipients.items():
if self.to and recipient_name in self.to:
recipient["RecipientType"] = "TO"
if self.cc and recipient_name in self.cc:
recipient["RecipientType"] = "CC"
if self.bcc and recipient_name in self.bcc:
recipient["RecipientType"] = "BCC"
if self.reply_to and recipient_name in self.reply_to:
recipient["RecipientType"] = "ReplyTo"
self.recipients.append(Recipient(recipient))
def _set_attachments(self):
attachments = self._message.attachments
self.attachments = [Attachment(attach) for attach in attachments.values()]
def is_valid_msg_file(self):
if not os.path.exists(self.msg_file_path):
return False
if not isOleFile(self.msg_file_path):
return False
return True
def format_size(num, suffix='B'):
if not num:
return "unknown"
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def parse_email_headers(header, raw=False):
if not header:
return None
headers = email.message_from_string(header)
if raw:
return headers
email_address_headers = {
"To": [],
"From": [],
"CC": [],
"BCC": [],
"Reply-To": [],
}
for addr in email_address_headers.keys():
for (name, email_address) in email.utils.getaddresses(headers.get_all(addr, [])):
email_address_headers[addr].append("{} <{}>".format(name, email_address))
parsed_headers = dict(headers)
parsed_headers.update(email_address_headers)
return parsed_headers
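# Hypothetical usage sketch: the file paths are placeholders; the attributes and
# methods used (subject, sender, attachments, save_email_file) are defined above.
if __name__ == "__main__":
    msg = MsOxMessage("sample.msg")
    print(msg.subject, msg.sender)
    for attachment in msg.attachments:
        print(attachment.Filename, attachment.AttachmentSize)
    msg.save_email_file("sample.eml")  # delegates to EmailFormatter.save_file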
|
the-stack_106_21578
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib.ticker as mtick
from matplotlib.collections import LineCollection
###############################################################################
#Non-Standard Imports
###############################################################################
import addpath
import dunlin as dn
import dunlin.simulate as sim
import dunlin.curvefit as cf
import dunlin.dataparser as dp
import dunlin.traceanalysis as ta
add = lambda x, y: x + y
minus = lambda x, y: x - y
mul = lambda x, y: x * y
div = lambda x, y: x / y
dxdt = lambda x: x.diff().divide(np.diff(x.index, prepend=np.NAN), axis=0)/x
def apply2data(data, name, func, *states, **kwargs):
if type(func) == str:
if func == '+':
func_ = add
elif func == '-':
func_ = minus
elif func == '*':
func_ = mul
elif func == '/':
func_ = div
elif func == 'dxdt':
func_ = dxdt
elif isnum(func):
func_ = lambda x: x*func
elif type(func) == np.ndarray:
func_ = lambda x: x.multiply(func, axis=0)
else:
func_ = func
tables = [data[s] if type(s) == str else s for s in states]
new = func_(*tables, **kwargs)
new = pd.concat({name: new}, axis=1, names=data.columns.names)
new = pd.concat([data, new], axis=1)
return new
def isnum(x):
try:
float(x)
return True
except:
return False
def dai(mu):
gradient = 5.78656638987421
intercept = 0.03648482880435973
return mu*gradient + intercept
plt.close('all')
plt.style.use(dn.styles['light_style_multi'])
# plt.style.use(dn.styles['dark_style_multi'])
#Read the file
date = '210721'
df = pd.read_excel(f'data_{date}_MCr.xlsx', index_col=0, header=[0, 1, 2, 3], sheet_name='Compiled')
#Thin and preprocess
idx = list(range(50))[::2] + list(range(50, len(df)))[::5]
df = df.iloc[idx]
time = df.index.values
data = {i: g.droplevel(axis=1, level=0) for i, g in df.groupby(axis=1, level=0)}
df = df.droplevel(3, axis=1)
#Wrap/shortcut
t = time
w = lambda name, func, *states, **kwargs: apply2data(df, name, func, *states, **kwargs)
df = w('RFP/OD', '/', 'RFP', 'OD600')
df = w('GFP/OD', '/', 'GFP', 'OD600')
df = w('mu', 'dxdt', 'OD600')
#Set up plots
fig, AX = sim.figure(3, 2, None)
# fig, AX_ = sim.figure(3, 2, None)
# AX = AX + AX_
make_colors = lambda n, base, palette_type='light': sim.palette_types[palette_type](n, color=sim.colors[base])
#Plot time response
base_colors = [sim.colors[c] for c in ['cobalt', 'goldenrod', 'coral']]
col_lvl = 'Type'
sc_lvl = 'Inducer'
for n0, [i0, g0] in enumerate(df.groupby(by=col_lvl, axis=1)):
base_color = base_colors[n0]
lvl1 = g0.groupby(by=sc_lvl, axis=1)
lvl1 = sorted(lvl1, key=lambda x: x[0])
colors = sim.palette_types['light'](len(lvl1), color=base_color)
for n1, [i1, g1] in enumerate(lvl1):
color = colors[n1]
label = i1
AX[0].plot(t, g1['OD600'], 'o', color=color, label=label)
AX[1].plot(t, g1['RFP/OD'], 'o', color=color, label=label)
AX[3].plot(t, g1['mu'], 'o', color=color, label=label)
if i0 != 'MCR':
AX[2].plot(t, g1['GFP/OD'], 'o', color=color, label=label)
start = 980
stop = 1300
x = g1['GFP/OD'].loc[start:stop]
y = g1['RFP/OD'].loc[start:stop]
AX[4].plot(x, y, 'o', color=color, label=label)
start = 40
stop = 180
lag = 30
x = g1['mu'].loc[start+lag:stop+lag]
y = g1['RFP/OD'].loc[start:stop]
AX[5].plot(x, y, 'o', color=color, label=label)
# start = 40
# stop = 200
# lag = 0
# x = g1['mu'].loc[start+lag:stop+lag]
# y = g1['GFP/OD'].loc[start:stop]
# AX[6].plot(x, y, 'o', color=color, label=label)
AX[0].set_title(f'OD600 {date}')
AX[1].set_title('RFP/OD')
AX[2].set_title('GFP/OD')
AX[3].set_title('mu')
AX[4].set_title('RFP/OD vs GFP/OD (st)')
AX[5].set_title('RFP/OD vs mu (exp)')
# AX[6].set_title('GFP/OD vs mu (exp)')
AX[0].legend()
|
the-stack_106_21579
|
from unittest import TestCase
from unittest.mock import MagicMock, patch
import logging
import trafaret
from smtpush import validate, redis_receiver, sendmail
class TestSMTPush(TestCase):
def test_validate_errors(self):
with self.assertRaises(trafaret.DataError):
validate({})
with self.assertRaises(trafaret.DataError):
validate({'to': ['[email protected]'], 'body': 'b'})
with self.assertRaises(trafaret.DataError):
validate({'to': ['[email protected]'], 'subj': 's'})
with self.assertRaises(trafaret.DataError):
validate({'to': ['t@g'], 'body': 'b', 'subj': 's'})
with self.assertRaises(trafaret.DataError):
validate({'to': ['[email protected]'], 'body': 'b', 'subj': 's', 'from': 'f'})
with self.assertRaises(trafaret.DataError):
validate({'to': ['[email protected]'], 'body': 'b', 'subj': 's', 'cc': '[email protected]'})
with self.assertRaises(trafaret.DataError):
validate({'to': ['[email protected]'], 'body': 'b', 'subj': 's', 'bcc': '[email protected]'})
with self.assertRaises(trafaret.DataError):
validate({'to': ['[email protected]'], 'body': 'b', 'subj': 's', 'html': True})
def test_validate_success(self):
# short
data = {'to': ['[email protected]'], 'body': 'b', 'subj': 's'}
self.assertEqual(validate(data), data)
# full
data = {
'to': ['[email protected]'],
'body': 'b',
'subj': 's',
'from': '[email protected]',
'cc': ['[email protected]'],
'bcc': ['[email protected]'],
'html': '<h1>h</h1>',
}
res = {
'to': ['[email protected]'],
'body': 'b',
'subj': 's',
'sender': '[email protected]',
'cc': ['[email protected]'],
'bcc': ['[email protected]'],
'html': '<h1>h</h1>',
}
self.assertEqual(validate(data), res)
def test_redis_receiver(self):
with self.assertLogs('smtpush', logging.ERROR) as cm:
list(redis_receiver('{"to": "[email protected]"}', None))
self.assertIn('DataError', cm.output[0])
with self.assertLogs('smtpush', logging.ERROR) as cm:
list(redis_receiver('nojson', None))
self.assertIn('is not valid json', cm.output[0])
with self.assertLogs('smtpush', logging.INFO) as cm:
sendmail.return_value = yield None
data = '{"to": ["[email protected]"], "body": "b", "subj": "s"}'
list(redis_receiver(data, None))
self.assertIn('send to', cm.output[0])
@patch('smtpush.SMTP_SSL')
@patch('smtpush.SMTP')
def test_sendmail_1(self, mock, mock_ssl):
config = {
'host': 'localhost',
'port': 25,
'username': 'user',
'password': 'pass',
}
smtp = MagicMock()
mock.return_value = smtp
list(sendmail(['[email protected]'], 'subj', 'body', config=config))
smtp.ehlo.assert_called_once_with()
self.assertFalse(smtp.starttls.called)
smtp.login.assert_called_once_with(user=config['username'], password=config['password'])
smtp.sendmail.assert_called_once()
smtp.close.assert_called_once_with()
mock.assert_called_once()
self.assertFalse(mock_ssl.called)
@patch('smtpush.SMTP_SSL')
@patch('smtpush.SMTP')
def test_sendmail_2(self, mock, mock_ssl):
config = {
'host': 'localhost',
'port': 25,
'username': 'user',
'password': 'pass',
'tls': True,
'ssl': True,
}
smtp = MagicMock()
mock_ssl.return_value = smtp
list(sendmail(['[email protected]'], 'subj', 'body', 'html', '[email protected]', ['[email protected]'], config=config))
smtp.ehlo.assert_called_once_with()
smtp.starttls.assert_called_once_with()
smtp.login.assert_called_once_with(user=config['username'], password=config['password'])
smtp.sendmail.assert_called_once()
smtp.close.assert_called_once_with()
self.assertFalse(mock.called)
mock_ssl.assert_called_once()
|
the-stack_106_21581
|
# Copyright 2019 Alexander L. Hayes
"""
Clean individual variables.
"""
import logging
import numpy as np
LOGGER = logging.getLogger(__name__)
class VariableCleaner:
"""
Clean individual variables in-place.
"""
def __init__(self, data_frame):
self.frame = data_frame
def clean(self, operations_list):
"""
:param operations_list: List of dictionaries with 'operator', 'columns', and 'value' keys.
"""
LOGGER.debug("Started variable cleaning.")
operations = {
"default_value": self._default_value,
"difference": self._difference,
"multiply_constant": self._multiply_constant,
"replace": self._replace,
}
for aggregation in operations_list:
_operation = aggregation["operator"]
_columns = aggregation["columns"]
_value = aggregation["value"]
LOGGER.debug("{0},{1},{2}".format(_operation, _columns, _value))
operations[_operation](_columns, _value)
LOGGER.debug("Finished variable cleaning.")
def _default_value(self, columns, value):
self.frame[columns] = self.frame[columns].fillna(value)
def _difference(self, columns, value):
if not isinstance(value, str):
# 'value' is numeric and we should be able to subtract the constant.
self.frame[columns] = self.frame[columns] - value
else:
if len(columns) > 1:
raise ValueError(
'"operation": "difference" between two columns is ambiguous.'
)
try:
self.frame[columns[0]] = self.frame[columns[0]] - self.frame[value]
except TypeError:
try:
self.frame[columns[0]] = self.frame[columns[0]].astype(float) - self.frame[value].astype(float)
except ValueError as _message:
LOGGER.error(
"Error: {0} in (columns: {1})".format(_message, columns)
)
raise RuntimeError(
'Could not complete "difference" operation on "{0}". Try "default_value" or "replace" first.'.format(
columns
)
)
def _multiply_constant(self, columns, value):
# TODO(@hayesall): Generalize to allow multiplying by content of a column.
try:
# Default behavior: multiply.
self.frame[columns] = self.frame[columns] * value
except TypeError:
# Try catching a TypeError and converting to float
try:
self.frame[columns] = self.frame[columns].astype(float) * value
except ValueError as _message:
# ValueError will be thrown if we cannot convert to float
LOGGER.error("Error: {0} in (columns: {1})".format(_message, columns))
raise RuntimeError(
'Could not "multiply_constant" operation on "{0}". Try "default_value" or "replace" first.'.format(
columns
)
)
def _replace(self, columns, value):
# Replace a specific value with another value.
if value[1] == "NaN":
self.frame[columns] = self.frame[columns].replace(value[0], np.nan)
else:
self.frame[columns] = self.frame[columns].replace(value[0], value[1])
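# Hypothetical usage sketch: the frame and operations below are invented to show
# the expected shape of `operations_list` (keys 'operator', 'columns', 'value').
if __name__ == "__main__":
    import pandas as pd

    frame = pd.DataFrame({"age": [25.0, None, 40.0], "weight_kg": [70.0, 82.5, None]})
    cleaner = VariableCleaner(frame)
    cleaner.clean([
        {"operator": "default_value", "columns": ["age"], "value": 0.0},
        {"operator": "multiply_constant", "columns": ["weight_kg"], "value": 2.2},
    ])
    print(frame)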
|
the-stack_106_21584
|
"""Functions for authenticating, and several alternatives
for persisting credentials.
Both auth and reauth functions require the following kwargs:
client_id
client_secret
base_url
"""
import datetime
import httplib2
import json
default_expires_in = 900
_datetime_format = "%Y-%m-%d %H:%M:%S" # assume UTC
def _datetime_serialize(dt):
return dt.strftime(_datetime_format)
def _datetime_deserialize(s):
return datetime.datetime.strptime(s, _datetime_format)
def need_to_reauth(tolerance=10, **kwargs):
"""Determine whether reauthentication is necessary."""
if "expires" not in kwargs:
return True
expires = _datetime_deserialize(kwargs["expires"])
now = (
datetime.datetime.utcnow() +
datetime.timedelta(0, tolerance)
)
return now >= expires
def auth(**kwargs):
"""Do password authentication.
Also requires kwargs "username" and "password".
"""
data = dict(
grant_type="password",
username=kwargs["username"],
password=kwargs["password"],
)
result = _auth(data, **kwargs)
del result["password"]
return result
def reauth(**kwargs):
"""Use the refresh token to update the access token.
Also requires kwarg "refresh_token".
"""
data = dict(
grant_type="refresh_token",
refresh_token=kwargs["refresh_token"],
)
return _auth(data, **kwargs)
def _auth(data, auth_path="/oauth2/token", **kwargs):
body = dict(
client_id=kwargs["client_id"],
client_secret=kwargs["client_secret"],
**data
)
http = httplib2.Http()
resp, content = http.request(
"".join([kwargs["base_url"], auth_path]),
"POST",
headers={"Content-Type": "application/json"},
body=json.dumps(body),
)
# TODO handle case of bad auth information
if resp["status"] != "201" and resp["status"] != "200":
raise RuntimeError(
"expected HTTP 200 or 201, but got %s for auth" % resp["status"]
)
data = json.loads(content)["data"]
# make up an expiration time for the access token,
# if one is not provided
if "expires_in" not in data:
data["expires_in"] = str(default_expires_in)
    # compute the expiration time in UTC and serialize it with
    # _datetime_serialize
expires = (
datetime.datetime.utcnow() +
datetime.timedelta(0, int(data["expires_in"]))
)
new_auth_data = dict(kwargs)
# do this second to be sure we overwrite any old tokens
new_auth_data.update(dict(
access_token=data["access_token"],
refresh_token=data["refresh_token"],
expires=_datetime_serialize(expires),
))
return new_auth_data
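# Hypothetical usage sketch: the credential values and the JSON-file persistence
# shown here are illustrative assumptions; only auth(), need_to_reauth() and
# reauth() come from this module.
if __name__ == "__main__":
    creds = auth(
        base_url="https://api.example.com",
        client_id="my-client-id",
        client_secret="my-client-secret",
        username="user",
        password="secret",
    )
    with open("credentials.json", "w") as fh:
        json.dump(creds, fh)
    # Later, refresh only when the access token is close to expiring.
    if need_to_reauth(**creds):
        creds = reauth(**creds)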
|
the-stack_106_21585
|
#coding:utf-8
import numpy as np
import tensorflow as tf
from .Model import Model
import logging
l1 = logging.getLogger('root')
l1.setLevel(logging.WARNING)
# l1.setLevel(logging.DEBUG)
gv_log = logging.FileHandler('y_and_res.log')
gv_log.setLevel(logging.DEBUG)
l1.addHandler(gv_log)
class ComplEx_freeze(Model):
def embedding_def(self):
config = self.get_config()
# Real is first half of embedding, Im is second
real_idx = config.hidden_size // 2
im_idx = config.hidden_size
logging.warning("real_idx {}".format(real_idx))
ent1_initilializer = tf.constant_initializer(np.array(config.ent_embedding_initializer)[:,0:real_idx] , verify_shape=True)
ent2_initilializer = tf.constant_initializer(np.array(config.ent_embedding_initializer)[:,real_idx:im_idx] , verify_shape=True)
rel1_initilializer = tf.constant_initializer(np.array(config.rel_embedding_initializer)[:,0:real_idx] , verify_shape=True)
rel2_initilializer = tf.constant_initializer(np.array(config.rel_embedding_initializer)[:,real_idx:im_idx] , verify_shape=True)
self.ent1_embeddings = tf.get_variable(name = "ent1_embeddings",\
shape = [config.entTotal, config.hidden_size//2],\
initializer = ent1_initilializer,\
trainable = True) #initialize with old embeddings
self.ent2_embeddings = tf.get_variable(name = "ent2_embeddings",\
shape = [config.entTotal, config.hidden_size//2],\
initializer = ent2_initilializer,\
trainable = True) #initialize with old embeddings
self.rel1_embeddings = tf.get_variable(name = "rel1_embeddings",\
shape = [config.relTotal, config.hidden_size//2],\
initializer = rel1_initilializer,\
trainable = True) #initialize with old embeddings
self.rel2_embeddings = tf.get_variable(name = "rel2_embeddings",\
shape = [config.relTotal, config.hidden_size//2],\
initializer = rel2_initilializer,\
trainable = True) #initialize with old embeddings
self.parameter_lists = {"ent_re_embeddings":self.ent1_embeddings, \
"ent_im_embeddings":self.ent2_embeddings, \
"rel_re_embeddings":self.rel1_embeddings, \
"rel_im_embeddings":self.rel2_embeddings}
r'''
ComplEx extends DistMult by introducing complex-valued embeddings so as to better model asymmetric relations.
It is proved that HolE is subsumed by ComplEx as a special case.
'''
def _calc(self, e1_h, e2_h, e1_t, e2_t, r1, r2):
return e1_h * e1_t * r1 + e2_h * e2_t * r1 + e1_h * e2_t * r2 - e2_h * e1_t * r2
def loss_def(self):
#Obtaining the initial configuration of the model
config = self.get_config()
batch_size = config.batch_size
negative_ent = config.negative_ent
negative_rel = config.negative_rel
#To get positive triples and negative triples for training
#To get labels for the triples, positive triples as 1 and negative triples as -1
#The shapes of h, t, r, y are (batch_size, 1 + negative_ent + negative_rel)
h, t, r = self.get_all_instance()
# y = self.get_all_labels()
logging.warning("h dim: {}".format(h.shape)) # (neg_ent + neg_rel + 1)*batch_size (+1 is from 1 pos_ent per set of negs)
# logging.warning("y dim: {}".format(y.shape))
#Embedding entities and relations of triples
e1_h = tf.nn.embedding_lookup(self.ent1_embeddings, h)
e2_h = tf.nn.embedding_lookup(self.ent2_embeddings, h)
e1_t = tf.nn.embedding_lookup(self.ent1_embeddings, t)
e2_t = tf.nn.embedding_lookup(self.ent2_embeddings, t)
r1 = tf.nn.embedding_lookup(self.rel1_embeddings, r)
r2 = tf.nn.embedding_lookup(self.rel2_embeddings, r)
#Calculating score functions for all positive triples and negative triples
res = tf.reduce_sum(self._calc(e1_h, e2_h, e1_t, e2_t, r1, r2), 1, keep_dims = False)
# Labels are simply a list of 1s as long as the batch size, with an accompanying zero
labels = tf.stack(tf.split(tf.tile([1,0],[batch_size]), batch_size))
        # Get positive and negative scores. The first batch_size entries of res are the
        # positive scores and the rest are the negative scores; for each positive score
        # there are negative_ent + negative_rel negative scores.
pos_scores = tf.split(res[0:batch_size], batch_size)
neg_scores = tf.split(res[batch_size:], batch_size)
# shortcut to save computation time
logsumexp_neg_scores = tf.math.reduce_logsumexp(neg_scores, 1, keep_dims=True)
logits = tf.concat([pos_scores, logsumexp_neg_scores], axis=1)
loss_func = tf.losses.softmax_cross_entropy(onehot_labels=labels,
logits=logits,
reduction=tf.losses.Reduction.SUM)
logging.warning("Res dim: {}".format(res.shape))
# logging.warning("- y * res dim: {}".format((- y * res).shape))
l1.debug("res : {}".format(res))
# l1.debug("y : {}".format(y))
# l1.debug("y2 : {}".format(y_cross_ent)) # Convert y to cross entropy range
l1.debug("------")
        # When freezing embeddings, a typical regularizer such as this one is not particularly
        # meaningful, as it accumulates penalties over many vectors that we have no wish to change.
regul_func = tf.reduce_mean(e1_h ** 2) + tf.reduce_mean(e1_t ** 2) + tf.reduce_mean(e2_h ** 2) + tf.reduce_mean(e2_t ** 2) + tf.reduce_mean(r1 ** 2) + tf.reduce_mean(r2 ** 2)
# I am imagining some future scenario where a part of the loss function is something that
# Penalizes distributional differences between positive and negative samples, since we can almost guarantee
# that negative samples will be drawn from the (much larger) training set. For now, I just
# wish to be able to track the mean magnitude of the newly produced vectors
self.pos_ent_mean_magnitude = tf.reduce_mean(tf.reduce_mean(tf.math.abs(e1_h[0:batch_size,]), 1)) # Mean of means of embeddings
self.pos_ent_min = tf.reduce_min(e1_h[0:batch_size,])
self.pos_ent_max = tf.reduce_max(e1_h[0:batch_size,])
self.pos_ent_sd = tf.reduce_mean(tf.math.reduce_std(e1_h[0:batch_size,], 1)) # mean of sds of embeddings
        # Another option is to clamp the max norm of the weight vectors using something like the keras.constraints.MaxNorm function after weight update
# See:
# https://stats.stackexchange.com/questions/257996/what-is-maxnorm-constraint-how-is-it-useful-in-convolutional-neural-networks
# https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/keras/constraints.py
# http://cs231n.github.io/neural-networks-2/#reg
#Calculating loss to get what the framework will optimize
self.loss = loss_func + config.lmbda * regul_func
def predict_def(self):
config = self.get_config()
predict_h, predict_t, predict_r = self.get_predict_instance()
predict_h_e1 = tf.nn.embedding_lookup(self.ent1_embeddings, predict_h)
predict_t_e1 = tf.nn.embedding_lookup(self.ent1_embeddings, predict_t)
predict_r_e1 = tf.nn.embedding_lookup(self.rel1_embeddings, predict_r)
predict_h_e2 = tf.nn.embedding_lookup(self.ent2_embeddings, predict_h)
predict_t_e2 = tf.nn.embedding_lookup(self.ent2_embeddings, predict_t)
predict_r_e2 = tf.nn.embedding_lookup(self.rel2_embeddings, predict_r)
self.predict = -tf.reduce_sum(self._calc(predict_h_e1, predict_h_e2, predict_t_e1, predict_t_e2, predict_r_e1, predict_r_e2), 1, keep_dims = True)
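# Illustrative sketch (not part of the original model): shows numerically why the
# log-sum-exp shortcut in loss_def is equivalent to a full softmax cross-entropy
# of the positive score against the positive plus all of its negatives.
def _example_logsumexp_loss_sketch():
    import math
    pos, negs = 2.0, [0.5, -1.0, 1.0]
    lse_neg = math.log(sum(math.exp(s) for s in negs))
    # Cross-entropy over the two logits [pos, lse_neg] with one-hot label [1, 0]:
    two_way = -pos + math.log(math.exp(pos) + math.exp(lse_neg))
    # Full softmax cross-entropy of pos against {pos} + all negatives:
    full = -pos + math.log(math.exp(pos) + sum(math.exp(s) for s in negs))
    return two_way, full  # identical up to floating point error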
|
the-stack_106_21586
|
# Copyright 2016-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from . import (
conditionals,
config,
covariances,
expectations,
inducing_variables,
kernels,
kullback_leiblers,
likelihoods,
logdensities,
mean_functions,
models,
monitor,
optimizers,
probability_distributions,
quadrature,
utilities,
)
from .base import Module, Parameter
from .config import default_float, default_int, default_jitter
from .utilities import set_trainable
from .versions import __version__
__all__ = [export for export in dir()]
|
the-stack_106_21587
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Turku University (2018) Department of Future Technologies
# Foresail-1 / PATE Monitor / Middleware (PMAPI)
# PSU controller daemon
#
# control.py - Jani Tammi <[email protected]>
# 0.1 2018.11.14 Initial version.
# 0.2 2018.11.18 Added status.
# 0.3 2019.06.13 Logging now provided by log.py.
#
#
# Loop that processess 'command' table rows into SCPI commands
# and keeps updating the 'psu' table.
#
# NOTE: print()'s seem to crash the daemon, although stdout is /dev/null
#
import os
import sys
import time
#import logging
# Application specific
import log
from Config import Config
from IntervalScheduler import IntervalScheduler
from Database import Database
from PSU import PSU
# Device class 'PSU' raises serial.SerialTimeoutException on BOTH
# read and write timeouts. They need to be caught specially.
from serial import SerialTimeoutException, SerialException
# Number of consecutive Exceptions allowed before exiting
_retry_count = 3
#
# Ticker to be used ONLY in console mode!
#
def ticker():
"""Rotating character. Used only on non-daemon case."""
if Config.PSU.Daemon.systemd:
return
try:
c = ('|', '/', '-', '\\')[ticker.value]
ticker.value += 1
except:
ticker.value = 0
ticker()
else:
print("\r[{}]".format(c), end="", flush=True)
def psu():
"""PSU controller main loop."""
try:
# Programmable Power Supply (PPSU)
# ..also since this context/function is "psu",
# we need another name...
ppsu = PSU(Config.PSU.Serial.port)
with \
Database(Config.database_file) as db, \
IntervalScheduler(
command_interval = Config.PSU.Daemon.Interval.command,
update_interval = Config.PSU.Daemon.Interval.update
) as event:
log.info("Entering main loop...")
log.debug("PSU at port '{}'".format(ppsu.port.name))
consecutive_error_count = 0
lastupdate = time.time()
while True:
if not Config.PSU.Daemon.run_as_daemon:
ticker()
events = event.next()
#
# 'command' table read event
#
if events & IntervalScheduler.COMMAND:
# (id, command, value)
cmd = db.command.next()
if cmd:
# cmd_receipt is a tuple of (success: boolean, value)
# command interface will be modified to support this...
try:
now = time.time()
if cmd[1] == "SET VOLTAGE":
ppsu.voltage = float(cmd[2])
cmd_receipt = (True, str(ppsu.voltage))
elif cmd[1] == "SET CURRENT LIMIT":
ppsu.current_limit = float(cmd[2])
cmd_receipt = (True, str(ppsu.current_limit))
elif cmd[1] == "SET POWER":
ppsu.power = (cmd[2] == "ON")
cmd_receipt = (True, ("OFF", "ON")[ppsu.power])
except KeyboardInterrupt:
# re-raise to exit
cmd_receipt = (False, "CTRL-C Keyboard Interrupt!")
raise
except SerialTimeoutException:
cmd_receipt = (False, "Serial Timeout!")
if consecutive_error_count > _retry_count:
raise
# Retry count not exceeded, increment
consecutive_error_count += 1
except Exception as e:
cmd_receipt = (False, str(e).replace('\n', '\\n'))
if consecutive_error_count > _retry_count:
raise
# Retry count not exceeded, increment
consecutive_error_count += 1
else:
# On success, reset error count
consecutive_error_count = 0
finally:
# TODO: Add success: boolean to Database.close()
db.command.close(cmd[0], cmd_receipt[1])
if cmd_receipt[0]:
log.debug(
"PSU:{} took {:1.3f} ms".format(
cmd[1], (time.time() - now) * 1000
)
)
else:
log.error(
"PSU:{} failed! (error count: {}/{})".format(
cmd[1],
consecutive_error_count,
_retry_count
)
)
#
# 'psu' table update event
#
if events & IntervalScheduler.UPDATE:
now = time.time()
try:
db.psu.update(ppsu.values)
except KeyboardInterrupt:
# re-raise to exit
raise
except:
consecutive_error_count += 1
log.error(
"PSU:update failed! (error count: {}/{})".format(
consecutive_error_count, _retry_count
)
)
if consecutive_error_count >= _retry_count:
raise
else:
consecutive_error_count = 0
log.debug(
"PSU:update took {:1.3f} ms, previous {:1.1f} ms ago".format(
(time.time() - now) * 1000,
(now - lastupdate) * 1000
)
)
lastupdate = now
except KeyboardInterrupt:
# NOTE: print() would be OK here, because daemon code
# can never receive KeyboardInterrupt
log.info("Terminated with CTRL-C")
pass
except SerialTimeoutException as e:
# Case 1: USB-serial adapter has disconnected
if not os.path.exists(ppsu.port.name):
log.error(
"USB Serial Adapter '{}' disconnected!".format(
ppsu.port.name
)
)
# Case (all others): Unknown reason
else:
log.error("Repeated serial timeouts!")
#raise
except SerialException as e:
# Case 1: USB-serial adapter has disconnected
if not os.path.exists(ppsu.port.name):
log.error(
"USB Serial Adapter '{}' disconnected!".format(
ppsu.port.name
)
)
# Case (all others): Unknown reason
else:
log.error("Unusual SerialException!")
raise
except Exception as e:
# Terminating due to exception
log.error("Abnormal termination!")
raise
# EOF
|
the-stack_106_21593
|
# Greedy: sort the values in descending order; the k-th largest value (0-based)
# finishes at tree[k] + k + 2, and the answer is the maximum over all k.
n = int(input())
tree = list(map(int, input().split()))
tree.sort()
tree.reverse()
x = 0
for i in range(2, n + 2):
    t = tree[i - 2] + i
    if t > x:
        x = t
print(x)
|
the-stack_106_21597
|
import unittest
import numpy
from eig.battleship import Ship, BattleshipHypothesis, \
Parser, Executor
from eig.battleship.program import ProgramSyntaxError
class TestParser(unittest.TestCase):
def test_parse_basic(self):
question = Parser.parse("(== (color 1-1) Blue)")
reference = {'type': 'equal',
'children': [
{'type': 'color_fn',
'children': [
{'type': 'location', 'value': (0, 0)}
]},
{'type': 'color', 'value': 1}
]}
self.assertEqual(question.to_dict(), reference)
def test_parse_error_basic(self):
with self.assertRaises(ProgramSyntaxError) as cm:
question = Parser.parse("(== (color 1-1) Blu)")
exception = cm.exception
self.assertEqual(exception.error_msg, "Unrecognized token")
with self.assertRaises(ProgramSyntaxError) as cm1:
question = Parser.parse("(== (color 1-1 2-2) Blue)")
exception = cm1.exception
self.assertEqual(exception.error_msg, "Operand number mismatch. 1 expected, found 2")
with self.assertRaises(ProgramSyntaxError) as cm2:
question = Parser.parse("(== (color Red) Blue)")
exception = cm2.exception
self.assertEqual(exception.error_msg, "Parameter type mismatch. "
"Expected DataType.LOCATION for parameter 1, get DataType.COLOR")
with self.assertRaises(ProgramSyntaxError) as cm3:
question = Parser.parse("(setSize AllTiles)")
exception = cm3.exception
self.assertEqual(exception.error_msg, "Parameter type mismatch. "
"Expected DataType.SET_L for parameter 1, get DataType.SET_LITERAL_L")
def test_parse_lambda(self):
question = Parser.parse("(any (map (lambda x0 (== (orient x0) H)) (set AllColors)))")
reference = {
'type': 'any_op',
'children': [
{'type': 'map_op',
'children': [
{'type': 'lambda_op',
'children': [
{'type': 'lambda_x', 'value': 'x0'},
{'type': 'equal',
'children': [
{'type': 'orient_fn',
'children': [ {'type': 'lambda_x', 'value': 'x0'} ]
},
{'type': 'orientation', 'value': 'H'}
]}
]},
{'type': 'set_op',
'children': [
{'type': 'set_allcolors', 'value': 'AllColors'}
]}
]}
]}
self.assertEqual(question.to_dict(), reference)
def test_parse_error_lambda(self):
with self.assertRaises(ProgramSyntaxError) as cm:
question = Parser.parse("(map (lambda x0 (+ 1 2)) (set AllColors))")
exception = cm.exception
self.assertEqual(exception.error_msg, "Top level type cannot be DataType.SET_N")
with self.assertRaises(ProgramSyntaxError) as cm:
question = Parser.parse("(any (map (lambda x1 (== (color x2) Red)) (set AllColors)))")
exception = cm.exception
self.assertEqual(exception.error_msg, "Lambda variable x2 should not exist here")
with self.assertRaises(ProgramSyntaxError) as cm:
question = Parser.parse("(++ (map (lambda x1 (== (size x1) (++ (map (lambda x1 (== (orient x1) V)) (set AllColors))))) (set AllColors)))")
exception = cm.exception
self.assertEqual(exception.error_msg, "Lambda variable x1 has already been defined")
with self.assertRaises(ProgramSyntaxError) as cm:
question = Parser.parse("(any (map (lambda x2 (== (orient x2) H)) (set AllTiles)))")
exception = cm.exception
self.assertEqual(exception.error_msg, "Parameter type mismatch. "
"Expected one of\n (DataType.LAMBDA_FXB, DataType.SET_S),\n (DataType.LAMBDA_FYB, DataType.SET_L),\n"
" (DataType.LAMBDA_FXL, DataType.SET_S),\n (DataType.LAMBDA_FXN, DataType.SET_S),\nget (DataType.LAMBDA_FXB, DataType.SET_L)")
with self.assertRaises(ProgramSyntaxError) as cm:
question = Parser.parse("(++ (lambda (lambda x0 (size x0)) (set AllColors)))")
exception = cm.exception
self.assertEqual(exception.error_msg, "Parameter type mismatch. "
"The first child of lambda operator should be lambda\n"
"variable (x0, x1, y2, etc.), get lambda_op")
def test_optimization(self):
        # AllTiles and AllColors depend on the hypothesis configuration (the number of ships, the size of the board, etc.)
        # They cannot be optimized without board information
"""
question = Parser.parse("(bottomright (set AllTiles))", optimization=True)
ref = {'type': 'location', 'value': (5, 5)}
self.assertEqual(question.to_dict(), ref)
question = Parser.parse("(and (all (map (lambda y0 (== (color y0) Red)) (set AllTiles))) FALSE)", optimization=True)
ref = {'type': 'boolean', 'value': False}
self.assertEqual(question.to_dict(), ref)
question = Parser.parse("(== (topleft (coloredTiles Blue)) (bottomright (set AllTiles)))", optimization=True)
ref = {'type': 'equal',
'children': [
{'type': 'topleft',
'children': [
{'type': 'colored_tiles_fn',
'children':[
{'type': 'color', 'value': 1}
]}
]},
{'type': 'location', 'value': (5, 5)}
]}
self.assertEqual(question.to_dict(), ref)
question = Parser.parse("(topleft (union (map (lambda x0 1-1) (set AllColors)) (coloredTiles Blue)))", optimization=True)
ref = {'type': 'topleft',
'children': [
{'type': 'union',
'children': [
{'type': 'set_op',
'children':[
{'type': 'location', 'value': (0, 0)},
{'type': 'location', 'value': (0, 0)},
{'type': 'location', 'value': (0, 0)}
]},
{'type': 'colored_tiles_fn',
'children': [
{'type': 'color', 'value': 1}
]}
]}
]}
self.assertEqual(question.to_dict(), ref)
question = Parser.parse("(++ (map (lambda x0 (+ 1 1)) (set AllColors)))", optimization=True)
ref = {'type': 'number', 'value': 6}
self.assertEqual(question.to_dict(), ref)
"""
pass
|
the-stack_106_21598
|
#!/usr/bin/env python
"""
Benchmark script to measure time taken to set values using a variety of
different methods (set, set_bulk).
"""
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import contextlib
import time
from ukt import *
server = EmbeddedServer(quiet=True)
server.run()
db = server.client
def do_set(nrows, klen, vlen):
kfmt = '%%0%sd' % klen
vfmt = '%%0%sd' % vlen
for i in range(nrows):
db.set(kfmt % i, vfmt % i)
def do_set_bulk(nrows, chunksize, klen, vlen):
kfmt = '%%0%sd' % klen
vfmt = '%%0%sd' % vlen
for i in range(0, nrows, chunksize):
accum = {kfmt % j: vfmt % j for j in range(i, i + chunksize)}
db.set_bulk(accum)
@contextlib.contextmanager
def timed(msg, *params):
pstr = ', '.join(map(str, params))
s = time.time()
yield
print('%0.3fs - %s(%s)' % (time.time() - s, msg, pstr))
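# Each `timed` block prints one summary line; with illustrative numbers it
# looks like: "12.345s - set(200000, 48, 512)".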
SETTINGS = (
# (nrows, chunksiz, ksiz, vsiz).
(200000, 10000, 48, 512), # ~100MB of data, 20 batches.
(25000, 1250, 256, 1024 * 4), # ~100MB of data, 20 batches.
(1700, 100, 256, 1024 * 64), # ~100MB of data, 17 batches.
)
for nrows, chunksiz, ksiz, vsiz in SETTINGS:
with timed('set', nrows, ksiz, vsiz):
do_set(nrows, ksiz, vsiz)
db.clear()
with timed('set_bulk', nrows, chunksiz, ksiz, vsiz):
do_set_bulk(nrows, chunksiz, ksiz, vsiz)
db.clear()
print('\n')
try:
server.stop()
except OSError:
pass
|
the-stack_106_21599
|
"""
========================
Merge moves with HDP-HMM
========================
How to try merge moves efficiently for time-series datasets.
This example reviews three possible ways to plan and execute merge
proposals.
* try merging all pairs of clusters
* pick fewer merge pairs (at most 5 per cluster) in a size-biased way
* pick fewer merge pairs (at most 5 per cluster) in objective-driven way
"""
# sphinx_gallery_thumbnail_number = 2
import bnpy
import numpy as np
import os
from matplotlib import pylab
import seaborn as sns
FIG_SIZE = (10, 5)
pylab.rcParams['figure.figsize'] = FIG_SIZE
###############################################################################
#
# Setup: Load data
# ----------------
# Read bnpy's built-in "Mocap6" dataset from file.
dataset_path = os.path.join(bnpy.DATASET_PATH, 'mocap6')
dataset = bnpy.data.GroupXData.read_npz(
os.path.join(dataset_path, 'dataset.npz'))
###############################################################################
#
# Setup: Initialization hyperparameters
# -------------------------------------
init_kwargs = dict(
K=20,
initname='randexamples',
)
alg_kwargs = dict(
nLap=29,
nTask=1, nBatch=1, convergeThr=0.0001,
)
###############################################################################
#
# Setup: HDP-HMM hyperparameters
# ------------------------------
hdphmm_kwargs = dict(
gamma = 5.0, # top-level Dirichlet concentration parameter
transAlpha = 0.5, # trans-level Dirichlet concentration parameter
)
###############################################################################
#
# Setup: Gaussian observation model hyperparameters
# -------------------------------------------------
gauss_kwargs = dict(
sF = 1.0, # Set prior so E[covariance] = identity
ECovMat = 'eye',
)
###############################################################################
#
# All-Pairs : Try all possible pairs of merges every 10 laps
# ----------------------------------------------------------
#
# This is expensive, but a good exhaustive test.
allpairs_merge_kwargs = dict(
m_startLap = 10,
# Set limits to number of merges attempted each lap.
# This value specifies max number of tries for each cluster
# Setting this very high (to 50) effectively means try all pairs
m_maxNumPairsContainingComp = 50,
# Set "reactivation" limits
    # So that each cluster is eligible again after 10 passes through the dataset
    # Or when its size changes by 400%
m_nLapToReactivate = 10,
m_minPercChangeInNumAtomsToReactivate = 400 * 0.01,
# Specify how to rank pairs (determines order in which merges are tried)
# 'total_size' and 'descending' means try largest combined clusters first
m_pair_ranking_procedure = 'total_size',
m_pair_ranking_direction = 'descending',
)
allpairs_trained_model, allpairs_info_dict = bnpy.run(
dataset, 'HDPHMM', 'DiagGauss', 'memoVB',
output_path='/tmp/mocap6/trymerge-K=20-model=HDPHMM+DiagGauss-ECovMat=1*eye-merge_strategy=all_pairs/',
moves='merge,shuffle',
    **dict(
        list(alg_kwargs.items())
        + list(init_kwargs.items())
        + list(hdphmm_kwargs.items())
        + list(gauss_kwargs.items())
        + list(allpairs_merge_kwargs.items())))
###############################################################################
#
# Large-Pairs : Try 5-largest-size pairs of merges every 10 laps
# --------------------------------------------------------------
#
# This is much cheaper than all pairs. Let's see how well it does.
largepairs_merge_kwargs = dict(
m_startLap = 10,
# Set limits to number of merges attempted each lap.
# This value specifies max number of tries for each cluster
m_maxNumPairsContainingComp = 5,
# Set "reactivation" limits
    # So that each cluster is eligible again after 10 passes through the dataset
    # Or when its size changes by 400%
m_nLapToReactivate = 10,
m_minPercChangeInNumAtomsToReactivate = 400 * 0.01,
# Specify how to rank pairs (determines order in which merges are tried)
# 'total_size' and 'descending' means try largest size clusters first
m_pair_ranking_procedure = 'total_size',
m_pair_ranking_direction = 'descending',
)
largepairs_trained_model, largepairs_info_dict = bnpy.run(
dataset, 'HDPHMM', 'DiagGauss', 'memoVB',
output_path='/tmp/mocap6/trymerge-K=20-model=HDPHMM+DiagGauss-ECovMat=1*eye-merge_strategy=large_pairs/',
moves='merge,shuffle',
    **dict(
        list(alg_kwargs.items())
        + list(init_kwargs.items())
        + list(hdphmm_kwargs.items())
        + list(gauss_kwargs.items())
        + list(largepairs_merge_kwargs.items())))
###############################################################################
#
# Good-ELBO-Pairs : Rank pairs of merges by improvement to observation model
# --------------------------------------------------------------------------
#
# This is much cheaper than all pairs and perhaps more principled.
# Let's see how well it does.
goodelbopairs_merge_kwargs = dict(
m_startLap = 10,
# Set limits to number of merges attempted each lap.
# This value specifies max number of tries for each cluster
m_maxNumPairsContainingComp = 5,
# Set "reactivation" limits
    # So that each cluster is eligible again after 10 passes through the dataset
    # Or when its size changes by 400%
m_nLapToReactivate = 10,
m_minPercChangeInNumAtomsToReactivate = 400 * 0.01,
# Specify how to rank pairs (determines order in which merges are tried)
# 'obsmodel_elbo' means rank pairs by improvement to observation model ELBO
m_pair_ranking_procedure = 'obsmodel_elbo',
m_pair_ranking_direction = 'descending',
)
goodelbopairs_trained_model, goodelbopairs_info_dict = bnpy.run(
dataset, 'HDPHMM', 'DiagGauss', 'memoVB',
output_path='/tmp/mocap6/trymerge-K=20-model=HDPHMM+DiagGauss-ECovMat=1*eye-merge_strategy=good_elbo_pairs/',
moves='merge,shuffle',
    **dict(
        list(alg_kwargs.items())
        + list(init_kwargs.items())
        + list(hdphmm_kwargs.items())
        + list(gauss_kwargs.items())
        + list(goodelbopairs_merge_kwargs.items())))
###############################################################################
#
# Compare loss function vs wallclock time
# ---------------------------------------
#
pylab.figure()
for info_dict, color_str, label_str in [
(allpairs_info_dict, 'k', 'all_pairs'),
(largepairs_info_dict, 'g', 'large_pairs'),
(goodelbopairs_info_dict, 'b', 'good_elbo_pairs')]:
pylab.plot(
info_dict['elapsed_time_sec_history'],
info_dict['loss_history'],
'.-',
color=color_str,
label=label_str)
pylab.legend(loc='upper right')
pylab.xlabel('elapsed time (sec)')
pylab.ylabel('loss')
###############################################################################
#
# Compare number of active clusters vs wallclock time
# ---------------------------------------------------
#
pylab.figure()
for info_dict, color_str, label_str in [
(allpairs_info_dict, 'k', 'all_pairs'),
(largepairs_info_dict, 'g', 'large_pairs'),
(goodelbopairs_info_dict, 'b', 'good_elbo_pairs')]:
pylab.plot(
info_dict['elapsed_time_sec_history'],
info_dict['K_history'],
'.-',
color=color_str,
label=label_str)
pylab.legend(loc='upper right')
pylab.xlabel('elapsed time (sec)')
pylab.ylabel('num. clusters (K)')
pylab.show()
|
the-stack_106_21600
|
'''
Setup.py for creating a binary distribution.
'''
from __future__ import print_function
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
from os import environ
from os.path import dirname, join, exists
import re
import sys
from platform import machine
from setup_sdist import SETUP_KWARGS
# XXX hack to be able to import jnius.env without having built
# jnius.jnius yet; a better solution is welcome
syspath = sys.path[:]
sys.path.insert(0, 'jnius')
from env import (
get_possible_homes,
get_library_dirs,
get_include_dirs,
get_libraries,
find_javac,
PY2,
)
sys.path = syspath
def getenv(key):
'''Get value from environment and decode it.'''
val = environ.get(key)
if val is not None and not PY2:
try:
return val.decode()
except AttributeError:
return val
return val
FILES = [
'jni.pxi',
'jnius_compat.pxi',
'jnius_conversion.pxi',
'jnius_export_class.pxi',
'jnius_export_func.pxi',
'jnius_jvm_android.pxi',
'jnius_jvm_desktop.pxi',
'jnius_jvm_dlopen.pxi',
'jnius_localref.pxi',
'jnius.pyx',
'jnius_utils.pxi',
]
EXTRA_LINK_ARGS = []
INSTALL_REQUIRES = ['six>=1.7.0']
SETUP_REQUIRES = []
# detect Python for android
PLATFORM = sys.platform
NDKPLATFORM = getenv('NDKPLATFORM')
if NDKPLATFORM is not None and getenv('LIBLINK'):
PLATFORM = 'android'
# detect cython
if PLATFORM != 'android':
SETUP_REQUIRES.append('cython')
INSTALL_REQUIRES.append('cython')
else:
FILES = [fn[:-3] + 'c' for fn in FILES if fn.endswith('pyx')]
def compile_native_invocation_handler(*possible_homes):
'''Find javac and compile NativeInvocationHandler.java.'''
javac = find_javac(PLATFORM, possible_homes)
source_level = '1.7'
try:
subprocess.check_call([
javac, '-target', source_level, '-source', source_level,
join('jnius', 'src', 'org', 'jnius', 'NativeInvocationHandler.java')
])
except FileNotFoundError:
subprocess.check_call([
javac.replace('"', ''), '-target', source_level, '-source', source_level,
join('jnius', 'src', 'org', 'jnius', 'NativeInvocationHandler.java')
])
compile_native_invocation_handler(*get_possible_homes(PLATFORM))
# generate the config.pxi
with open(join(dirname(__file__), 'jnius', 'config.pxi'), 'w') as fd:
fd.write('DEF JNIUS_PLATFORM = {0!r}\n\n'.format(PLATFORM))
if not PY2:
fd.write('# cython: language_level=3\n\n')
fd.write('DEF JNIUS_PYTHON3 = True\n\n')
else:
fd.write('# cython: language_level=2\n\n')
fd.write('DEF JNIUS_PYTHON3 = False\n\n')
# pop setup.py from included files in the installed package
SETUP_KWARGS['py_modules'].remove('setup')
# create the extension
setup(
cmdclass={'build_ext': build_ext},
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
ext_modules=[
Extension(
'jnius', [join('jnius', x) for x in FILES],
libraries=get_libraries(PLATFORM),
library_dirs=get_library_dirs(PLATFORM),
include_dirs=get_include_dirs(PLATFORM),
extra_link_args=EXTRA_LINK_ARGS,
)
],
extras_require={
'dev': ['pytest', 'wheel', 'pytest-cov', 'pycodestyle'],
'ci': ['coveralls', 'pytest-rerunfailures', 'setuptools>=34.4.0'],
},
**SETUP_KWARGS
)
|
the-stack_106_21601
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from sqlalchemy import Integer, Column, ForeignKey, Sequence, String, Date, Unicode
from sqlalchemy.orm import relationship, subqueryload
from json import JSONEncoder
import regex
from . import Base
class ORMEncoder(JSONEncoder):
def default(self, obj):
# convert object to a dict
d = {}
if isinstance(obj, Version):
return {'date': str(obj.date), 'counties': list(obj.counties)}
if isinstance(obj, County):
return {'code': obj.code, 'name': obj.name, 'towns': obj.towns}
if isinstance(obj, Town):
return {'code': obj.code, 'name': obj.name, 'sections': obj.sections}
if isinstance(obj, Section):
return {'code': obj.code, 'section_name': obj.section_name, 'small_section_name': obj.small_section_name,
'office': obj.office, 'code6': obj.code6, 'code7': obj.code7}
d.update(obj.__dict__)
return d
def hook(dict):
if dict.get('counties'):
version = Version()
version.__dict__.update(dict)
return version
if dict.get('towns'):
county = County()
county.__dict__.update(dict)
return county
if dict.get('sections'):
town = Town()
town.__dict__.update(dict)
return town
else:
section = Section()
section.__dict__.update(dict)
return section
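# A minimal usage sketch (hypothetical `version` instance, not part of this
# module): ORMEncoder turns the ORM tree into JSON, and `hook` rebuilds plain,
# detached objects from that JSON via `object_hook`.
#
#   import json
#   text = json.dumps(version, cls=ORMEncoder)
#   restored = json.loads(text, object_hook=hook)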
class Version(Base):
__tablename__ = 'version'
id = Column(Integer, Sequence('version_id_seq'), primary_key=True, nullable=False)
date = Column(Date)
counties = relationship('County')
@staticmethod
def get_latest_version(session):
return session.query(Version).order_by(Version.date.desc()).first()
@staticmethod
def get_version(session, date):
version = session.query(Version).filter(Version.date == date)\
.options(subqueryload(Version.counties).subqueryload(County.towns).subqueryload(Town.sections)).first()
# remove all object instance from this Session
session.expunge_all()
return version
def find(self, county_str):
return [county for county in self.counties if regex.match(r'(?b)('+county_str+'){i<=1}', county.name)]
class County(Base):
__tablename__ = 'county'
id = Column(Integer, Sequence('county_id_seq'), primary_key=True, nullable=False)
code = Column(String(1))
name = Column(Unicode(5))
version_id = Column(Integer, ForeignKey('version.id'))
version = relationship('Version', back_populates='counties')
towns = relationship('Town')
def find(self, town_str):
return [town for town in self.towns if regex.match(r'(?b)('+town_str+'){i<=1}', town.name)]
class Town(Base):
__tablename__ = 'town'
id = Column(Integer, Sequence('county_id_seq'), primary_key=True, nullable=False)
code = Column(String(3))
name = Column(Unicode(5))
county_id = Column(Integer, ForeignKey('county.id'))
county = relationship('County', back_populates='towns')
sections = relationship('Section')
class Section(Base):
__tablename__ = 'section'
id = Column(Integer, Sequence('section_id_seq'), primary_key=True, nullable=False)
code = Column(String(4))
office = Column(String(2))
section_name = Column(Unicode(20))
small_section_name = Column(Unicode(20))
code6 = Column(String(6))
code7 = Column(String(7))
town_id = Column(Integer, ForeignKey('town.id'))
town = relationship('Town', back_populates='sections')
def count_section_fuzzy(self, section_str):
self.section_fc = regex.fullmatch(r'(?e)('+section_str+'){e}', self.section_name).fuzzy_counts
def count_small_section_fuzzy(self, small_section_str):
self.small_section_fc = regex.fullmatch(r'(?e)(' + small_section_str + '){e}', self.small_section_name).fuzzy_counts
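# Sketch of the fuzzy matching used above (hypothetical strings): the `regex`
# module's `{e}` quantifier allows errors, and the match object exposes them as
# `fuzzy_counts`, a (substitutions, insertions, deletions) tuple.
#
#   import regex
#   m = regex.fullmatch(r'(?e)(中正段){e}', '中正一段')
#   m.fuzzy_counts  # e.g. (0, 1, 0): one extra character in the target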
|
the-stack_106_21602
|
import json
import os
import base64
import datetime
import hashlib
import copy
import itertools
import codecs
import random
import string
import tempfile
import threading
import pytz
import sys
import time
import uuid
from bisect import insort
from importlib import reload
from moto.core import (
ACCOUNT_ID,
BaseBackend,
BaseModel,
CloudFormationModel,
CloudWatchMetricProvider,
)
from moto.core.utils import (
iso_8601_datetime_without_milliseconds_s3,
rfc_1123_datetime,
unix_time_millis,
)
from moto.cloudwatch.models import MetricDatum
from moto.utilities.tagging_service import TaggingService
from moto.utilities.utils import LowercaseDict
from moto.s3.exceptions import (
AccessDeniedByLock,
BucketAlreadyExists,
BucketNeedsToBeNew,
MissingBucket,
InvalidBucketName,
InvalidPart,
InvalidRequest,
EntityTooSmall,
MissingKey,
InvalidNotificationDestination,
MalformedXML,
InvalidStorageClass,
InvalidTargetBucketForLogging,
CrossLocationLoggingProhibitted,
NoSuchPublicAccessBlockConfiguration,
InvalidPublicAccessBlockConfiguration,
WrongPublicAccessBlockAccountIdError,
NoSuchUpload,
ObjectLockConfigurationNotFoundError,
InvalidTagError,
)
from .cloud_formation import cfn_to_api_encryption, is_replacement_update
from .utils import clean_key_name, _VersionedKeyStore, undo_clean_key_name
from ..settings import get_s3_default_key_buffer_size, S3_UPLOAD_PART_MIN_SIZE
MAX_BUCKET_NAME_LENGTH = 63
MIN_BUCKET_NAME_LENGTH = 3
UPLOAD_ID_BYTES = 43
STORAGE_CLASS = [
"STANDARD",
"REDUCED_REDUNDANCY",
"STANDARD_IA",
"ONEZONE_IA",
"INTELLIGENT_TIERING",
"GLACIER",
"DEEP_ARCHIVE",
]
DEFAULT_TEXT_ENCODING = sys.getdefaultencoding()
OWNER = "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a"
def get_moto_s3_account_id():
"""This makes it easy for mocking AWS Account IDs when using AWS Config
-- Simply mock.patch the ACCOUNT_ID here, and Config gets it for free.
"""
return ACCOUNT_ID
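# A hedged sketch of what the docstring describes (the dotted module path is an
# assumption; adjust it to wherever this module lives in your tree):
#
#   from unittest import mock
#   with mock.patch("moto.s3.models.ACCOUNT_ID", "111122223333"):
#       assert get_moto_s3_account_id() == "111122223333"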
class FakeDeleteMarker(BaseModel):
def __init__(self, key):
self.key = key
self.name = key.name
self.last_modified = datetime.datetime.utcnow()
self._version_id = str(uuid.uuid4())
@property
def last_modified_ISO8601(self):
return iso_8601_datetime_without_milliseconds_s3(self.last_modified)
@property
def version_id(self):
return self._version_id
class FakeKey(BaseModel):
def __init__(
self,
name,
value,
storage="STANDARD",
etag=None,
is_versioned=False,
version_id=0,
max_buffer_size=None,
multipart=None,
bucket_name=None,
encryption=None,
kms_key_id=None,
bucket_key_enabled=None,
lock_mode=None,
lock_legal_status=None,
lock_until=None,
):
self.name = name
self.last_modified = datetime.datetime.utcnow()
self.acl = get_canned_acl("private")
self.website_redirect_location = None
self._storage_class = storage if storage else "STANDARD"
self._metadata = LowercaseDict()
self._expiry = None
self._etag = etag
self._version_id = version_id
self._is_versioned = is_versioned
self.multipart = multipart
self.bucket_name = bucket_name
self._max_buffer_size = (
max_buffer_size if max_buffer_size else get_s3_default_key_buffer_size()
)
self._value_buffer = tempfile.SpooledTemporaryFile(self._max_buffer_size)
self.value = value
self.lock = threading.Lock()
self.encryption = encryption
self.kms_key_id = kms_key_id
self.bucket_key_enabled = bucket_key_enabled
self.lock_mode = lock_mode
self.lock_legal_status = lock_legal_status
self.lock_until = lock_until
# Default metadata values
self._metadata["Content-Type"] = "binary/octet-stream"
@property
def version_id(self):
return self._version_id
@property
def value(self):
self.lock.acquire()
self._value_buffer.seek(0)
r = self._value_buffer.read()
r = copy.copy(r)
self.lock.release()
return r
@property
def arn(self):
# S3 Objects don't have an ARN, but we do need something unique when creating tags against this resource
return "arn:aws:s3:::{}/{}/{}".format(
self.bucket_name, self.name, self.version_id
)
@value.setter
def value(self, new_value):
self._value_buffer.seek(0)
self._value_buffer.truncate()
# Hack for working around moto's own unit tests; this probably won't
# actually get hit in normal use.
if isinstance(new_value, str):
new_value = new_value.encode(DEFAULT_TEXT_ENCODING)
self._value_buffer.write(new_value)
self.contentsize = len(new_value)
def set_metadata(self, metadata, replace=False):
if replace:
self._metadata = {}
self._metadata.update(metadata)
def set_storage_class(self, storage):
if storage is not None and storage not in STORAGE_CLASS:
raise InvalidStorageClass(storage=storage)
self._storage_class = storage
def set_expiry(self, expiry):
self._expiry = expiry
def set_acl(self, acl):
self.acl = acl
def append_to_value(self, value):
self.contentsize += len(value)
self._value_buffer.seek(0, os.SEEK_END)
self._value_buffer.write(value)
self.last_modified = datetime.datetime.utcnow()
self._etag = None # must recalculate etag
if self._is_versioned:
self._version_id = str(uuid.uuid4())
else:
self._version_id = None
def restore(self, days):
self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)
@property
def etag(self):
if self._etag is None:
value_md5 = hashlib.md5()
self._value_buffer.seek(0)
while True:
block = self._value_buffer.read(16 * 1024 * 1024) # read in 16MB chunks
if not block:
break
value_md5.update(block)
self._etag = value_md5.hexdigest()
return '"{0}"'.format(self._etag)
@property
def last_modified_ISO8601(self):
return iso_8601_datetime_without_milliseconds_s3(self.last_modified)
@property
def last_modified_RFC1123(self):
# Different datetime formats depending on how the key is obtained
# https://github.com/boto/boto/issues/466
return rfc_1123_datetime(self.last_modified)
@property
def metadata(self):
return self._metadata
@property
def response_dict(self):
res = {
"ETag": self.etag,
"last-modified": self.last_modified_RFC1123,
"content-length": str(self.size),
}
if self.encryption is not None:
res["x-amz-server-side-encryption"] = self.encryption
if self.encryption == "aws:kms" and self.kms_key_id is not None:
res["x-amz-server-side-encryption-aws-kms-key-id"] = self.kms_key_id
if self.bucket_key_enabled is not None:
res[
"x-amz-server-side-encryption-bucket-key-enabled"
] = self.bucket_key_enabled
if self._storage_class != "STANDARD":
res["x-amz-storage-class"] = self._storage_class
if self._expiry is not None:
rhdr = 'ongoing-request="false", expiry-date="{0}"'
res["x-amz-restore"] = rhdr.format(self.expiry_date)
if self._is_versioned:
res["x-amz-version-id"] = str(self.version_id)
if self.website_redirect_location:
res["x-amz-website-redirect-location"] = self.website_redirect_location
if self.lock_legal_status:
res["x-amz-object-lock-legal-hold"] = self.lock_legal_status
if self.lock_until:
res["x-amz-object-lock-retain-until-date"] = self.lock_until
if self.lock_mode:
res["x-amz-object-lock-mode"] = self.lock_mode
if self.lock_legal_status:
res["x-amz-object-lock-legal-hold"] = self.lock_legal_status
if self.lock_until:
res["x-amz-object-lock-retain-until-date"] = self.lock_until
if self.lock_mode:
res["x-amz-object-lock-mode"] = self.lock_mode
return res
@property
def size(self):
return self.contentsize
@property
def storage_class(self):
return self._storage_class
@property
def expiry_date(self):
if self._expiry is not None:
return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")
# Keys need to be pickleable due to some implementation details of boto3.
# Since file objects aren't pickleable, we need to override the default
# behavior. The following is adapted from the Python docs:
# https://docs.python.org/3/library/pickle.html#handling-stateful-objects
def __getstate__(self):
state = self.__dict__.copy()
state["value"] = self.value
del state["_value_buffer"]
del state["lock"]
return state
def __setstate__(self, state):
self.__dict__.update({k: v for k, v in state.items() if k != "value"})
self._value_buffer = tempfile.SpooledTemporaryFile(
max_size=self._max_buffer_size
)
self.value = state["value"]
self.lock = threading.Lock()
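    # Round-trip illustration (sketch, not part of moto's API): the spooled
    # temporary file and the threading lock are not picklable, so __getstate__
    # drops them and __setstate__ rebuilds them from the captured value.
    #
    #   import pickle
    #   key = FakeKey("example", b"payload")
    #   assert pickle.loads(pickle.dumps(key)).value == b"payload"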
@property
def is_locked(self):
if self.lock_legal_status == "ON":
return True
if self.lock_mode == "COMPLIANCE":
now = datetime.datetime.utcnow()
try:
until = datetime.datetime.strptime(
self.lock_until, "%Y-%m-%dT%H:%M:%SZ"
)
except ValueError:
until = datetime.datetime.strptime(
self.lock_until, "%Y-%m-%dT%H:%M:%S.%fZ"
)
if until > now:
return True
return False
class FakeMultipart(BaseModel):
def __init__(self, key_name, metadata, storage=None, tags=None):
self.key_name = key_name
self.metadata = metadata
self.storage = storage
self.tags = tags
self.parts = {}
        self.partlist = []  # ordered list of part IDs
rand_b64 = base64.b64encode(os.urandom(UPLOAD_ID_BYTES))
self.id = (
rand_b64.decode("utf-8").replace("=", "").replace("+", "").replace("/", "")
)
def complete(self, body):
decode_hex = codecs.getdecoder("hex_codec")
total = bytearray()
md5s = bytearray()
last = None
count = 0
for pn, etag in body:
part = self.parts.get(pn)
part_etag = None
if part is not None:
part_etag = part.etag.replace('"', "")
etag = etag.replace('"', "")
if part is None or part_etag != etag:
raise InvalidPart()
if last is not None and last.contentsize < S3_UPLOAD_PART_MIN_SIZE:
raise EntityTooSmall()
md5s.extend(decode_hex(part_etag)[0])
total.extend(part.value)
last = part
count += 1
etag = hashlib.md5()
etag.update(bytes(md5s))
return total, "{0}-{1}".format(etag.hexdigest(), count)
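    # Note on the ETag built above: as with real S3 multipart uploads, it is the
    # MD5 of the concatenated binary part-MD5s followed by "-<part count>",
    # e.g. "d41d...27e-3" for a three-part upload (digest shortened here).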
def set_part(self, part_id, value):
if part_id < 1:
raise NoSuchUpload(upload_id=part_id)
key = FakeKey(part_id, value)
self.parts[part_id] = key
if part_id not in self.partlist:
insort(self.partlist, part_id)
return key
def list_parts(self, part_number_marker, max_parts):
for part_id in self.partlist:
part = self.parts[part_id]
if part_number_marker <= part.name < part_number_marker + max_parts:
yield part
class FakeGrantee(BaseModel):
def __init__(self, id="", uri="", display_name=""):
self.id = id
self.uri = uri
self.display_name = display_name
def __eq__(self, other):
if not isinstance(other, FakeGrantee):
return False
return (
self.id == other.id
and self.uri == other.uri
and self.display_name == other.display_name
)
@property
def type(self):
return "Group" if self.uri else "CanonicalUser"
def __repr__(self):
return "FakeGrantee(display_name: '{}', id: '{}', uri: '{}')".format(
self.display_name, self.id, self.uri
)
ALL_USERS_GRANTEE = FakeGrantee(uri="http://acs.amazonaws.com/groups/global/AllUsers")
AUTHENTICATED_USERS_GRANTEE = FakeGrantee(
uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
)
LOG_DELIVERY_GRANTEE = FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")
PERMISSION_FULL_CONTROL = "FULL_CONTROL"
PERMISSION_WRITE = "WRITE"
PERMISSION_READ = "READ"
PERMISSION_WRITE_ACP = "WRITE_ACP"
PERMISSION_READ_ACP = "READ_ACP"
CAMEL_CASED_PERMISSIONS = {
"FULL_CONTROL": "FullControl",
"WRITE": "Write",
"READ": "Read",
"WRITE_ACP": "WriteAcp",
"READ_ACP": "ReadAcp",
}
class FakeGrant(BaseModel):
def __init__(self, grantees, permissions):
self.grantees = grantees
self.permissions = permissions
def __repr__(self):
return "FakeGrant(grantees: {}, permissions: {})".format(
self.grantees, self.permissions
)
class FakeAcl(BaseModel):
def __init__(self, grants=None):
grants = grants or []
self.grants = grants
@property
def public_read(self):
for grant in self.grants:
if ALL_USERS_GRANTEE in grant.grantees:
if PERMISSION_READ in grant.permissions:
return True
if PERMISSION_FULL_CONTROL in grant.permissions:
return True
return False
def __repr__(self):
return "FakeAcl(grants: {})".format(self.grants)
def to_config_dict(self):
"""Returns the object into the format expected by AWS Config"""
data = {
"grantSet": None, # Always setting this to None. Feel free to change.
"owner": {"displayName": None, "id": OWNER},
}
# Add details for each Grant:
grant_list = []
for grant in self.grants:
permissions = (
grant.permissions
if isinstance(grant.permissions, list)
else [grant.permissions]
)
for permission in permissions:
for grantee in grant.grantees:
if grantee.uri:
grant_list.append(
{
"grantee": grantee.uri.split(
"http://acs.amazonaws.com/groups/s3/"
)[1],
"permission": CAMEL_CASED_PERMISSIONS[permission],
}
)
else:
grant_list.append(
{
"grantee": {
"id": grantee.id,
"displayName": None
if not grantee.display_name
else grantee.display_name,
},
"permission": CAMEL_CASED_PERMISSIONS[permission],
}
)
if grant_list:
data["grantList"] = grant_list
return data
def get_canned_acl(acl):
owner_grantee = FakeGrantee(id=OWNER)
grants = [FakeGrant([owner_grantee], [PERMISSION_FULL_CONTROL])]
if acl == "private":
pass # no other permissions
elif acl == "public-read":
grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ]))
elif acl == "public-read-write":
grants.append(
FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ, PERMISSION_WRITE])
)
elif acl == "authenticated-read":
grants.append(FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ]))
elif acl == "bucket-owner-read":
pass # TODO: bucket owner ACL
elif acl == "bucket-owner-full-control":
pass # TODO: bucket owner ACL
elif acl == "aws-exec-read":
pass # TODO: bucket owner, EC2 Read
elif acl == "log-delivery-write":
grants.append(
FakeGrant([LOG_DELIVERY_GRANTEE], [PERMISSION_READ_ACP, PERMISSION_WRITE])
)
else:
assert False, "Unknown canned acl: %s" % (acl,)
return FakeAcl(grants=grants)
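# Quick check of the helper above (sketch):
#   acl = get_canned_acl("public-read")
#   assert acl.public_read is True   # AllUsers were granted READ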
class LifecycleFilter(BaseModel):
def __init__(self, prefix=None, tag=None, and_filter=None):
self.prefix = prefix
(self.tag_key, self.tag_value) = tag if tag else (None, None)
self.and_filter = and_filter
def to_config_dict(self):
if self.prefix is not None:
return {
"predicate": {"type": "LifecyclePrefixPredicate", "prefix": self.prefix}
}
elif self.tag_key:
return {
"predicate": {
"type": "LifecycleTagPredicate",
"tag": {"key": self.tag_key, "value": self.tag_value},
}
}
else:
return {
"predicate": {
"type": "LifecycleAndOperator",
"operands": self.and_filter.to_config_dict(),
}
}
class LifecycleAndFilter(BaseModel):
def __init__(self, prefix=None, tags=None):
self.prefix = prefix
self.tags = tags
def to_config_dict(self):
data = []
if self.prefix is not None:
data.append({"type": "LifecyclePrefixPredicate", "prefix": self.prefix})
for key, value in self.tags.items():
data.append(
{"type": "LifecycleTagPredicate", "tag": {"key": key, "value": value},}
)
return data
class LifecycleRule(BaseModel):
def __init__(
self,
id=None,
prefix=None,
lc_filter=None,
status=None,
expiration_days=None,
expiration_date=None,
transition_days=None,
transition_date=None,
storage_class=None,
expired_object_delete_marker=None,
nve_noncurrent_days=None,
nvt_noncurrent_days=None,
nvt_storage_class=None,
aimu_days=None,
):
self.id = id
self.prefix = prefix
self.filter = lc_filter
self.status = status
self.expiration_days = expiration_days
self.expiration_date = expiration_date
self.transition_days = transition_days
self.transition_date = transition_date
self.storage_class = storage_class
self.expired_object_delete_marker = expired_object_delete_marker
self.nve_noncurrent_days = nve_noncurrent_days
self.nvt_noncurrent_days = nvt_noncurrent_days
self.nvt_storage_class = nvt_storage_class
self.aimu_days = aimu_days
def to_config_dict(self):
"""Converts the object to the AWS Config data dict.
Note: The following are missing that should be added in the future:
- transitions (returns None for now)
- noncurrentVersionTransitions (returns None for now)
:param kwargs:
:return:
"""
lifecycle_dict = {
"id": self.id,
"prefix": self.prefix,
"status": self.status,
"expirationInDays": int(self.expiration_days)
if self.expiration_days
else None,
"expiredObjectDeleteMarker": self.expired_object_delete_marker,
"noncurrentVersionExpirationInDays": -1 or int(self.nve_noncurrent_days),
"expirationDate": self.expiration_date,
"transitions": None, # Replace me with logic to fill in
"noncurrentVersionTransitions": None, # Replace me with logic to fill in
}
if self.aimu_days:
lifecycle_dict["abortIncompleteMultipartUpload"] = {
"daysAfterInitiation": self.aimu_days
}
else:
lifecycle_dict["abortIncompleteMultipartUpload"] = None
# Format the filter:
if self.prefix is None and self.filter is None:
lifecycle_dict["filter"] = {"predicate": None}
elif self.prefix:
lifecycle_dict["filter"] = None
else:
lifecycle_dict["filter"] = self.filter.to_config_dict()
return lifecycle_dict
class CorsRule(BaseModel):
def __init__(
self,
allowed_methods,
allowed_origins,
allowed_headers=None,
expose_headers=None,
max_age_seconds=None,
):
self.allowed_methods = (
[allowed_methods] if isinstance(allowed_methods, str) else allowed_methods
)
self.allowed_origins = (
[allowed_origins] if isinstance(allowed_origins, str) else allowed_origins
)
self.allowed_headers = (
[allowed_headers] if isinstance(allowed_headers, str) else allowed_headers
)
self.exposed_headers = (
[expose_headers] if isinstance(expose_headers, str) else expose_headers
)
self.max_age_seconds = max_age_seconds
class Notification(BaseModel):
def __init__(self, arn, events, filters=None, id=None):
self.id = (
id
if id
else "".join(
random.choice(string.ascii_letters + string.digits) for _ in range(50)
)
)
self.arn = arn
self.events = events
self.filters = filters if filters else {}
def to_config_dict(self):
data = {}
# Type and ARN will be filled in by NotificationConfiguration's to_config_dict:
data["events"] = [event for event in self.events]
if self.filters:
data["filter"] = {
"s3KeyFilter": {
"filterRules": [
{"name": fr["Name"], "value": fr["Value"]}
for fr in self.filters["S3Key"]["FilterRule"]
]
}
}
else:
data["filter"] = None
# Not sure why this is a thing since AWS just seems to return this as filters ¯\_(ツ)_/¯
data["objectPrefixes"] = []
return data
class NotificationConfiguration(BaseModel):
def __init__(self, topic=None, queue=None, cloud_function=None):
self.topic = (
[
Notification(
t["Topic"], t["Event"], filters=t.get("Filter"), id=t.get("Id")
)
for t in topic
]
if topic
else []
)
self.queue = (
[
Notification(
q["Queue"], q["Event"], filters=q.get("Filter"), id=q.get("Id")
)
for q in queue
]
if queue
else []
)
self.cloud_function = (
[
Notification(
c["CloudFunction"],
c["Event"],
filters=c.get("Filter"),
id=c.get("Id"),
)
for c in cloud_function
]
if cloud_function
else []
)
def to_config_dict(self):
data = {"configurations": {}}
for topic in self.topic:
topic_config = topic.to_config_dict()
topic_config["topicARN"] = topic.arn
topic_config["type"] = "TopicConfiguration"
data["configurations"][topic.id] = topic_config
for queue in self.queue:
queue_config = queue.to_config_dict()
queue_config["queueARN"] = queue.arn
queue_config["type"] = "QueueConfiguration"
data["configurations"][queue.id] = queue_config
for cloud_function in self.cloud_function:
cf_config = cloud_function.to_config_dict()
cf_config["queueARN"] = cloud_function.arn
cf_config["type"] = "LambdaConfiguration"
data["configurations"][cloud_function.id] = cf_config
return data
def convert_str_to_bool(item):
"""Converts a boolean string to a boolean value"""
if isinstance(item, str):
return item.lower() == "true"
return False
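# Quick illustration of the helper above:
#   convert_str_to_bool("TRUE")  -> True
#   convert_str_to_bool(None)    -> False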
class PublicAccessBlock(BaseModel):
def __init__(
self,
block_public_acls,
ignore_public_acls,
block_public_policy,
restrict_public_buckets,
):
# The boto XML appears to expect these values to exist as lowercase strings...
self.block_public_acls = block_public_acls or "false"
self.ignore_public_acls = ignore_public_acls or "false"
self.block_public_policy = block_public_policy or "false"
self.restrict_public_buckets = restrict_public_buckets or "false"
def to_config_dict(self):
# Need to make the string values booleans for Config:
return {
"blockPublicAcls": convert_str_to_bool(self.block_public_acls),
"ignorePublicAcls": convert_str_to_bool(self.ignore_public_acls),
"blockPublicPolicy": convert_str_to_bool(self.block_public_policy),
"restrictPublicBuckets": convert_str_to_bool(self.restrict_public_buckets),
}
class FakeBucket(CloudFormationModel):
def __init__(self, name, region_name):
self.name = name
self.region_name = region_name
self.keys = _VersionedKeyStore()
self.multiparts = {}
self.versioning_status = None
self.rules = []
self.policy = None
self.website_configuration = None
self.acl = get_canned_acl("private")
self.cors = []
self.logging = {}
self.notification_configuration = None
self.accelerate_configuration = None
self.payer = "BucketOwner"
self.creation_date = datetime.datetime.now(tz=pytz.utc)
self.public_access_block = None
self.encryption = None
self.object_lock_enabled = False
self.default_lock_mode = ""
self.default_lock_days = 0
self.default_lock_years = 0
@property
def location(self):
return self.region_name
@property
def creation_date_ISO8601(self):
return iso_8601_datetime_without_milliseconds_s3(self.creation_date)
@property
def is_versioned(self):
return self.versioning_status == "Enabled"
def set_lifecycle(self, rules):
self.rules = []
for rule in rules:
# Extract and validate actions from Lifecycle rule
expiration = rule.get("Expiration")
transition = rule.get("Transition")
try:
top_level_prefix = (
rule["Prefix"] or ""
                ) # If it's `None` then set to the empty string
except KeyError:
top_level_prefix = None
nve_noncurrent_days = None
if rule.get("NoncurrentVersionExpiration") is not None:
if rule["NoncurrentVersionExpiration"].get("NoncurrentDays") is None:
raise MalformedXML()
nve_noncurrent_days = rule["NoncurrentVersionExpiration"][
"NoncurrentDays"
]
nvt_noncurrent_days = None
nvt_storage_class = None
if rule.get("NoncurrentVersionTransition") is not None:
if rule["NoncurrentVersionTransition"].get("NoncurrentDays") is None:
raise MalformedXML()
if rule["NoncurrentVersionTransition"].get("StorageClass") is None:
raise MalformedXML()
nvt_noncurrent_days = rule["NoncurrentVersionTransition"][
"NoncurrentDays"
]
nvt_storage_class = rule["NoncurrentVersionTransition"]["StorageClass"]
aimu_days = None
if rule.get("AbortIncompleteMultipartUpload") is not None:
if (
rule["AbortIncompleteMultipartUpload"].get("DaysAfterInitiation")
is None
):
raise MalformedXML()
aimu_days = rule["AbortIncompleteMultipartUpload"][
"DaysAfterInitiation"
]
eodm = None
if expiration and expiration.get("ExpiredObjectDeleteMarker") is not None:
# This cannot be set if Date or Days is set:
if expiration.get("Days") or expiration.get("Date"):
raise MalformedXML()
eodm = expiration["ExpiredObjectDeleteMarker"]
# Pull out the filter:
lc_filter = None
if rule.get("Filter"):
# Can't have both `Filter` and `Prefix` (need to check for the presence of the key):
try:
# 'Prefix' cannot be outside of a Filter:
if rule["Prefix"] or not rule["Prefix"]:
raise MalformedXML()
except KeyError:
pass
filters = 0
try:
prefix_filter = (
rule["Filter"]["Prefix"] or ""
                    ) # If it's `None` then set to the empty string
filters += 1
except KeyError:
prefix_filter = None
and_filter = None
if rule["Filter"].get("And"):
filters += 1
and_tags = {}
if rule["Filter"]["And"].get("Tag"):
if not isinstance(rule["Filter"]["And"]["Tag"], list):
rule["Filter"]["And"]["Tag"] = [
rule["Filter"]["And"]["Tag"]
]
for t in rule["Filter"]["And"]["Tag"]:
and_tags[t["Key"]] = t.get("Value", "")
try:
and_prefix = (
rule["Filter"]["And"]["Prefix"] or ""
) # If it's `None` then set to the empty string
except KeyError:
and_prefix = None
and_filter = LifecycleAndFilter(prefix=and_prefix, tags=and_tags)
filter_tag = None
if rule["Filter"].get("Tag"):
filters += 1
filter_tag = (
rule["Filter"]["Tag"]["Key"],
rule["Filter"]["Tag"].get("Value", ""),
)
# Can't have more than 1 filter:
if filters > 1:
raise MalformedXML()
lc_filter = LifecycleFilter(
prefix=prefix_filter, tag=filter_tag, and_filter=and_filter
)
# If no top level prefix and no filter is present, then this is invalid:
if top_level_prefix is None:
try:
rule["Filter"]
except KeyError:
raise MalformedXML()
self.rules.append(
LifecycleRule(
id=rule.get("ID"),
prefix=top_level_prefix,
lc_filter=lc_filter,
status=rule["Status"],
expiration_days=expiration.get("Days") if expiration else None,
expiration_date=expiration.get("Date") if expiration else None,
transition_days=transition.get("Days") if transition else None,
transition_date=transition.get("Date") if transition else None,
storage_class=transition.get("StorageClass")
if transition
else None,
expired_object_delete_marker=eodm,
nve_noncurrent_days=nve_noncurrent_days,
nvt_noncurrent_days=nvt_noncurrent_days,
nvt_storage_class=nvt_storage_class,
aimu_days=aimu_days,
)
)
def delete_lifecycle(self):
self.rules = []
def set_cors(self, rules):
self.cors = []
if len(rules) > 100:
raise MalformedXML()
for rule in rules:
assert isinstance(rule["AllowedMethod"], list) or isinstance(
rule["AllowedMethod"], str
)
assert isinstance(rule["AllowedOrigin"], list) or isinstance(
rule["AllowedOrigin"], str
)
assert isinstance(rule.get("AllowedHeader", []), list) or isinstance(
rule.get("AllowedHeader", ""), str
)
assert isinstance(rule.get("ExposeHeader", []), list) or isinstance(
rule.get("ExposeHeader", ""), str
)
assert isinstance(rule.get("MaxAgeSeconds", "0"), str)
if isinstance(rule["AllowedMethod"], str):
methods = [rule["AllowedMethod"]]
else:
methods = rule["AllowedMethod"]
for method in methods:
if method not in ["GET", "PUT", "HEAD", "POST", "DELETE"]:
raise InvalidRequest(method)
self.cors.append(
CorsRule(
rule["AllowedMethod"],
rule["AllowedOrigin"],
rule.get("AllowedHeader"),
rule.get("ExposeHeader"),
rule.get("MaxAgeSeconds"),
)
)
def delete_cors(self):
self.cors = []
def set_logging(self, logging_config, bucket_backend):
if not logging_config:
self.logging = {}
return
# Target bucket must exist in the same account (assuming all moto buckets are in the same account):
if not bucket_backend.buckets.get(logging_config["TargetBucket"]):
raise InvalidTargetBucketForLogging(
"The target bucket for logging does not exist."
)
# Does the target bucket have the log-delivery WRITE and READ_ACP permissions?
write = read_acp = False
for grant in bucket_backend.buckets[logging_config["TargetBucket"]].acl.grants:
# Must be granted to: http://acs.amazonaws.com/groups/s3/LogDelivery
for grantee in grant.grantees:
if grantee.uri == "http://acs.amazonaws.com/groups/s3/LogDelivery":
if (
"WRITE" in grant.permissions
or "FULL_CONTROL" in grant.permissions
):
write = True
if (
"READ_ACP" in grant.permissions
or "FULL_CONTROL" in grant.permissions
):
read_acp = True
break
if not write or not read_acp:
raise InvalidTargetBucketForLogging(
"You must give the log-delivery group WRITE and READ_ACP"
" permissions to the target bucket"
)
# Buckets must also exist within the same region:
if (
bucket_backend.buckets[logging_config["TargetBucket"]].region_name
!= self.region_name
):
raise CrossLocationLoggingProhibitted()
# Checks pass -- set the logging config:
self.logging = logging_config
def set_notification_configuration(self, notification_config):
if not notification_config:
self.notification_configuration = None
return
self.notification_configuration = NotificationConfiguration(
topic=notification_config.get("TopicConfiguration"),
queue=notification_config.get("QueueConfiguration"),
cloud_function=notification_config.get("CloudFunctionConfiguration"),
)
# Validate that the region is correct:
for thing in ["topic", "queue", "cloud_function"]:
for t in getattr(self.notification_configuration, thing):
region = t.arn.split(":")[3]
if region != self.region_name:
raise InvalidNotificationDestination()
def set_accelerate_configuration(self, accelerate_config):
if self.accelerate_configuration is None and accelerate_config == "Suspended":
            # Cannot "suspend" acceleration that was never enabled; leave it unset
return
self.accelerate_configuration = accelerate_config
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in [
"Arn",
"DomainName",
"DualStackDomainName",
"RegionalDomainName",
"WebsiteURL",
]
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return self.arn
elif attribute_name == "DomainName":
return self.domain_name
elif attribute_name == "DualStackDomainName":
return self.dual_stack_domain_name
elif attribute_name == "RegionalDomainName":
return self.regional_domain_name
elif attribute_name == "WebsiteURL":
return self.website_url
raise UnformattedGetAttTemplateException()
def set_acl(self, acl):
self.acl = acl
@property
def arn(self):
return "arn:aws:s3:::{}".format(self.name)
@property
def domain_name(self):
return "{}.s3.amazonaws.com".format(self.name)
@property
def dual_stack_domain_name(self):
return "{}.s3.dualstack.{}.amazonaws.com".format(self.name, self.region_name)
@property
def regional_domain_name(self):
return "{}.s3.{}.amazonaws.com".format(self.name, self.region_name)
@property
def website_url(self):
return "http://{}.s3-website.{}.amazonaws.com".format(
self.name, self.region_name
)
@property
def physical_resource_id(self):
return self.name
@staticmethod
def cloudformation_name_type():
return "BucketName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-s3-bucket.html
return "AWS::S3::Bucket"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
bucket = s3_backend.create_bucket(resource_name, region_name)
properties = cloudformation_json.get("Properties", {})
if "BucketEncryption" in properties:
bucket_encryption = cfn_to_api_encryption(properties["BucketEncryption"])
s3_backend.put_bucket_encryption(
bucket_name=resource_name, encryption=bucket_encryption
)
return bucket
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
properties = cloudformation_json["Properties"]
if is_replacement_update(properties):
resource_name_property = cls.cloudformation_name_type()
if resource_name_property not in properties:
properties[resource_name_property] = new_resource_name
new_resource = cls.create_from_cloudformation_json(
properties[resource_name_property], cloudformation_json, region_name
)
properties[resource_name_property] = original_resource.name
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name
)
return new_resource
else: # No Interruption
if "BucketEncryption" in properties:
bucket_encryption = cfn_to_api_encryption(
properties["BucketEncryption"]
)
s3_backend.put_bucket_encryption(
bucket_name=original_resource.name, encryption=bucket_encryption
)
return original_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
s3_backend.delete_bucket(resource_name)
def to_config_dict(self):
"""Return the AWS Config JSON format of this S3 bucket.
Note: The following features are not implemented and will need to be if you care about them:
- Bucket Accelerate Configuration
"""
config_dict = {
"version": "1.3",
"configurationItemCaptureTime": str(self.creation_date),
"configurationItemStatus": "ResourceDiscovered",
"configurationStateId": str(
int(time.mktime(self.creation_date.timetuple()))
), # PY2 and 3 compatible
"configurationItemMD5Hash": "",
"arn": self.arn,
"resourceType": "AWS::S3::Bucket",
"resourceId": self.name,
"resourceName": self.name,
"awsRegion": self.region_name,
"availabilityZone": "Regional",
"resourceCreationTime": str(self.creation_date),
"relatedEvents": [],
"relationships": [],
"tags": s3_backend.tagger.get_tag_dict_for_resource(self.arn),
"configuration": {
"name": self.name,
"owner": {"id": OWNER},
"creationDate": self.creation_date.isoformat(),
},
}
# Make the supplementary configuration:
        # This is a double-wrapped JSON for some reason...
s_config = {
"AccessControlList": json.dumps(json.dumps(self.acl.to_config_dict()))
}
if self.public_access_block:
s_config["PublicAccessBlockConfiguration"] = json.dumps(
self.public_access_block.to_config_dict()
)
# Tagging is special:
if config_dict["tags"]:
s_config["BucketTaggingConfiguration"] = json.dumps(
{"tagSets": [{"tags": config_dict["tags"]}]}
)
# TODO implement Accelerate Configuration:
s_config["BucketAccelerateConfiguration"] = {"status": None}
if self.rules:
s_config["BucketLifecycleConfiguration"] = {
"rules": [rule.to_config_dict() for rule in self.rules]
}
s_config["BucketLoggingConfiguration"] = {
"destinationBucketName": self.logging.get("TargetBucket", None),
"logFilePrefix": self.logging.get("TargetPrefix", None),
}
s_config["BucketPolicy"] = {
"policyText": self.policy.decode("utf-8") if self.policy else None
}
s_config["IsRequesterPaysEnabled"] = (
"false" if self.payer == "BucketOwner" else "true"
)
if self.notification_configuration:
s_config[
"BucketNotificationConfiguration"
] = self.notification_configuration.to_config_dict()
else:
s_config["BucketNotificationConfiguration"] = {"configurations": {}}
config_dict["supplementaryConfiguration"] = s_config
return config_dict
@property
def has_default_lock(self):
if not self.object_lock_enabled:
return False
if self.default_lock_mode:
return True
return False
def default_retention(self):
now = datetime.datetime.utcnow()
now += datetime.timedelta(self.default_lock_days)
now += datetime.timedelta(self.default_lock_years * 365)
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
class S3Backend(BaseBackend, CloudWatchMetricProvider):
"""
Moto implementation for S3.
Custom S3 endpoints are supported, if you are using a S3-compatible storage solution like Ceph.
Example usage:
.. sourcecode:: python
os.environ["MOTO_S3_CUSTOM_ENDPOINTS"] = "http://custom.internal.endpoint,http://custom.other.endpoint"
@mock_s3
def test_my_custom_endpoint():
boto3.client("s3", endpoint_url="http://custom.internal.endpoint")
...
Note that this only works if the environment variable is set **before** the mock is initialized.
"""
def __init__(self):
self.buckets = {}
self.account_public_access_block = None
self.tagger = TaggingService()
@property
def _url_module(self):
# The urls-property can be different depending on env variables
# Force a reload, to retrieve the correct set of URLs
import moto.s3.urls as backend_urls_module
reload(backend_urls_module)
return backend_urls_module
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
"""List of dicts representing default VPC endpoints for this service."""
accesspoint = {
"AcceptanceRequired": False,
"AvailabilityZones": zones,
"BaseEndpointDnsNames": [
f"accesspoint.s3-global.{service_region}.vpce.amazonaws.com",
],
"ManagesVpcEndpoints": False,
"Owner": "amazon",
"PrivateDnsName": "*.accesspoint.s3-global.amazonaws.com",
"PrivateDnsNameVerificationState": "verified",
"PrivateDnsNames": [
{"PrivateDnsName": "*.accesspoint.s3-global.amazonaws.com"}
],
"ServiceId": f"vpce-svc-{BaseBackend.vpce_random_number()}",
"ServiceName": "com.amazonaws.s3-global.accesspoint",
"ServiceType": [{"ServiceType": "Interface"}],
"Tags": [],
"VpcEndpointPolicySupported": True,
}
return (
BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "s3", "Interface"
)
+ BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "s3", "Gateway"
)
+ [accesspoint]
)
# TODO: This is broken! DO NOT IMPORT MUTABLE DATA TYPES FROM OTHER AREAS -- THIS BREAKS UNMOCKING!
# WRAP WITH A GETTER/SETTER FUNCTION
# Register this class as a CloudWatch Metric Provider
# Must provide a method 'get_cloudwatch_metrics' that will return a list of metrics, based on the data available
# metric_providers["S3"] = self
@classmethod
def get_cloudwatch_metrics(cls):
metrics = []
for name, bucket in s3_backend.buckets.items():
metrics.append(
MetricDatum(
namespace="AWS/S3",
name="BucketSizeBytes",
value=bucket.keys.item_size(),
dimensions=[
{"Name": "StorageType", "Value": "StandardStorage"},
{"Name": "BucketName", "Value": name},
],
timestamp=datetime.datetime.now(tz=pytz.utc).replace(
hour=0, minute=0, second=0, microsecond=0
),
unit="Bytes",
)
)
metrics.append(
MetricDatum(
namespace="AWS/S3",
name="NumberOfObjects",
value=len(bucket.keys),
dimensions=[
{"Name": "StorageType", "Value": "AllStorageTypes"},
{"Name": "BucketName", "Value": name},
],
timestamp=datetime.datetime.now(tz=pytz.utc).replace(
hour=0, minute=0, second=0, microsecond=0
),
unit="Count",
)
)
return metrics
def create_bucket(self, bucket_name, region_name):
if bucket_name in self.buckets:
raise BucketAlreadyExists(bucket=bucket_name)
if not MIN_BUCKET_NAME_LENGTH <= len(bucket_name) <= MAX_BUCKET_NAME_LENGTH:
raise InvalidBucketName()
new_bucket = FakeBucket(name=bucket_name, region_name=region_name)
self.buckets[bucket_name] = new_bucket
return new_bucket
def list_buckets(self):
return self.buckets.values()
def get_bucket(self, bucket_name):
try:
return self.buckets[bucket_name]
except KeyError:
raise MissingBucket(bucket=bucket_name)
def head_bucket(self, bucket_name):
return self.get_bucket(bucket_name)
def delete_bucket(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if bucket.keys:
# Can't delete a bucket with keys
return False
else:
return self.buckets.pop(bucket_name)
def set_bucket_versioning(self, bucket_name, status):
self.get_bucket(bucket_name).versioning_status = status
def get_bucket_versioning(self, bucket_name):
return self.get_bucket(bucket_name).versioning_status
def get_bucket_encryption(self, bucket_name):
return self.get_bucket(bucket_name).encryption
def list_object_versions(
self,
bucket_name,
delimiter=None,
encoding_type=None,
key_marker=None,
max_keys=None,
version_id_marker=None,
prefix="",
):
bucket = self.get_bucket(bucket_name)
common_prefixes = []
requested_versions = []
delete_markers = []
all_versions = itertools.chain(
*(copy.deepcopy(l) for key, l in bucket.keys.iterlists())
)
all_versions = list(all_versions)
        # sort by name ascending, then by last-modified date descending (newest first)
all_versions.sort(key=lambda r: (r.name, -unix_time_millis(r.last_modified)))
last_name = None
for version in all_versions:
name = version.name
# guaranteed to be sorted - so the first key with this name will be the latest
version.is_latest = name != last_name
if version.is_latest:
last_name = name
# Differentiate between FakeKey and FakeDeleteMarkers
if not isinstance(version, FakeKey):
delete_markers.append(version)
continue
            # skip all keys that sort alphabetically before the key_marker
if key_marker and name < key_marker:
continue
# Filter for keys that start with prefix
if not name.startswith(prefix):
continue
# separate out all keys that contain delimiter
if delimiter and delimiter in name:
index = name.index(delimiter) + len(delimiter)
prefix_including_delimiter = name[0:index]
common_prefixes.append(prefix_including_delimiter)
continue
requested_versions.append(version)
common_prefixes = sorted(set(common_prefixes))
return requested_versions, common_prefixes, delete_markers
def get_bucket_policy(self, bucket_name):
return self.get_bucket(bucket_name).policy
def put_bucket_policy(self, bucket_name, policy):
self.get_bucket(bucket_name).policy = policy
def delete_bucket_policy(self, bucket_name, body):
bucket = self.get_bucket(bucket_name)
bucket.policy = None
def put_bucket_encryption(self, bucket_name, encryption):
self.get_bucket(bucket_name).encryption = encryption
def delete_bucket_encryption(self, bucket_name):
self.get_bucket(bucket_name).encryption = None
def get_bucket_replication(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return getattr(bucket, "replication", None)
def put_bucket_replication(self, bucket_name, replication):
if isinstance(replication["Rule"], dict):
replication["Rule"] = [replication["Rule"]]
for rule in replication["Rule"]:
if "Priority" not in rule:
rule["Priority"] = 1
if "ID" not in rule:
rule["ID"] = "".join(
random.choice(string.ascii_letters + string.digits)
for _ in range(30)
)
bucket = self.get_bucket(bucket_name)
bucket.replication = replication
def delete_bucket_replication(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.replication = None
def put_bucket_lifecycle(self, bucket_name, rules):
bucket = self.get_bucket(bucket_name)
bucket.set_lifecycle(rules)
def delete_bucket_lifecycle(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.delete_lifecycle()
def set_bucket_website_configuration(self, bucket_name, website_configuration):
bucket = self.get_bucket(bucket_name)
bucket.website_configuration = website_configuration
def get_bucket_website_configuration(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.website_configuration
def delete_bucket_website(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.website_configuration = None
def get_public_access_block(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if not bucket.public_access_block:
raise NoSuchPublicAccessBlockConfiguration()
return bucket.public_access_block
def get_account_public_access_block(self, account_id):
# The account ID should equal the account id that is set for Moto:
if account_id != ACCOUNT_ID:
raise WrongPublicAccessBlockAccountIdError()
if not self.account_public_access_block:
raise NoSuchPublicAccessBlockConfiguration()
return self.account_public_access_block
def put_object(
self,
bucket_name,
key_name,
value,
storage=None,
etag=None,
multipart=None,
encryption=None,
kms_key_id=None,
bucket_key_enabled=None,
lock_mode=None,
lock_legal_status=None,
lock_until=None,
):
key_name = clean_key_name(key_name)
if storage is not None and storage not in STORAGE_CLASS:
raise InvalidStorageClass(storage=storage)
bucket = self.get_bucket(bucket_name)
# getting default config from bucket if not included in put request
if bucket.encryption:
bucket_key_enabled = bucket_key_enabled or bucket.encryption["Rule"].get(
"BucketKeyEnabled", False
)
kms_key_id = kms_key_id or bucket.encryption["Rule"][
"ApplyServerSideEncryptionByDefault"
].get("KMSMasterKeyID")
encryption = (
encryption
or bucket.encryption["Rule"]["ApplyServerSideEncryptionByDefault"][
"SSEAlgorithm"
]
)
new_key = FakeKey(
name=key_name,
value=value,
storage=storage,
etag=etag,
is_versioned=bucket.is_versioned,
version_id=str(uuid.uuid4()) if bucket.is_versioned else "null",
multipart=multipart,
encryption=encryption,
kms_key_id=kms_key_id,
bucket_key_enabled=bucket_key_enabled,
lock_mode=lock_mode,
lock_legal_status=lock_legal_status,
lock_until=lock_until,
)
keys = [
key
for key in bucket.keys.getlist(key_name, [])
if key.version_id != new_key.version_id
] + [new_key]
bucket.keys.setlist(key_name, keys)
return new_key
def put_object_acl(self, bucket_name, key_name, acl):
key = self.get_object(bucket_name, key_name)
# TODO: Support the XML-based ACL format
if key is not None:
key.set_acl(acl)
else:
raise MissingKey(key=key_name)
def put_object_legal_hold(
self, bucket_name, key_name, version_id, legal_hold_status
):
key = self.get_object(bucket_name, key_name, version_id=version_id)
key.lock_legal_status = legal_hold_status
def put_object_retention(self, bucket_name, key_name, version_id, retention):
key = self.get_object(bucket_name, key_name, version_id=version_id)
key.lock_mode = retention[0]
key.lock_until = retention[1]
def append_to_key(self, bucket_name, key_name, value):
key = self.get_object(bucket_name, key_name)
key.append_to_value(value)
return key
def get_object(self, bucket_name, key_name, version_id=None, part_number=None):
key_name = clean_key_name(key_name)
bucket = self.get_bucket(bucket_name)
key = None
if bucket:
if version_id is None:
if key_name in bucket.keys:
key = bucket.keys[key_name]
else:
for key_version in bucket.keys.getlist(key_name, default=[]):
if str(key_version.version_id) == str(version_id):
key = key_version
break
if part_number and key and key.multipart:
key = key.multipart.parts[part_number]
if isinstance(key, FakeKey):
return key
else:
return None
def head_object(self, bucket_name, key_name, version_id=None, part_number=None):
return self.get_object(bucket_name, key_name, version_id, part_number)
def get_object_acl(self, key):
return key.acl
def get_object_legal_hold(self, key):
return key.lock_legal_status
def get_object_lock_configuration(self, bucket_name):
bucket = self.get_bucket(bucket_name)
if not bucket.object_lock_enabled:
raise ObjectLockConfigurationNotFoundError
return (
bucket.object_lock_enabled,
bucket.default_lock_mode,
bucket.default_lock_days,
bucket.default_lock_years,
)
def get_object_tagging(self, key):
return self.tagger.list_tags_for_resource(key.arn)
def set_key_tags(self, key, tags, key_name=None):
if key is None:
raise MissingKey(key=key_name)
boto_tags_dict = self.tagger.convert_dict_to_tags_input(tags)
errmsg = self.tagger.validate_tags(boto_tags_dict)
if errmsg:
raise InvalidTagError(errmsg)
self.tagger.delete_all_tags_for_resource(key.arn)
self.tagger.tag_resource(
key.arn, boto_tags_dict,
)
return key
def get_bucket_tagging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return self.tagger.list_tags_for_resource(bucket.arn)
def put_bucket_tagging(self, bucket_name, tags):
bucket = self.get_bucket(bucket_name)
self.tagger.delete_all_tags_for_resource(bucket.arn)
self.tagger.tag_resource(
bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()],
)
def put_object_lock_configuration(
self, bucket_name, lock_enabled, mode=None, days=None, years=None
):
bucket = self.get_bucket(bucket_name)
if bucket.keys.item_size() > 0:
raise BucketNeedsToBeNew
if lock_enabled:
bucket.object_lock_enabled = True
bucket.versioning_status = "Enabled"
bucket.default_lock_mode = mode
bucket.default_lock_days = days
bucket.default_lock_years = years
def delete_bucket_tagging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
self.tagger.delete_all_tags_for_resource(bucket.arn)
def put_bucket_cors(self, bucket_name, cors_rules):
bucket = self.get_bucket(bucket_name)
bucket.set_cors(cors_rules)
def put_bucket_logging(self, bucket_name, logging_config):
bucket = self.get_bucket(bucket_name)
bucket.set_logging(logging_config, self)
def delete_bucket_cors(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.delete_cors()
def delete_public_access_block(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.public_access_block = None
def delete_account_public_access_block(self, account_id):
# The account ID should equal the account id that is set for Moto:
if account_id != ACCOUNT_ID:
raise WrongPublicAccessBlockAccountIdError()
self.account_public_access_block = None
def put_bucket_notification_configuration(self, bucket_name, notification_config):
bucket = self.get_bucket(bucket_name)
bucket.set_notification_configuration(notification_config)
def put_bucket_accelerate_configuration(
self, bucket_name, accelerate_configuration
):
if accelerate_configuration not in ["Enabled", "Suspended"]:
raise MalformedXML()
bucket = self.get_bucket(bucket_name)
if bucket.name.find(".") != -1:
raise InvalidRequest("PutBucketAccelerateConfiguration")
bucket.set_accelerate_configuration(accelerate_configuration)
def put_bucket_public_access_block(self, bucket_name, pub_block_config):
bucket = self.get_bucket(bucket_name)
if not pub_block_config:
raise InvalidPublicAccessBlockConfiguration()
bucket.public_access_block = PublicAccessBlock(
pub_block_config.get("BlockPublicAcls"),
pub_block_config.get("IgnorePublicAcls"),
pub_block_config.get("BlockPublicPolicy"),
pub_block_config.get("RestrictPublicBuckets"),
)
def put_account_public_access_block(self, account_id, pub_block_config):
# The account ID should equal the account id that is set for Moto:
if account_id != ACCOUNT_ID:
raise WrongPublicAccessBlockAccountIdError()
if not pub_block_config:
raise InvalidPublicAccessBlockConfiguration()
self.account_public_access_block = PublicAccessBlock(
pub_block_config.get("BlockPublicAcls"),
pub_block_config.get("IgnorePublicAcls"),
pub_block_config.get("BlockPublicPolicy"),
pub_block_config.get("RestrictPublicBuckets"),
)
def initiate_multipart(self, bucket_name, key_name, metadata):
bucket = self.get_bucket(bucket_name)
new_multipart = FakeMultipart(key_name, metadata)
bucket.multiparts[new_multipart.id] = new_multipart
return new_multipart
def complete_multipart(self, bucket_name, multipart_id, body):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
value, etag = multipart.complete(body)
if value is None:
return
del bucket.multiparts[multipart_id]
key = self.put_object(
bucket_name, multipart.key_name, value, etag=etag, multipart=multipart
)
key.set_metadata(multipart.metadata)
return key
def abort_multipart_upload(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
multipart_data = bucket.multiparts.get(multipart_id, None)
if not multipart_data:
raise NoSuchUpload(upload_id=multipart_id)
del bucket.multiparts[multipart_id]
def list_parts(
self, bucket_name, multipart_id, part_number_marker=0, max_parts=1000
):
bucket = self.get_bucket(bucket_name)
if multipart_id not in bucket.multiparts:
raise NoSuchUpload(upload_id=multipart_id)
return list(
bucket.multiparts[multipart_id].list_parts(part_number_marker, max_parts)
)
def is_truncated(self, bucket_name, multipart_id, next_part_number_marker):
bucket = self.get_bucket(bucket_name)
return len(bucket.multiparts[multipart_id].parts) >= next_part_number_marker
def create_multipart_upload(
self, bucket_name, key_name, metadata, storage_type, tags
):
multipart = FakeMultipart(key_name, metadata, storage=storage_type, tags=tags)
bucket = self.get_bucket(bucket_name)
bucket.multiparts[multipart.id] = multipart
return multipart.id
def complete_multipart_upload(self, bucket_name, multipart_id, body):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
value, etag = multipart.complete(body)
if value is not None:
del bucket.multiparts[multipart_id]
return multipart, value, etag
def get_all_multiparts(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.multiparts
def upload_part(self, bucket_name, multipart_id, part_id, value):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
return multipart.set_part(part_id, value)
def copy_part(
self,
dest_bucket_name,
multipart_id,
part_id,
src_bucket_name,
src_key_name,
src_version_id,
start_byte,
end_byte,
):
dest_bucket = self.get_bucket(dest_bucket_name)
multipart = dest_bucket.multiparts[multipart_id]
src_value = self.get_object(
src_bucket_name, src_key_name, version_id=src_version_id
).value
if start_byte is not None:
src_value = src_value[start_byte : end_byte + 1]
return multipart.set_part(part_id, src_value)
def list_objects(self, bucket, prefix, delimiter):
key_results = set()
folder_results = set()
if prefix:
for key_name, key in bucket.keys.items():
if key_name.startswith(prefix):
key_without_prefix = key_name.replace(prefix, "", 1)
if delimiter and delimiter in key_without_prefix:
# If delimiter, we need to split out folder_results
key_without_delimiter = key_without_prefix.split(delimiter)[0]
folder_results.add(
"{0}{1}{2}".format(prefix, key_without_delimiter, delimiter)
)
else:
key_results.add(key)
else:
for key_name, key in bucket.keys.items():
if delimiter and delimiter in key_name:
# If delimiter, we need to split out folder_results
folder_results.add(key_name.split(delimiter)[0] + delimiter)
else:
key_results.add(key)
key_results = filter(
lambda key: not isinstance(key, FakeDeleteMarker), key_results
)
key_results = sorted(key_results, key=lambda key: key.name)
folder_results = [
folder_name for folder_name in sorted(folder_results, key=lambda key: key)
]
return key_results, folder_results
def list_objects_v2(self, bucket, prefix, delimiter):
result_keys, result_folders = self.list_objects(bucket, prefix, delimiter)
# sort the combination of folders and keys into lexicographical order
all_keys = result_keys + result_folders
all_keys.sort(key=self._get_name)
return all_keys
@staticmethod
def _get_name(key):
if isinstance(key, FakeKey):
return key.name
else:
return key
def _set_delete_marker(self, bucket_name, key_name):
bucket = self.get_bucket(bucket_name)
delete_marker = FakeDeleteMarker(key=bucket.keys[key_name])
bucket.keys[key_name] = delete_marker
return delete_marker
def delete_object_tagging(self, bucket_name, key_name, version_id=None):
key = self.get_object(bucket_name, key_name, version_id=version_id)
self.tagger.delete_all_tags_for_resource(key.arn)
def delete_object(self, bucket_name, key_name, version_id=None, bypass=False):
key_name = clean_key_name(key_name)
bucket = self.get_bucket(bucket_name)
response_meta = {}
try:
if not bucket.is_versioned:
bucket.keys.pop(key_name)
else:
if version_id is None:
delete_marker = self._set_delete_marker(bucket_name, key_name)
response_meta["version-id"] = delete_marker.version_id
else:
if key_name not in bucket.keys:
raise KeyError
response_meta["delete-marker"] = "false"
for key in bucket.keys.getlist(key_name):
if str(key.version_id) == str(version_id):
if (
hasattr(key, "is_locked")
and key.is_locked
and not bypass
):
raise AccessDeniedByLock
if type(key) is FakeDeleteMarker:
response_meta["delete-marker"] = "true"
break
bucket.keys.setlist(
key_name,
[
key
for key in bucket.keys.getlist(key_name)
if str(key.version_id) != str(version_id)
],
)
if not bucket.keys.getlist(key_name):
bucket.keys.pop(key_name)
return True, response_meta
except KeyError:
return False, None
def delete_objects(self, bucket_name, objects):
deleted_objects = []
for object_ in objects:
key_name = object_["Key"]
version_id = object_.get("VersionId", None)
self.delete_object(
bucket_name, undo_clean_key_name(key_name), version_id=version_id
)
deleted_objects.append((key_name, version_id))
return deleted_objects
def copy_object(
self,
src_bucket_name,
src_key_name,
dest_bucket_name,
dest_key_name,
storage=None,
acl=None,
src_version_id=None,
encryption=None,
kms_key_id=None,
):
key = self.get_object(src_bucket_name, src_key_name, version_id=src_version_id)
new_key = self.put_object(
bucket_name=dest_bucket_name,
key_name=dest_key_name,
value=key.value,
storage=storage or key.storage_class,
multipart=key.multipart,
encryption=encryption or key.encryption,
kms_key_id=kms_key_id or key.kms_key_id,
bucket_key_enabled=key.bucket_key_enabled,
lock_mode=key.lock_mode,
lock_legal_status=key.lock_legal_status,
lock_until=key.lock_until,
)
self.tagger.copy_tags(key.arn, new_key.arn)
if acl is not None:
new_key.set_acl(acl)
        if key.storage_class == "GLACIER":
            # An object copied from a GLACIER source should not carry over its expiry
new_key.set_expiry(None)
def put_bucket_acl(self, bucket_name, acl):
bucket = self.get_bucket(bucket_name)
bucket.set_acl(acl)
def get_bucket_acl(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.acl
def get_bucket_cors(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.cors
def get_bucket_lifecycle(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.rules
def get_bucket_location(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.location
def get_bucket_logging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.logging
def get_bucket_notification_configuration(self, bucket_name):
bucket = self.get_bucket(bucket_name)
return bucket.notification_configuration
s3_backend = S3Backend()
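# Minimal usage sketch (not part of moto itself): exercising the in-memory backend defined
# above directly, without going through boto3 or the mocked HTTP layer. It assumes the rest
# of this module (FakeBucket, FakeKey, clean_key_name, TaggingService, ...) is present
# exactly as in moto.
if __name__ == "__main__":
    _backend = S3Backend()
    _backend.create_bucket("example-bucket", "us-east-1")
    _backend.put_object("example-bucket", "hello.txt", b"hello world")
    print(_backend.get_object("example-bucket", "hello.txt").value)  # b'hello world'
    _backend.delete_object("example-bucket", "hello.txt")
    _backend.delete_bucket("example-bucket")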
|
the-stack_106_21604
|
A='HamzaShabbirisCool'
stack_Memory=[]
Reverse_stack=[]
b=''
for i in range(len(A)): # pushing into stack
stack_Memory.append(A[i])
print(stack_Memory)
for i in range(len(stack_Memory)): # popping from stack
Reverse_stack.append(stack_Memory.pop())
print(stack_Memory)
print(Reverse_stack)
b=b.join(Reverse_stack)
print(b)
c=b+'933839'
print(c)
dummy=[]
for i in range(len(c)):
dummy.append(c[i])
print(dummy)
final=''
for i in range(3,len(dummy),4):
dummy[i]='_'
final=final.join(dummy)
print(dummy)
print(final)
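# Sanity-check sketch (not part of the original exercise): the stack-based reversal above is
# equivalent to Python's slice reversal of the original string.
assert b == A[::-1]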
|
the-stack_106_21605
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The policy and value networks share a majority of their architecture.
This helps the intermediate layers extract concepts that are relevant to both
move prediction and score estimation.
"""
import collections
import functools
import math
import numpy as np
import os.path
import itertools
import sys
import tensorflow as tf
from tensorflow.python.training.summary_io import SummaryWriterCache
from tqdm import tqdm
from typing import Dict
import features
import preprocessing
import symmetries
import go
from pgrad import *
# How many positions to look at per generation.
# Per AGZ, 2048 minibatch * 1k = 2M positions/generation
#EXAMPLES_PER_GENERATION = 2000000
EXAMPLES_PER_GENERATION = 100000
# How many positions can fit on a graphics card. 256 for 9s, 16 or 32 for 19s.
TRAIN_BATCH_SIZE = 16
#TRAIN_BATCH_SIZE = 256
class DualNetwork():
def __init__(self, save_file, **hparams):
self.save_file = save_file
self.hparams = get_default_hyperparams(**hparams)
self.inference_input = None
self.inference_output = None
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(graph=tf.Graph(), config=config)
self.initialize_graph()
def initialize_graph(self):
with self.sess.graph.as_default():
features, labels = get_inference_input()
estimator_spec = model_fn(features, labels,
tf.estimator.ModeKeys.PREDICT, self.hparams)
self.inference_input = features
self.inference_output = estimator_spec.predictions
if self.save_file is not None:
self.initialize_weights(self.save_file)
else:
self.sess.run(tf.global_variables_initializer())
def initialize_weights(self, save_file):
"""Initialize the weights from the given save_file.
Assumes that the graph has been constructed, and the
save_file contains weights that match the graph. Used
to set the weights to a different version of the player
        without redefining the entire graph."""
tf.train.Saver().restore(self.sess, save_file)
def run(self, position, use_random_symmetry=True):
probs, values = self.run_many([position],
use_random_symmetry=use_random_symmetry)
return probs[0], values[0]
def run_many(self, positions, use_random_symmetry=True):
processed = list(map(features.extract_features, positions))
if use_random_symmetry:
syms_used, processed = symmetries.randomize_symmetries_feat(
processed)
outputs = self.sess.run(self.inference_output,
feed_dict={self.inference_input: processed})
probabilities, value = outputs['policy_output'], outputs['value_output']
if use_random_symmetry:
probabilities = symmetries.invert_symmetries_pi(
syms_used, probabilities)
return probabilities, value
def get_inference_input():
"""Set up placeholders for input features/labels.
Returns the feature, output tensors that get passed into model_fn."""
return (tf.placeholder(tf.float32,
[None, go.N, go.N, features.NEW_FEATURES_PLANES],
name='pos_tensor'),
{'pi_tensor': tf.placeholder(tf.float32, [None, go.N * go.N + 1]),
'value_tensor': tf.placeholder(tf.float32, [None])})
def _round_power_of_two(n):
"""Finds the nearest power of 2 to a number.
Thus 84 -> 64, 120 -> 128, etc.
"""
return 2 ** int(round(math.log(n, 2)))
def get_default_hyperparams(**overrides):
"""Returns the hyperparams for the neural net.
In other words, returns a dict whose parameters come from the AGZ
paper:
k: number of filters (AlphaGoZero used 256). We use 128 by
default for a 19x19 go board.
fc_width: Dimensionality of the fully connected linear layer
num_shared_layers: number of shared residual blocks. AGZ used both 19
and 39. Here we use 19 because it's faster to train.
l2_strength: The L2 regularization parameter.
momentum: The momentum parameter for training
"""
k = _round_power_of_two(go.N ** 2 / 3) # width of each layer
hparams = {
'k': k, # Width of each conv layer
'fc_width': 2 * k, # Width of each fully connected layer
'num_shared_layers': go.N, # Number of shared trunk layers
'l2_strength': 1e-4, # Regularization strength
'momentum': 0.9, # Momentum used in SGD
}
hparams.update(**overrides)
return hparams
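# Worked example (a sketch, assuming the default 19x19 board): go.N ** 2 / 3 == 361 / 3 ~= 120.3,
# and _round_power_of_two(120.3) == 2 ** round(log2(120.3)) == 2 ** 7 == 128, which matches the
# "128 by default for a 19x19 go board" note in the docstring above.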
def model_fn(features, labels, mode, params, config=None):
'''
Args:
features: tensor with shape
[BATCH_SIZE, go.N, go.N, features.NEW_FEATURES_PLANES]
labels: dict from string to tensor with shape
'pi_tensor': [BATCH_SIZE, go.N * go.N + 1]
'value_tensor': [BATCH_SIZE]
mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only)
params: a dict of hyperparams
config: ignored; is required by Estimator API.
Returns: tf.estimator.EstimatorSpec with props
mode: same as mode arg
predictions: dict of tensors
'policy': [BATCH_SIZE, go.N * go.N + 1]
'value': [BATCH_SIZE]
loss: a single value tensor
train_op: train op
eval_metric_ops
return dict of tensors
logits: [BATCH_SIZE, go.N * go.N + 1]
'''
my_batchn = functools.partial(
tf.layers.batch_normalization,
momentum=.997, epsilon=1e-5, fused=True, center=True, scale=True,
training=(mode == tf.estimator.ModeKeys.TRAIN))
my_conv2d = functools.partial(
tf.layers.conv2d,
filters=params['k'], kernel_size=[3, 3], padding="same")
def my_res_layer(inputs):
int_layer1 = my_batchn(my_conv2d(tf.reshape(id_bf16cut(inputs),tf.shape(inputs))))
initial_output = tf.nn.relu(int_layer1)
int_layer2 = my_batchn(my_conv2d(tf.reshape(id_bf16cut(initial_output),tf.shape(initial_output))))
output = tf.nn.relu(inputs + int_layer2)
return output
initial_output = tf.nn.relu(my_batchn(my_conv2d(tf.reshape(id_bf16cut(features),tf.shape(features)))))
# the shared stack
shared_output = initial_output
for i in range(params['num_shared_layers']):
shared_output = my_res_layer(shared_output)
# policy head
policy_conv = tf.nn.relu(my_batchn(
my_conv2d(tf.reshape(id_bf16cut(shared_output),tf.shape(shared_output)), filters=2, kernel_size=[1, 1]),
center=False, scale=False))
logits = tf.layers.dense(tf.reshape(id_bf16cut(
tf.reshape(policy_conv, [-1, go.N * go.N * 2])),[-1, go.N * go.N * 2]),
go.N * go.N + 1)
policy_output = tf.nn.softmax(logits, name='policy_output')
# value head
value_conv = tf.nn.relu(my_batchn(
my_conv2d(tf.reshape(id_bf16cut(shared_output),tf.shape(shared_output)), filters=1, kernel_size=[1, 1]),
center=False, scale=False))
value_fc_hidden = tf.nn.relu(tf.layers.dense(
tf.reshape(id_bf16cut(tf.reshape(value_conv, [-1, go.N * go.N])),[-1, go.N * go.N]),
params['fc_width']))
value_output = tf.nn.tanh(
tf.reshape(tf.layers.dense(tf.reshape(id_bf16cut(value_fc_hidden),tf.shape(value_fc_hidden)), 1), [-1]),
name='value_output')
# train ops
global_step = tf.train.get_or_create_global_step()
policy_cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=labels['pi_tensor']))
value_cost = tf.reduce_mean(
tf.square(value_output - labels['value_tensor']))
l2_cost = params['l2_strength'] * tf.add_n([tf.nn.l2_loss(v)
        for v in tf.trainable_variables() if 'bias' not in v.name])
combined_cost = policy_cost + value_cost + l2_cost
policy_entropy = -tf.reduce_mean(tf.reduce_sum(
policy_output * tf.log(policy_output), axis=1))
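    # Step-wise learning-rate decay: 1e-2 for the first 1e6 global steps, 1e-3 until 2e6 steps,
    # and 1e-4 afterwards.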
boundaries = [int(1e6), int(2e6)]
values = [1e-2, 1e-3, 1e-4]
learning_rate = tf.train.piecewise_constant(
global_step, boundaries, values)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = tf.train.MomentumOptimizer(
learning_rate, params['momentum']).minimize(
combined_cost, global_step=global_step)
metric_ops = {
'accuracy': tf.metrics.accuracy(labels=labels['pi_tensor'],
predictions=policy_output,
name='accuracy_op'),
'policy_cost': tf.metrics.mean(policy_cost),
'value_cost': tf.metrics.mean(value_cost),
'l2_cost': tf.metrics.mean(l2_cost),
'policy_entropy': tf.metrics.mean(policy_entropy),
'combined_cost': tf.metrics.mean(combined_cost),
}
# Create summary ops so that they show up in SUMMARIES collection
# That way, they get logged automatically during training
for metric_name, metric_op in metric_ops.items():
tf.summary.scalar(metric_name, metric_op[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'policy_output': policy_output,
'value_output': value_output,
},
loss=combined_cost,
train_op=train_op,
eval_metric_ops=metric_ops,
)
def get_estimator(working_dir, **hparams):
hparams = get_default_hyperparams(**hparams)
return tf.estimator.Estimator(
model_fn,
model_dir=working_dir,
params=hparams)
def bootstrap(working_dir, **hparams):
"""Initialize a tf.Estimator run with random initial weights.
Args:
working_dir: The directory where tf.estimator will drop logs,
checkpoints, and so on
hparams: hyperparams of the model.
"""
hparams = get_default_hyperparams(**hparams)
# a bit hacky - forge an initial checkpoint with the name that subsequent
# Estimator runs will expect to find.
#
# Estimator will do this automatically when you call train(), but calling
# train() requires data, and I didn't feel like creating training data in
# order to run the full train pipeline for 1 step.
estimator_initial_checkpoint_name = 'model.ckpt-1'
save_file = os.path.join(working_dir, estimator_initial_checkpoint_name)
sess = tf.Session(graph=tf.Graph())
with sess.graph.as_default():
features, labels = get_inference_input()
model_fn(features, labels, tf.estimator.ModeKeys.PREDICT, hparams)
sess.run(tf.global_variables_initializer())
tf.train.Saver().save(sess, save_file)
def export_model(working_dir, model_path):
"""Take the latest checkpoint and export it to model_path for selfplay.
Assumes that all relevant model files are prefixed by the same name.
(For example, foo.index, foo.meta and foo.data-00000-of-00001).
Args:
working_dir: The directory where tf.estimator keeps its checkpoints
model_path: The path (can be a gs:// path) to export model to
"""
estimator = tf.estimator.Estimator(model_fn, model_dir=working_dir,
params='ignored')
latest_checkpoint = estimator.latest_checkpoint()
all_checkpoint_files = tf.gfile.Glob(latest_checkpoint + '*')
for filename in all_checkpoint_files:
suffix = filename.partition(latest_checkpoint)[2]
destination_path = model_path + suffix
print("Copying {} to {}".format(filename, destination_path))
tf.gfile.Copy(filename, destination_path)
def train(working_dir, tf_records, generation_num, **hparams):
assert generation_num > 0, "Model 0 is random weights"
estimator = get_estimator(working_dir, **hparams)
max_steps = generation_num * EXAMPLES_PER_GENERATION // TRAIN_BATCH_SIZE
def input_fn(): return preprocessing.get_input_tensors(
TRAIN_BATCH_SIZE, tf_records)
update_ratio_hook = UpdateRatioSessionHook(working_dir)
estimator.train(input_fn, hooks=[update_ratio_hook], max_steps=max_steps)
def validate(working_dir, tf_records, checkpoint_name=None, **hparams):
estimator = get_estimator(working_dir, **hparams)
if checkpoint_name is None:
checkpoint_name = estimator.latest_checkpoint()
def input_fn(): return preprocessing.get_input_tensors(
TRAIN_BATCH_SIZE, tf_records, shuffle_buffer_size=1000,
filter_amount=0.05)
estimator.evaluate(input_fn, steps=1000)
def compute_update_ratio(weight_tensors, before_weights, after_weights):
    """Compute the ratio of the weight-update norm to the weight norm."""
deltas = [after - before for after,
before in zip(after_weights, before_weights)]
delta_norms = [np.linalg.norm(d.ravel()) for d in deltas]
weight_norms = [np.linalg.norm(w.ravel()) for w in before_weights]
ratios = [d / w for d, w in zip(delta_norms, weight_norms)]
all_summaries = [
tf.Summary.Value(tag='update_ratios/' +
tensor.name, simple_value=ratio)
for tensor, ratio in zip(weight_tensors, ratios)]
return tf.Summary(value=all_summaries)
class UpdateRatioSessionHook(tf.train.SessionRunHook):
def __init__(self, working_dir, every_n_steps=100):
self.working_dir = working_dir
self.every_n_steps = every_n_steps
self.before_weights = None
def begin(self):
        # These calls only work because the SessionRunHook API guarantees this
        # will get called within a graph context containing our model graph.
self.summary_writer = SummaryWriterCache.get(self.working_dir)
self.weight_tensors = tf.trainable_variables()
self.global_step = tf.train.get_or_create_global_step()
def before_run(self, run_context):
global_step = run_context.session.run(self.global_step)
if global_step % self.every_n_steps == 0 or self.before_weights is None:
self.before_weights = run_context.session.run(self.weight_tensors)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self.global_step)
if global_step % self.every_n_steps == 0:
after_weights = run_context.session.run(self.weight_tensors)
weight_update_summaries = compute_update_ratio(
self.weight_tensors, self.before_weights, after_weights)
self.summary_writer.add_summary(
weight_update_summaries, global_step)
self.before_weights = None
|
the-stack_106_21606
|
"""updated models
Revision ID: 175c80bee699
Revises: None
Create Date: 2016-05-19 10:38:47.632650
"""
# revision identifiers, used by Alembic.
revision = '175c80bee699'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('companies',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=100), nullable=True),
sa.Column('address', sa.Unicode(length=255), nullable=True),
sa.Column('phone', sa.Unicode(length=50), nullable=True),
sa.Column('website', sa.Unicode(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('website')
)
op.create_table('skills',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=50), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('students',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=255), nullable=True),
sa.Column('last_name', sa.Unicode(length=255), nullable=True),
sa.Column('age', sa.Integer(), nullable=True),
sa.Column('email', sa.Unicode(length=50), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_table('company_skills',
sa.Column('company_id', sa.Integer(), nullable=True),
sa.Column('skills_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['company_id'], ['companies.id'], ),
sa.ForeignKeyConstraint(['skills_id'], ['skills.id'], )
)
op.create_table('student_skills',
sa.Column('student_id', sa.Integer(), nullable=True),
sa.Column('skills_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['skills_id'], ['skills.id'], ),
sa.ForeignKeyConstraint(['student_id'], ['students.id'], )
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('student_skills')
op.drop_table('company_skills')
op.drop_table('students')
op.drop_table('skills')
op.drop_table('companies')
### end Alembic commands ###
|
the-stack_106_21609
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Timer" - a chapter of "The Fuzzing Book"
# Web site: https://www.fuzzingbook.org/html/Timer.html
# Last change: 2021-06-02 17:56:35+02:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Fuzzing Book - Timer
This file can be _executed_ as a script, running all experiments:
$ python Timer.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from fuzzingbook.Timer import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.fuzzingbook.org/html/Timer.html
The `Timer` class allows you to measure elapsed real time (in fractional seconds). Its typical usage is in conjunction with a `with` clause:
>>> with Timer() as t:
>>> some_long_running_function()
>>> t.elapsed_time()
0.04725892299757106
For more details, source, and documentation, see
"The Fuzzing Book - Timer"
at https://www.fuzzingbook.org/html/Timer.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'fuzzingbook'
# Timer
# =====
if __name__ == '__main__':
print('# Timer')
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Measuring Time
## --------------
if __name__ == '__main__':
print('\n## Measuring Time')
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
import time
from typing import Type, Any
def clock() -> float:
"""
Return the number of fractional seconds elapsed since some point of reference.
"""
return time.perf_counter()
from types import TracebackType
class Timer:
def __init__(self) -> None:
"""Constructor"""
self.start_time = clock()
self.end_time = None
def __enter__(self) -> Any:
"""Begin of `with` block"""
self.start_time = clock()
self.end_time = None
return self
def __exit__(self, exc_type: Type, exc_value: BaseException,
tb: TracebackType) -> None:
"""End of `with` block"""
self.end_time = clock() # type: ignore
def elapsed_time(self) -> float:
"""Return elapsed time in seconds"""
if self.end_time is None:
# still running
return clock() - self.start_time
else:
return self.end_time - self.start_time # type: ignore
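# Busy-wait workload used below as a stand-in for "something expensive to time".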
def some_long_running_function() -> None:
i = 1000000
while i > 0:
i -= 1
if __name__ == '__main__':
print("Stopping total time:")
with Timer() as t:
some_long_running_function()
print(t.elapsed_time())
if __name__ == '__main__':
print("Stopping time in between:")
with Timer() as t:
for i in range(10):
some_long_running_function()
print(t.elapsed_time())
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
if __name__ == '__main__':
with Timer() as t:
some_long_running_function()
t.elapsed_time()
## Lessons Learned
## ---------------
if __name__ == '__main__':
print('\n## Lessons Learned')
|
the-stack_106_21610
|
from .libcrocoddyl_pywrap import *
from .libcrocoddyl_pywrap import __version__
from .deprecation import *
import pinocchio
import numpy as np
import time
import warnings
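# rotationMatrixFromTwoVectors builds the rotation taking unit vector a onto b with a
# Rodrigues-style formula: with c = a . b and s = |a x b|, it returns
# R = I + [a x b]_x + [a x b]_x^2 * (1 - c) / s**2, where [v]_x is the skew-symmetric
# cross-product matrix (pinocchio.skew). When s == 0 (parallel or antiparallel inputs)
# it falls back to the identity matrix.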
def rotationMatrixFromTwoVectors(a, b):
a_copy = a / np.linalg.norm(a)
b_copy = b / np.linalg.norm(b)
a_cross_b = np.cross(a_copy, b_copy, axis=0)
s = np.linalg.norm(a_cross_b)
if libcrocoddyl_pywrap.getNumpyType() == np.matrix:
        warnings.warn("Numpy matrix support will be removed in a future release", DeprecationWarning, stacklevel=2)
if s == 0:
return np.matrix(np.eye(3))
c = np.asscalar(a_copy.T * b_copy)
ab_skew = pinocchio.skew(a_cross_b)
return np.matrix(np.eye(3)) + ab_skew + ab_skew * ab_skew * (1 - c) / s**2
else:
if s == 0:
return np.eye(3)
c = np.dot(a_copy, b_copy)
ab_skew = pinocchio.skew(a_cross_b)
return np.eye(3) + ab_skew + np.dot(ab_skew, ab_skew) * (1 - c) / s**2
class DisplayAbstract:
def __init__(self, rate=-1, freq=1):
self.rate = rate
self.freq = freq
def displayFromSolver(self, solver, factor=1.):
numpy_conversion = False
if libcrocoddyl_pywrap.getNumpyType() == np.matrix:
numpy_conversion = True
libcrocoddyl_pywrap.switchToNumpyMatrix()
            warnings.warn("Numpy matrix support will be removed in a future release", DeprecationWarning, stacklevel=2)
fs = self.getForceTrajectoryFromSolver(solver)
ps = self.getFrameTrajectoryFromSolver(solver)
models = solver.problem.runningModels.tolist() + [solver.problem.terminalModel]
dts = [m.dt if hasattr(m, "differential") else 0. for m in models]
self.display(solver.xs, fs, ps, dts, factor)
if numpy_conversion:
numpy_conversion = False
libcrocoddyl_pywrap.switchToNumpyMatrix()
def display(self, xs, fs=[], ps=[], dts=[], factor=1.):
""" Display the state, force and frame trajectories"""
raise NotImplementedError("Not implemented yet.")
def getForceTrajectoryFromSolver(self, solver):
""" Get the force trajectory from the solver"""
return None
def getFrameTrajectoryFromSolver(self, solver):
""" Get the frame trajectory from the solver"""
return None
class GepettoDisplay(DisplayAbstract):
def __init__(self, robot, rate=-1, freq=1, cameraTF=None, floor=True, frameNames=[], visibility=False):
DisplayAbstract.__init__(self, rate, freq)
self.robot = robot
        # Visual properties
self.fullVisibility = visibility
self.floorGroup = "world/floor"
self.forceGroup = "world/robot/contact_forces"
self.frictionGroup = "world/robot/friction_cone"
self.frameTrajGroup = "world/robot/frame_trajectory"
self.backgroundColor = [1., 1., 1., 1.]
self.floorScale = [0.5, 0.5, 0.5]
self.floorColor = [0.7, 0.7, 0.7, 1.]
self.forceRadius = 0.015
self.forceLength = 0.5
self.forceColor = [1., 0., 1., 1.]
self.frictionConeScale = 0.2
self.frictionConeRays = True
self.frictionConeColor1 = [0., 0.4, 0.79, 0.5]
self.frictionConeColor2 = [0., 0.4, 0.79, 0.5]
self.activeContacts = {}
self.frictionMu = {}
for n in frameNames:
parentId = robot.model.frames[robot.model.getFrameId(n)].parent
self.activeContacts[str(parentId)] = True
self.frictionMu[str(parentId)] = 0.7
self.frameTrajNames = []
for n in frameNames:
self.frameTrajNames.append(str(robot.model.getFrameId(n)))
self.frameTrajColor = {}
self.frameTrajLineWidth = 10
for fr in self.frameTrajNames:
self.frameTrajColor[fr] = list(np.hstack([np.random.choice(range(256), size=3) / 256., 1.]))
self._addRobot()
self._setBackground()
if cameraTF is not None:
self.robot.viewer.gui.setCameraTransform(self.robot.viz.windowID, cameraTF)
if floor:
self._addFloor()
self.totalWeight = sum(m.mass
for m in self.robot.model.inertias) * np.linalg.norm(self.robot.model.gravity.linear)
self.x_axis = np.array([1., 0., 0.])
self.z_axis = np.array([0., 0., 1.])
self.robot.viewer.gui.createGroup(self.forceGroup)
self.robot.viewer.gui.createGroup(self.frictionGroup)
self.robot.viewer.gui.createGroup(self.frameTrajGroup)
self._addForceArrows()
self._addFrameCurves()
self._addFrictionCones()
def display(self, xs, fs=[], ps=[], dts=[], factor=1.):
numpy_conversion = False
if libcrocoddyl_pywrap.getNumpyType() == np.matrix:
numpy_conversion = True
libcrocoddyl_pywrap.switchToNumpyMatrix()
            warnings.warn("Numpy matrix support will be removed in a future release", DeprecationWarning, stacklevel=2)
if ps:
for key, p in ps.items():
self.robot.viewer.gui.setCurvePoints(self.frameTrajGroup + "/" + key, p)
if not dts:
dts = [0.] * len(xs)
        S = 1 if self.rate <= 0 else max(len(xs) // self.rate, 1)
for i, x in enumerate(xs):
if not i % S:
if fs:
self.activeContacts = {k: False for k, c in self.activeContacts.items()}
for f in fs[i]:
key = f["key"]
pose = f["oMf"]
wrench = f["f"]
# Display the contact forces
R = rotationMatrixFromTwoVectors(self.x_axis, wrench.linear)
forcePose = pinocchio.SE3ToXYZQUATtuple(pinocchio.SE3(R, pose.translation))
forceMagnitud = np.linalg.norm(wrench.linear) / self.totalWeight
forceName = self.forceGroup + "/" + key
self.robot.viewer.gui.setVector3Property(forceName, "Scale", [1. * forceMagnitud, 1., 1.])
self.robot.viewer.gui.applyConfiguration(forceName, forcePose)
self.robot.viewer.gui.setVisibility(forceName, "ON")
# Display the friction cones
position = pose
position.rotation = f["R"]
frictionName = self.frictionGroup + "/" + key
self._setConeMu(key, f["mu"])
self.robot.viewer.gui.applyConfiguration(
frictionName, list(np.array(pinocchio.SE3ToXYZQUAT(position)).squeeze()))
self.robot.viewer.gui.setVisibility(frictionName, "ON")
self.activeContacts[key] = True
for key, c in self.activeContacts.items():
if c == False:
self.robot.viewer.gui.setVisibility(self.forceGroup + "/" + key, "OFF")
self.robot.viewer.gui.setVisibility(self.frictionGroup + "/" + key, "OFF")
self.robot.display(x[:self.robot.nq])
time.sleep(dts[i] * factor)
if numpy_conversion:
numpy_conversion = False
libcrocoddyl_pywrap.switchToNumpyMatrix()
def getForceTrajectoryFromSolver(self, solver):
if len(self.frameTrajNames) == 0:
return None
fs = []
models = solver.problem.runningModels.tolist() + [solver.problem.terminalModel]
datas = solver.problem.runningDatas.tolist() + [solver.problem.terminalData]
for i, data in enumerate(datas):
model = models[i]
if hasattr(data, "differential"):
if isinstance(data.differential, libcrocoddyl_pywrap.DifferentialActionDataContactFwdDynamics):
fc = []
for key, contact in data.differential.multibody.contacts.contacts.todict().items():
if model.differential.contacts.contacts[key].active:
oMf = contact.pinocchio.oMi[contact.joint] * contact.jMf
fiMo = pinocchio.SE3(contact.pinocchio.oMi[contact.joint].rotation.T,
contact.jMf.translation)
force = fiMo.actInv(contact.f)
R = np.eye(3)
mu = 0.7
for k, c in model.differential.costs.costs.todict().items():
if isinstance(c.cost, libcrocoddyl_pywrap.CostModelContactFrictionCone):
if contact.joint == self.robot.model.frames[c.cost.reference.id].parent:
R = c.cost.reference.cone.R
mu = c.cost.reference.cone.mu
continue
fc.append({"key": str(contact.joint), "oMf": oMf, "f": force, "R": R, "mu": mu})
fs.append(fc)
elif isinstance(data, libcrocoddyl_pywrap.ActionDataImpulseFwdDynamics):
fc = []
for key, impulse in data.multibody.impulses.impulses.todict().items():
if model.impulses.impulses[key].active:
oMf = impulse.pinocchio.oMi[impulse.joint] * impulse.jMf
fiMo = pinocchio.SE3(impulse.pinocchio.oMi[impulse.joint].rotation.T, impulse.jMf.translation)
force = fiMo.actInv(impulse.f)
R = np.eye(3)
mu = 0.7
for k, c in model.costs.costs.todict().items():
if isinstance(c.cost, libcrocoddyl_pywrap.CostModelContactFrictionCone):
if impulse.joint == self.robot.model.frames[c.cost.id].parent:
R = c.cost.cone.R
mu = c.cost.cone.mu
continue
fc.append({"key": str(impulse.joint), "oMf": oMf, "f": force, "R": R, "mu": mu})
fs.append(fc)
return fs
def getFrameTrajectoryFromSolver(self, solver):
if len(self.frameTrajNames) == 0:
return None
ps = {fr: [] for fr in self.frameTrajNames}
models = solver.problem.runningModels.tolist() + [solver.problem.terminalModel]
datas = solver.problem.runningDatas.tolist() + [solver.problem.terminalData]
for key, p in ps.items():
frameId = int(key)
for i, data in enumerate(datas):
model = models[i]
if hasattr(data, "differential"):
if hasattr(data.differential, "pinocchio"):
                        # Update the frame placement if there is no contact.
                        # Note that, in non-contact cases, the action model does not compute it for efficiency reasons.
if len(data.differential.multibody.contacts.contacts.todict().items()) == 0:
pinocchio.updateFramePlacement(model.differential.pinocchio, data.differential.pinocchio,
frameId)
pose = data.differential.pinocchio.oMf[frameId]
p.append(np.asarray(pose.translation.T).reshape(-1).tolist())
elif isinstance(data, libcrocoddyl_pywrap.ActionDataImpulseFwdDynamics):
if hasattr(data, "pinocchio"):
pose = data.pinocchio.oMf[frameId]
p.append(np.asarray(pose.translation.T).reshape(-1).tolist())
return ps
def _addRobot(self):
# Spawn robot model
self.robot.initViewer(windowName="crocoddyl", loadModel=False)
self.robot.loadViewerModel(rootNodeName="robot")
def _setBackground(self):
# Set white background and floor
window_id = self.robot.viewer.gui.getWindowID("crocoddyl")
self.robot.viewer.gui.setBackgroundColor1(window_id, self.backgroundColor)
self.robot.viewer.gui.setBackgroundColor2(window_id, self.backgroundColor)
def _addFloor(self):
self.robot.viewer.gui.createGroup(self.floorGroup)
self.robot.viewer.gui.addFloor(self.floorGroup + "/flat")
self.robot.viewer.gui.setScale(self.floorGroup + "/flat", self.floorScale)
self.robot.viewer.gui.setColor(self.floorGroup + "/flat", self.floorColor)
self.robot.viewer.gui.setLightingMode(self.floorGroup + "/flat", "OFF")
def _addForceArrows(self):
for key in self.activeContacts:
forceName = self.forceGroup + "/" + key
self.robot.viewer.gui.addArrow(forceName, self.forceRadius, self.forceLength, self.forceColor)
self.robot.viewer.gui.setFloatProperty(forceName, "Alpha", 1.)
if self.fullVisibility:
self.robot.viewer.gui.setVisibility(self.forceGroup, "ALWAYS_ON_TOP")
def _addFrictionCones(self):
for key in self.activeContacts:
self._createCone(key, self.frictionConeScale, mu=0.7)
def _addFrameCurves(self):
for key in self.frameTrajNames:
frameName = self.frameTrajGroup + "/" + key
self.robot.viewer.gui.addCurve(frameName, [np.array([0., 0., 0.]).tolist()] * 2, self.frameTrajColor[key])
self.robot.viewer.gui.setCurveLineWidth(frameName, self.frameTrajLineWidth)
if self.fullVisibility:
self.robot.viewer.gui.setVisibility(frameName, "ALWAYS_ON_TOP")
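    # _createCone builds a square friction "cone" (a pyramid) from four unit generatrices of
    # slope mu, rendered as a TRIANGLE_FAN mesh plus optional edge lines.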
def _createCone(self, coneName, scale=1., mu=0.7):
m_generatrices = np.matrix(np.empty([3, 4]))
m_generatrices[:, 0] = np.matrix([mu, mu, 1.]).T
m_generatrices[:, 0] = m_generatrices[:, 0] / np.linalg.norm(m_generatrices[:, 0])
m_generatrices[:, 1] = m_generatrices[:, 0]
m_generatrices[0, 1] *= -1.
m_generatrices[:, 2] = m_generatrices[:, 0]
m_generatrices[:2, 2] *= -1.
m_generatrices[:, 3] = m_generatrices[:, 0]
m_generatrices[1, 3] *= -1.
generatrices = m_generatrices
v = [[0., 0., 0.]]
for k in range(m_generatrices.shape[1]):
v.append(m_generatrices[:3, k].T.tolist()[0])
v.append(m_generatrices[:3, 0].T.tolist()[0])
coneGroup = self.frictionGroup + "/" + coneName
self.robot.viewer.gui.createGroup(coneGroup)
meshGroup = coneGroup + "/cone"
result = self.robot.viewer.gui.addCurve(meshGroup, v, self.frictionConeColor1)
self.robot.viewer.gui.setCurveMode(meshGroup, 'TRIANGLE_FAN')
if self.frictionConeRays:
lineGroup = coneGroup + "/lines"
self.robot.viewer.gui.createGroup(lineGroup)
for k in range(m_generatrices.shape[1]):
l = self.robot.viewer.gui.addLine(lineGroup + "/" + str(k), [0., 0., 0.],
m_generatrices[:3, k].T.tolist()[0], self.frictionConeColor2)
self.robot.viewer.gui.setScale(coneGroup, [scale, scale, scale])
if self.fullVisibility:
self.robot.viewer.gui.setVisibility(meshGroup, "ALWAYS_ON_TOP")
self.robot.viewer.gui.setVisibility(lineGroup, "ALWAYS_ON_TOP")
def _setConeMu(self, coneName, mu):
current_mu = self.frictionMu[coneName]
if mu != current_mu:
self.frictionMu[coneName] = mu
coneGroup = self.frictionGroup + "/" + coneName
self.robot.viewer.gui.deleteNode(coneGroup, True)
self._createCone(coneName, self.frictionConeScale, mu)
class MeshcatDisplay(DisplayAbstract):
def __init__(self, robot, rate=-1, freq=1, openWindow=True):
DisplayAbstract.__init__(self, rate, freq)
self.robot = robot
robot.setVisualizer(pinocchio.visualize.MeshcatVisualizer())
self._addRobot(openWindow)
def display(self, xs, fs=[], ps=[], dts=[], factor=1.):
if not dts:
dts = [0.] * len(xs)
S = 1 if self.rate <= 0 else max(len(xs) // self.rate, 1)
for i, x in enumerate(xs):
if not i % S:
self.robot.display(x[:self.robot.nq])
time.sleep(dts[i] * factor)
def _addRobot(self, openWindow):
# Spawn robot model
self.robot.initViewer(open=openWindow)
self.robot.loadViewerModel(rootNodeName="robot")
class CallbackDisplay(libcrocoddyl_pywrap.CallbackAbstract):
def __init__(self, display):
libcrocoddyl_pywrap.CallbackAbstract.__init__(self)
self.visualization = display
def __call__(self, solver):
if (solver.iter + 1) % self.visualization.freq:
return
self.visualization.displayFromSolver(solver)
class CallbackLogger(libcrocoddyl_pywrap.CallbackAbstract):
def __init__(self):
libcrocoddyl_pywrap.CallbackAbstract.__init__(self)
self.xs = []
self.us = []
self.fs = []
self.steps = []
self.iters = []
self.costs = []
self.u_regs = []
self.x_regs = []
self.stops = []
self.grads = []
def __call__(self, solver):
import copy
self.xs = copy.copy(solver.xs)
self.us = copy.copy(solver.us)
self.fs.append(copy.copy(solver.fs))
self.steps.append(solver.stepLength)
self.iters.append(solver.iter)
self.costs.append(solver.cost)
self.u_regs.append(solver.u_reg)
self.x_regs.append(solver.x_reg)
self.stops.append(solver.stoppingCriteria())
self.grads.append(-np.asscalar(solver.expectedImprovement()[1]))
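# Usage sketch (an assumption-laden example, not from this file): given a solver built
# elsewhere, e.g. a crocoddyl.SolverDDP over a ShootingProblem, and its setCallbacks
# method from the C++ bindings:
#
#     log = CallbackLogger()
#     solver.setCallbacks([log])
#     solver.solve()
#     plotOCSolution(log.xs, log.us)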
def plotOCSolution(xs=None, us=None, figIndex=1, show=True, figTitle=""):
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams["pdf.fonttype"] = 42
plt.rcParams["ps.fonttype"] = 42
# Getting the state and control trajectories
if xs is not None:
xsPlotIdx = 111
nx = xs[0].shape[0]
X = [0.] * nx
for i in range(nx):
X[i] = [np.asscalar(x[i]) for x in xs]
if us is not None:
usPlotIdx = 111
nu = us[0].shape[0]
U = [0.] * nu
for i in range(nu):
U[i] = [np.asscalar(u[i]) if u.shape[0] != 0 else 0 for u in us]
if xs is not None and us is not None:
xsPlotIdx = 211
usPlotIdx = 212
plt.figure(figIndex)
# Plotting the state trajectories
if xs is not None:
plt.subplot(xsPlotIdx)
[plt.plot(X[i], label="x" + str(i)) for i in range(nx)]
plt.legend()
plt.title(figTitle, fontsize=14)
# Plotting the control commands
if us is not None:
plt.subplot(usPlotIdx)
[plt.plot(U[i], label="u" + str(i)) for i in range(nu)]
plt.legend()
plt.xlabel("knots")
if show:
plt.show()
def plotConvergence(costs, muLM, muV, gamma, theta, alpha, figIndex=1, show=True, figTitle=""):
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams["pdf.fonttype"] = 42
plt.rcParams["ps.fonttype"] = 42
plt.figure(figIndex, figsize=(6.4, 8))
# Plotting the total cost sequence
plt.subplot(511)
plt.ylabel("cost")
plt.plot(costs)
plt.title(figTitle, fontsize=14)
    # Plotting the mu sequences
plt.subplot(512)
plt.ylabel("mu")
plt.plot(muLM, label="LM")
plt.plot(muV, label="V")
plt.legend()
# Plotting the gradient sequence (gamma and theta)
plt.subplot(513)
plt.ylabel("gamma")
plt.plot(gamma)
plt.subplot(514)
plt.ylabel("theta")
plt.plot(theta)
# Plotting the alpha sequence
plt.subplot(515)
plt.ylabel("alpha")
ind = np.arange(len(alpha))
plt.bar(ind, alpha)
plt.xlabel("iteration")
if show:
plt.show()
def saveOCSolution(filename, xs, us, ks=None, Ks=None):
import pickle
data = {"xs": xs, "us": us, "ks": ks, "Ks": Ks}
with open(filename, "wb") as f:
pickle.dump(data, f)
def saveConvergence(filename, costs, muLM, muV, gamma, theta, alpha):
import pickle
data = {"costs": costs, "muLM": muLM, "muV": muV, "gamma": gamma, "theta": theta, "alpha": alpha}
with open(filename, "wb") as f:
pickle.dump(data, f)
def saveLogfile(filename, log):
import pickle
data = {
"xs": log.xs,
"us": log.us,
"fs": log.fs,
"steps": log.steps,
"iters": log.iters,
"costs": log.costs,
"muLM": log.u_regs,
"muV": log.x_regs,
"stops": log.stops,
"grads": log.grads
}
with open(filename, "wb") as f:
pickle.dump(data, f)
|
the-stack_106_21612
|
"""Support for AdGuard Home."""
from __future__ import annotations
import logging
from typing import Any
from adguardhome import AdGuardHome, AdGuardHomeConnectionError, AdGuardHomeError
import voluptuous as vol
from homeassistant.components.adguard.const import (
CONF_FORCE,
DATA_ADGUARD_CLIENT,
DATA_ADGUARD_VERION,
DOMAIN,
SERVICE_ADD_URL,
SERVICE_DISABLE_URL,
SERVICE_ENABLE_URL,
SERVICE_REFRESH,
SERVICE_REMOVE_URL,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
SERVICE_URL_SCHEMA = vol.Schema({vol.Required(CONF_URL): cv.url})
SERVICE_ADD_URL_SCHEMA = vol.Schema(
{vol.Required(CONF_NAME): cv.string, vol.Required(CONF_URL): cv.url}
)
SERVICE_REFRESH_SCHEMA = vol.Schema(
{vol.Optional(CONF_FORCE, default=False): cv.boolean}
)
PLATFORMS = ["sensor", "switch"]
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the AdGuard Home components."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up AdGuard Home from a config entry."""
session = async_get_clientsession(hass, entry.data[CONF_VERIFY_SSL])
adguard = AdGuardHome(
entry.data[CONF_HOST],
port=entry.data[CONF_PORT],
username=entry.data[CONF_USERNAME],
password=entry.data[CONF_PASSWORD],
tls=entry.data[CONF_SSL],
verify_ssl=entry.data[CONF_VERIFY_SSL],
session=session,
)
hass.data.setdefault(DOMAIN, {})[DATA_ADGUARD_CLIENT] = adguard
try:
await adguard.version()
except AdGuardHomeConnectionError as exception:
raise ConfigEntryNotReady from exception
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
async def add_url(call) -> None:
"""Service call to add a new filter subscription to AdGuard Home."""
await adguard.filtering.add_url(
allowlist=False, name=call.data.get(CONF_NAME), url=call.data.get(CONF_URL)
)
async def remove_url(call) -> None:
"""Service call to remove a filter subscription from AdGuard Home."""
await adguard.filtering.remove_url(allowlist=False, url=call.data.get(CONF_URL))
async def enable_url(call) -> None:
"""Service call to enable a filter subscription in AdGuard Home."""
await adguard.filtering.enable_url(allowlist=False, url=call.data.get(CONF_URL))
async def disable_url(call) -> None:
"""Service call to disable a filter subscription in AdGuard Home."""
await adguard.filtering.disable_url(
allowlist=False, url=call.data.get(CONF_URL)
)
async def refresh(call) -> None:
"""Service call to refresh the filter subscriptions in AdGuard Home."""
await adguard.filtering.refresh(
allowlist=False, force=call.data.get(CONF_FORCE)
)
hass.services.async_register(
DOMAIN, SERVICE_ADD_URL, add_url, schema=SERVICE_ADD_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_REMOVE_URL, remove_url, schema=SERVICE_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_ENABLE_URL, enable_url, schema=SERVICE_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_DISABLE_URL, disable_url, schema=SERVICE_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_REFRESH, refresh, schema=SERVICE_REFRESH_SCHEMA
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload AdGuard Home config entry."""
hass.services.async_remove(DOMAIN, SERVICE_ADD_URL)
hass.services.async_remove(DOMAIN, SERVICE_REMOVE_URL)
hass.services.async_remove(DOMAIN, SERVICE_ENABLE_URL)
hass.services.async_remove(DOMAIN, SERVICE_DISABLE_URL)
hass.services.async_remove(DOMAIN, SERVICE_REFRESH)
for platform in PLATFORMS:
await hass.config_entries.async_forward_entry_unload(entry, platform)
del hass.data[DOMAIN]
return True
class AdGuardHomeEntity(Entity):
"""Defines a base AdGuard Home entity."""
def __init__(
self, adguard, name: str, icon: str, enabled_default: bool = True
) -> None:
"""Initialize the AdGuard Home entity."""
self._available = True
self._enabled_default = enabled_default
self._icon = icon
self._name = name
self.adguard = adguard
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self) -> str:
"""Return the mdi icon of the entity."""
return self._icon
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
async def async_update(self) -> None:
"""Update AdGuard Home entity."""
if not self.enabled:
return
try:
await self._adguard_update()
self._available = True
except AdGuardHomeError:
if self._available:
_LOGGER.debug(
"An error occurred while updating AdGuard Home sensor",
exc_info=True,
)
self._available = False
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
raise NotImplementedError()
class AdGuardHomeDeviceEntity(AdGuardHomeEntity):
"""Defines a AdGuard Home device entity."""
@property
def device_info(self) -> dict[str, Any]:
"""Return device information about this AdGuard Home instance."""
return {
"identifiers": {
(DOMAIN, self.adguard.host, self.adguard.port, self.adguard.base_path)
},
"name": "AdGuard Home",
"manufacturer": "AdGuard Team",
"sw_version": self.hass.data[DOMAIN].get(DATA_ADGUARD_VERION),
"entry_type": "service",
}
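# A minimal sketch of a concrete entity built on AdGuardHomeDeviceEntity:
# subclasses only implement _adguard_update(), which async_update() above wraps
# with availability and error handling. This class and its stats.dns_queries()
# call are illustrative assumptions, not the integration's real sensor code.
class AdGuardHomeExampleEntity(AdGuardHomeDeviceEntity):
    """Hypothetical entity exposing a single AdGuard Home statistic."""

    def __init__(self, adguard: AdGuardHome) -> None:
        """Initialize the hypothetical example entity."""
        super().__init__(adguard, "AdGuard Example", "mdi:magnify", True)
        self._state: int | None = None

    @property
    def state(self) -> int | None:
        """Return the most recently fetched value."""
        return self._state

    async def _adguard_update(self) -> None:
        """Fetch the total DNS query count from AdGuard Home (assumed client API)."""
        self._state = await self.adguard.stats.dns_queries()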
|
the-stack_106_21614
|
import asyncio
import discord
import random
import socket
import logging
import datetime
import time
import inspect
import traceback
import yaml
import shutil
import os
import re
try:
import pip._internal as pip # pip 10 compat
except:
import pip
from logging.handlers import RotatingFileHandler
from distutils.dir_util import copy_tree
from discord.abc import PrivateChannel
from jshbot import parser, data, utilities, commands, plugins, configurations, logger
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Arg, Opt, Attachment,
MessageTypes, Response, Elevation)
__version__ = '0.2.15'
uses_configuration = False
CBException = ConfiguredBotException('Base')
global_dictionary = {}
DEFAULT_PLUGINS_REPO = 'https://github.com/jkchen2/JshBot-plugins/archive/master.zip'
# Debugging
DEV_BOT_ID = 171672297017573376
@plugins.command_spawner
def get_commands(bot):
"""Sets up new commands and shortcuts in the proper syntax.
See dummy.py for a complete sample reference.
"""
new_commands = []
new_commands.append(Command(
'ping', subcommands=[
SubCommand(
Arg('message', argtype=ArgTypes.MERGED_OPTIONAL),
doc='The bot will respond with "Pong!" and the given message if it is included.')],
description='Pings the bot for a response.', category='core', function=get_ping))
new_commands.append(Command(
'base', subcommands=[
SubCommand(Opt('version'), doc='Gets the version of the bot.'),
SubCommand(Opt('source'), doc='Gets the source of the bot.'),
SubCommand(Opt('uptime'), doc='Gets the uptime of the bot.'),
SubCommand(
Opt('announcement'), doc='Gets the current announcement set by the bot owners.'),
SubCommand(
Opt('invite'),
Opt('details', optional=True,
doc='Shows a breakdown of what each permission is used for'),
doc='Generates an invite for the bot.'),
SubCommand(
Opt('notifications'),
doc='If this command is used in a server, this will list the pending '
'notifications for the channel you are in. If it is used in a '
'direct message, this will list the pending notifications for you.'),
SubCommand(
Opt('join'), doc='Have the bot join the voice channel you are in.',
allow_direct=False),
SubCommand(
Opt('leave'), Opt('force', optional=True),
doc='Have the bot leave the voice channel you are in.',
allow_direct=False),
SubCommand(
Opt('stop'), doc='Stops currently playing audio.',
allow_direct=False)],
shortcuts = [
Shortcut('announcement', 'announcement'),
Shortcut('invite', 'invite'),
Shortcut('join', 'join'),
Shortcut('leave', 'leave'),
Shortcut('stfu', 'stop'),
Shortcut('stop', 'stop')],
description='Essential bot commands that anybody can use.',
category='core', function=base_wrapper))
new_commands.append(Command(
'mod', subcommands=[
SubCommand(Opt('info'), doc='Gets server information'),
SubCommand(
Opt('toggle'),
Arg('command', quotes_recommended=False),
Arg('specifier', argtype=ArgTypes.MERGED_OPTIONAL,
                    doc='The index of the subcommand, or subcommand arguments.'),
doc='Enables or disables a command.'),
SubCommand(
Opt('block'), Arg('user', argtype=ArgTypes.MERGED,
convert=utilities.MemberConverter()),
doc='Blocks the user from interacting with the bot.'),
SubCommand(
Opt('unblock'), Arg('user', argtype=ArgTypes.MERGED,
convert=utilities.MemberConverter()),
                doc='Unblocks the user from interacting with the bot.'),
SubCommand(Opt('clear'), doc='Pushes chat upwards.'),
SubCommand(
Opt('mute'), Arg('channel', argtype=ArgTypes.MERGED_OPTIONAL,
convert=utilities.ChannelConverter()),
doc='Stops the bot from responding to messages sent in the given '
'channel, or the entire server if the channel is not given.'),
SubCommand(
Opt('unmute'), Arg('channel', argtype=ArgTypes.MERGED_OPTIONAL,
convert=utilities.ChannelConverter()),
doc='Allows the bot to respond to messages sent in the given '
'channel, or the entire server if the channel is not given.'),
SubCommand(
Opt('invoker'),
Arg('custom invoker', argtype=ArgTypes.MERGED_OPTIONAL,
check=lambda b, m, v, *a: len(v) <= 10,
check_error='The invoker can be a maximum of 10 characters long.'),
doc='Sets or clears the custom invoker.'),
SubCommand(
Opt('mention'), doc='Toggles mention mode. If enabled, the bot '
'will only respond to its name or mention as an invoker.'),
SubCommand(
Opt('cooldown'),
Arg('number of commands', argtype=ArgTypes.MERGED_OPTIONAL,
convert=int, check=lambda b, m, v, *a: 1 <= v <= b.spam_limit,
check_error='Must be between 1 and {b.spam_limit} inclusive.'),
doc='Limits the number of commands per default time interval to the '
'value specified. Bot moderators are not subject to this limit. If '
'no value is given, the default cooldown is used (maximum value).'),
SubCommand(
Opt('timezone'),
Arg('offset', quotes_recommended=False, argtype=ArgTypes.OPTIONAL,
convert=int, check=lambda b, m, v, *a: -12 <= v <= 12,
check_error='Must be between -12 and +12',
doc='A UTC hours offset (-12 to +12).'),
doc='Sets or clears the bot\'s timezone interpretation for the server.')],
shortcuts=[Shortcut('clear', 'clear')],
description='Commands for bot moderators.', elevated_level=Elevation.BOT_MODERATORS,
no_selfbot=True, category='core', function=mod_wrapper))
new_commands.append(Command(
'owner', subcommands=[
SubCommand(
Opt('modrole'),
Arg('role', argtype=ArgTypes.MERGED_OPTIONAL, convert=utilities.RoleConverter()),
doc='Sets or clears the bot moderator role.'),
SubCommand(
Opt('feedback'), Arg('message', argtype=ArgTypes.MERGED),
doc='Sends a message to the bot owners. NOTE: Your user ID will be sent '
'as well. Please use reasonably.'),
SubCommand(
Opt('notifications'),
doc='Toggles notifications from the bot regarding moderation events '
'(such as muting channels and blocking users from bot interaction).')],
description='Commands for server owners.',
elevated_level=Elevation.GUILD_OWNERS, no_selfbot=True,
category='core', function=owner_wrapper))
new_commands.append(Command(
'botowner', subcommands=[
SubCommand(Opt('halt'), doc='Shuts down the bot.'),
SubCommand(
Opt('reload'),
Arg('plugin', argtype=ArgTypes.SPLIT_OPTIONAL, additional='additional plugins'),
doc='Reloads the specified plugin(s), or all external plugins.'),
SubCommand(Opt('ip'), doc='Gets the local IP address of the bot.'),
SubCommand(Opt('backup'), doc='Gets the data folder as a zip file.'),
SubCommand(
Opt('restore'), Attachment('restore zip file'),
doc='Downloads the restore zip file and replaces current data files with '
'the contents of the backup.'),
SubCommand(
Opt('restoredb'),
Arg('table', argtype=ArgTypes.SPLIT_OPTIONAL, additional='additional tables',
                    doc='Restores only the given tables.'),
Attachment('db_dump file'),
doc='Loads the database dump and restores either the entire database, or the '
'specified tables.'),
SubCommand(
Opt('blacklist'),
Arg('user', argtype=ArgTypes.MERGED_OPTIONAL,
convert=utilities.MemberConverter(server_only=False)),
doc='Blacklist or unblacklist a user from sending feedback. If no '
'user is specified, this lists all blacklisted entries.'),
SubCommand(Opt('togglefeedback'), doc='Toggles the feedback command.'),
SubCommand(
Opt('announcement'), Arg('text', argtype=ArgTypes.MERGED_OPTIONAL),
doc='Sets or clears the announcement text.'),
SubCommand(
Opt('update'),
Arg('custom repo URL', argtype=ArgTypes.MERGED_OPTIONAL,
doc='A custom URL to a .zip file containing plugins.'),
doc='Opens the bot update menu.'),
SubCommand(
Opt('maintenance'),
Opt('silent', optional=True, doc='Don\'t display the maintenance error.'),
Arg('message', argtype=ArgTypes.MERGED_OPTIONAL))],
shortcuts=[
Shortcut(
'reload', 'reload {arguments}',
Arg('arguments', argtype=ArgTypes.MERGED_OPTIONAL))],
description='Commands for the bot owner(s).',
hidden=True, elevated_level=Elevation.BOT_OWNERS,
category='core', function=botowner_wrapper))
new_commands.append(Command(
'debug', subcommands=[
SubCommand(Opt('plugin'), Opt('list'), doc='Lists loaded plugins.'),
SubCommand(
Opt('plugin', attached='plugin name'),
doc='Gets basic information about the given plugin.'),
SubCommand(Opt('latency'), doc='Calculates the ping time.'),
SubCommand(Opt('logs'), doc='Uploads logs to the debug channel.'),
SubCommand(Opt('toggle'), doc='Toggles the debug mode.'),
SubCommand(Opt('resetlocals'), doc='Resets the debug local variables.'),
SubCommand(
Arg('python', argtype=ArgTypes.MERGED),
doc='Evaluates or executes the given code.')],
description='Commands to help the bot owner debug stuff.',
other='Be careful with these commands! They can break the bot.',
hidden=True, elevated_level=Elevation.BOT_OWNERS,
category='core', function=debug_wrapper))
new_commands.append(Command(
'help', subcommands=[
SubCommand(
Opt('manual'), Opt('here', optional=True),
Arg('subject', argtype=ArgTypes.OPTIONAL, default=''),
Arg('topic number', argtype=ArgTypes.OPTIONAL, convert=int,
check=lambda b, m, v, *a: v > 0, check_error='Must be a positive number.',
quotes_recommended=False),
Arg('page number', argtype=ArgTypes.OPTIONAL, convert=int,
check=lambda b, m, v, *a: v > 0, check_error='Must be a positive number.',
quotes_recommended=False),
doc='Gets the specified manual. If no subject is specified, this '
'brings up the general manual menu.'),
SubCommand(
Opt('all'), Opt('here', optional=True),
doc='Shows all of the commands and related help.'),
SubCommand(
Opt('here', optional=True),
Arg('base', argtype=ArgTypes.OPTIONAL, quotes_recommended=False),
Arg('topic', argtype=ArgTypes.MERGED_OPTIONAL, default='',
doc='Either the subcommand index, or standard subcommand syntax.'),
doc='Gets the specified help entry. If no base is specified, this '
'brings up the general help menu.')],
shortcuts=[
Shortcut(
'manual', 'manual {arguments}',
Arg('arguments', argtype=ArgTypes.MERGED_OPTIONAL))],
description='Command help and usage manuals.',
other=('For all of these commands, if the \'here\' option is specified, a '
'direct message will not be sent.'),
category='core', function=help_wrapper))
return new_commands
@plugins.db_template_spawner
def get_templates(bot):
"""Gets the timer database template."""
return {
'schedule': ("time bigint NOT NULL,"
"plugin text NOT NULL,"
"function text NOT NULL,"
"payload json,"
"search text,"
"destination text,"
"info text,"
"id serial")
}
@plugins.on_load
def setup_schedule_table(bot):
data.db_create_table(bot, 'schedule', template='schedule')
if not data.db_exists(bot, 'IX_schedule_time'): # Create time index
data.db_execute(bot, 'CREATE INDEX IX_schedule_time ON schedule (time ASC)')
async def base_wrapper(bot, context):
message, _, subcommand, options = context[:4]
response = Response()
if subcommand.index == 0: # version
response.content = '`{}`\n{}'.format(bot.version, bot.date)
elif subcommand.index == 1: # source
response.content = random.choice([
"It's shit. I'm sorry.",
"You want to see what the Matrix is like?",
"Script kiddie level stuff in here.",
"Beware the lack of PEP 8 guidelines inside!",
"Snarky comments inside and out.",
"Years down the road, this will all just be a really "
"embarrassing but funny joke.",
"Made with ~~love~~ pure hatred.",
"At least he's using version control.",
"Yes, I know I'm not very good. Sorry...",
"Take it easy on me, okay?",
"You're going to groan. A lot.",
"You might be better off *not* looking inside."])
response.content += ("\nhttps://github.com/jkchen2/JshBot\n"
"https://github.com/jkchen2/JshBot-plugins")
elif subcommand.index == 2: # uptime
uptime = int(time.time()) - bot.time
response.content = "The bot has been on since **{}**\n{}".format(
bot.readable_time, utilities.get_time_string(uptime, text=True, full=True))
elif subcommand.index == 3: # Announcement
announcement = data.get(bot, 'core', 'announcement')
if not announcement:
response.content = "No announcement right now!"
else:
invoker = utilities.get_invoker(bot, context.guild)
response.embed = discord.Embed(
title=":mega: Announcement",
description=announcement[0].format(invoker=invoker),
timestamp=datetime.datetime.utcfromtimestamp(announcement[1]),
colour=discord.Colour(0x55acee))
elif subcommand.index == 4: # Invite
if bot.selfbot:
raise CBException("Nope.")
permissions_number = utilities.get_permission_bits(bot)
app_id = (await bot.application_info()).id
authorization_link = (
'**[`Authorization link`](https://discordapp.com/oauth2/authorize?'
'&client_id={0}&scope=bot&permissions={1})\nRemember: you must have the '
'`Administrator` role on the server you are trying to add the '
'bot to.**'.format(app_id, permissions_number))
response.embed = discord.Embed(
title=':inbox_tray: Invite', colour=discord.Colour(0x77b255),
description=authorization_link)
if 'details' in options:
for plugin in bot.plugins.keys():
permission_items = data.get(
bot, plugin, 'permissions', volatile=True, default={}).items()
if permission_items:
lines = []
for item in permission_items:
lines.append('**`{}`** -- {}'.format(
item[0].replace('_', ' ').title(), item[1]))
response.embed.add_field(name=plugin, value='\n'.join(lines))
elif subcommand.index == 5: # Notifications
if context.direct: # List user notifications
destination = 'u' + str(context.author.id)
specifier = 'you'
guild_id = None
else: # List channel notifications:
destination = 'c' + str(context.channel.id)
specifier = 'this channel'
guild_id = context.guild.id
notifications = utilities.get_schedule_entries(
bot, None, custom_match='destination=%s', custom_args=[destination])
if notifications:
results = ['Here is a list of pending notifications for {}:\n'.format(specifier)]
for entry in notifications:
delta = utilities.get_time_string(entry.time - time.time(), text=True)
offset, scheduled = utilities.get_timezone_offset(
bot, guild_id=guild_id, as_string=True,
utc_dt=datetime.datetime.utcfromtimestamp(entry.time))
results.append('{} [{}] ({}) from plugin `{}`: {}'.format(
scheduled, offset, delta, entry.plugin,
entry.info if entry.info else '(No description available)'))
response.content = '\n'.join(results)
else:
response.content = "No pending notifications for {}.".format(specifier)
elif subcommand.index in (6, 7): # Join/leave voice channel
if not message.author.voice:
raise CBException("You are not in a voice channel.")
try:
voice_channel = message.author.voice.channel
if subcommand.index == 6:
await utilities.join_and_ready(
bot, voice_channel, reconnect=True, is_mod=data.is_mod(
bot, member=message.author))
response.content = "Joined {}.".format(voice_channel.name)
else:
await utilities.stop_audio(
bot, message.guild, member=message.author, safe=False,
force='force' in options)
response.content = "Left {}.".format(voice_channel.name)
except BotException as e:
raise e # Pass up
except Exception as e:
action = 'join' if subcommand.index == 6 else 'leave'
raise CBException("Failed to {} the voice channel.".format(action), e=e)
elif subcommand.index == 8: # Stop audio
await utilities.stop_audio(
bot, message.guild, member=message.author, safe=False, disconnect=False)
response.content = "Stopped audio."
return response
async def mod_wrapper(bot, context):
message, _, subcommand, _, arguments = context[:5]
response = ''
mod_action = ''
if subcommand.index == 0: # info
guild_data = data.get(bot, 'core', None, guild_id=message.guild.id, default={})
disabled_commands = guild_data.get('disabled', [])
modrole_id = guild_data.get('modrole', None)
modrole = data.get_role(bot, modrole_id, context.guild, safe=True)
display_list = []
for disabled_command in disabled_commands:
display_list.append('{0} ({1})'.format(
disabled_command[0],
'all' if disabled_command[1] == -1 else disabled_command[1]+1))
cooldown_message = "{} command(s) per {} seconds(s)".format(
guild_data.get('spam_limit', bot.spam_limit), bot.spam_timeout)
response = (
'```\n'
'Information for server {0}\n'
'ID: {0.id}\n'
'Owner: {0.owner_id}\n'
'Bot moderator role: {1}\n'
'Blocked users: {2}\n'
'Muted: {3}\n'
'Muted channels: {4}\n'
'Command invoker: {5}\n'
'Mention mode: {6}\n'
'Disabled commands: {7}\n'
'Cooldown: {8}```').format(
message.guild,
modrole.id if modrole else None,
guild_data.get('blocked', []),
guild_data.get('muted', []),
guild_data.get('muted_channels', []),
guild_data.get('command_invoker', None),
guild_data.get('mention_mode', False),
display_list, cooldown_message)
elif subcommand.index == 1: # Toggle command
index_guess = -1 # All
guess_text = '{} '.format(arguments[0])
if arguments[1]: # Given the index or subcommand
if arguments[1].isdigit():
index_guess = int(arguments[1]) - 1
else:
guess_text += arguments[1]
guess = await parser.guess_command(
bot, guess_text, message, safe=False, suggest_help=False)
if isinstance(guess, Command): # No subcommand found
if not -1 <= index_guess < len(guess.subcommands):
raise CBException(
"Invalid subcommand index. Must be between 1 and {} inclusive, "
"or 0 to toggle all subcommands.".format(len(guess.subcommands)))
else: # Subcommand
index_guess = guess.index
guess = guess.command
# Display disabled command and potentially subcommand
subcommand = guess.subcommands[index_guess] if index_guess != -1 else None
disabled = data.list_data_toggle(
bot, 'core', 'disabled', [guess.base, index_guess], guild_id=context.guild.id)
response = "Disabled" if disabled else "Enabled"
if subcommand:
response += " the \t{}\t subcommand.".format(subcommand.help_string)
else:
response += " all `{}` subcommands.".format(guess.base)
mod_action = response
elif subcommand.index in (2, 3): # Block or unblock
user = arguments[0]
block = subcommand.index == 2
mod_action = 'Blocked {}' if block else 'Unblocked {}'
mod_action = mod_action.format('{0} ({0.id})'.format(user))
blocked = data.is_blocked(bot, member=user, strict=True)
mod = data.is_mod(bot, member=user)
if mod:
raise CBException("Cannot block or unblock a moderator.")
elif block:
if blocked:
raise CBException("User is already blocked.")
else:
data.list_data_append(bot, 'core', 'blocked', user.id, guild_id=message.guild.id)
response = "User is now blocked."
else:
if not blocked:
raise CBException("User is already unblocked.")
else:
data.list_data_remove(bot, 'core', 'blocked', user.id, guild_id=message.guild.id)
response = "User is now unblocked."
elif subcommand.index == 4: # Clear
response = '\u200b' + '\n'*80 + "The chat was pushed up by a bot moderator."
elif subcommand.index in (5, 6): # Mute or unmute
guild_id = message.guild.id
mute = subcommand.index == 5
mod_action = 'Muted {}' if mute else 'Unmuted {}'
if arguments[0]:
channel = arguments[0]
muted = channel.id in data.get(
bot, 'core', 'muted_channels', guild_id=guild_id, default=[])
mod_action = mod_action.format(channel.name)
if mute:
if muted:
raise CBException("Channel is already muted.")
else:
data.list_data_append(
bot, 'core', 'muted_channels', channel.id, guild_id=guild_id)
if isinstance(channel, discord.VoiceChannel): # disconnect
await utilities.stop_audio(bot, message.guild)
response = "Channel muted."
else: # unmute
if not muted:
raise CBException("Channel is already unmuted.")
else:
data.list_data_remove(
bot, 'core', 'muted_channels', channel.id, guild_id=guild_id)
response = "Channel unmuted."
else: # guild
mod_action = mod_action.format('the server')
muted = data.get(bot, 'core', 'muted', guild_id=guild_id, default=False)
if not (muted ^ mute):
response = "Server is already {}muted.".format('' if muted else 'un')
raise CBException(response)
else:
data.add(bot, 'core', 'muted', mute, guild_id=guild_id)
response = "Server {}muted.".format('' if mute else 'un')
elif subcommand.index == 7: # Invoker
data.add(
bot, 'core', 'command_invoker',
arguments[0] if arguments[0] else None,
guild_id=message.guild.id)
response = "Custom command invoker {}.".format('set' if arguments[0] else 'cleared')
if arguments[0]:
response = "Custom command invoker set."
mod_action = "Set the server command invoker to '{}'.".format(arguments[0])
else:
response = "Custom command invoker cleared."
mod_action = "Removed the custom command invoker."
elif subcommand.index == 8: # Mention
current_mode = data.get(
bot, 'core', 'mention_mode', guild_id=message.guild.id, default=False)
data.add(bot, 'core', 'mention_mode', not current_mode, guild_id=message.guild.id)
response = "Mention mode {}activated.".format('de' if current_mode else '')
mod_action = "{}activated mention mode.".format('de' if current_mode else '').capitalize()
elif subcommand.index == 9: # Cooldown
cooldown = arguments[0]
if cooldown:
data.add(bot, 'core', 'spam_limit', cooldown, guild_id=message.guild.id)
cooldown_message = (
"{} command(s) per {} seconds(s)".format(cooldown, bot.spam_timeout))
response = "Cooldown set to {}.".format(cooldown_message)
mod_action = "set the cooldown to {}.".format(cooldown_message)
else:
data.remove(bot, 'core', 'spam_limit', guild_id=message.guild.id, safe=True)
cooldown_message = (
"{} command(s) per {} seconds(s)".format(bot.spam_limit, bot.spam_timeout))
response = "Cooldown reset to the default {}.".format(cooldown_message)
mod_action = "reset the cooldown to the default {}.".format(cooldown_message)
elif subcommand.index == 10: # Set timezone
if isinstance(arguments[0], int): # Set timezone
data.add(bot, 'core', 'timezone', arguments[0], guild_id=context.guild.id)
response = "Timezone set to UTC{}.".format(
('+' + str(arguments[0])) if arguments[0] >= 0 else arguments[0])
mod_action = "set the timezone: {}".format(response)
else: # Clear timezone
data.remove(bot, 'core', 'timezone', guild_id=context.guild.id, safe=True)
guess = utilities.get_timezone_offset(bot, context.guild.id, as_string=True)
response = (
"Timezone cleared. Time will be interpreted based off of voice "
"server location instead. Current guess: ({})".format(guess))
mod_action = "cleared the custom timezone offset."
# Send notification if configured
send_notifications = data.get(
bot, 'core', 'notifications', guild_id=message.guild.id, default=True)
if mod_action and send_notifications:
if message.edited_at:
timestamp = message.edited_at
else:
timestamp = message.created_at
notification = ('Moderator {0} ({0.id}) from {0.guild} on {1}:\n\t'
'{2}').format(message.author, timestamp, mod_action)
logs = await utilities.get_log_text(bot, message.channel, limit=20, before=message)
logs += '\n{}'.format(utilities.get_formatted_message(message))
guild_owner = await data.fetch_member(bot, message.guild.owner_id, guild=message.guild)
await guild_owner.send(notification)
await utilities.send_text_as_file(guild_owner, logs, 'context')
return Response(content=response)
async def owner_wrapper(bot, context):
message, _, subcommand, _, arguments = context[:5]
mod_action = ''
send_notifications = data.get(
bot, 'core', 'notifications', guild_id=message.guild.id, default=True)
if subcommand.index == 0: # Change moderator role
role = arguments[0]
if role:
response = mod_action = 'Set the bot moderator role to {}.'.format(role)
data.add(bot, 'core', 'modrole', role.id, guild_id=message.guild.id)
else:
response = mod_action = 'Removed the bot moderator role.'
data.remove(bot, 'core', 'modrole', guild_id=message.guild.id, safe=True)
elif subcommand.index == 1: # Send feedback
if data.get(bot, 'core', 'feedbackdisabled', default=False):
response = ("Feedback has been temporarily disabled, probably "
"due to some troll spammers.")
else:
text = arguments[0]
if len(text) > 1500:
raise CBException(
"Whoa! That's a lot of feedback. 1500 characters or fewer, please.")
text = '{0} ({0.id}) on {1.created_at}:\n\t{2}'.format(message.author, message, text)
await utilities.notify_owners(bot, text, user_id=message.author.id)
response = "Message sent to bot owners."
elif subcommand.index == 2: # Toggle notifications
response = ("Bot moderator activity notifications are now turned "
"{}").format("OFF." if send_notifications else "ON.")
data.add(
bot, 'core', 'notifications', not send_notifications, guild_id=message.guild.id)
# Send notification if configured
if mod_action and send_notifications:
if message.edited_at:
timestamp = message.edited_at
else:
timestamp = message.created_at
notification = 'From {0.guild} on {1}, you:\n\t{2}'.format(
message.author, timestamp, mod_action)
logs = await utilities.get_log_text(bot, message.channel, limit=20, before=message)
logs += '\n{}'.format(utilities.get_formatted_message(message))
guild_owner = await data.fetch_member(bot, message.guild.owner_id, guild=message.guild)
await guild_owner.send(content=notification)
        await utilities.send_text_as_file(guild_owner, logs, 'context')
return Response(content=response)
# Update related
def _compare_config(bot, plugin, file_path):
try:
comparison = bot.configurations[plugin]
except:
logger.warn("Configuration file for plugin %s exists, but is not loaded.", plugin)
with open('{}/config/{}-config.yaml'.format(bot.path, plugin[:-3]), 'rb') as config_file:
comparison = yaml.safe_load(config_file)
with open(file_path, 'rb') as config_file:
test_config = yaml.safe_load(config_file)
changes = []
for key, value in test_config.items():
if key not in comparison:
changes.append("Missing entry: " + key)
elif type(value) is not type(comparison[key]):
changes.append("Type mismatch for entry: " + key)
return changes
async def _update_core(bot, progress_function):
if bot.user.id == DEV_BOT_ID:
raise CBException("Dev bot - cancelled core update")
await progress_function('Downloading core package...')
core_repo = 'https://github.com/jkchen2/JshBot/archive/master.zip'
archive_path = await utilities.download_url(bot, core_repo, filename='core.zip')
update_directory = bot.path + '/temp/update/'
try:
shutil.rmtree(update_directory)
except Exception as e:
logger.warn("Failed to clear the update directory: %s", e)
await progress_function('Installing core...')
shutil.unpack_archive(archive_path, update_directory)
update_directory += 'JshBot-master/config/'
config_directory = bot.path + '/config/'
shutil.copy2(update_directory + 'core-manual.yaml', config_directory + 'core-manual.yaml')
changes = _compare_config(bot, 'core', update_directory + 'core-config.yaml')
if changes:
return changes
return_code = pip.main([
'install',
'--upgrade',
'--force-reinstall',
'--process-dependency-links',
archive_path])
if return_code != 0:
return return_code
await asyncio.sleep(1)
await progress_function('Core updated.')
async def _download_plugins(bot, progress_function, plugins_repo):
await progress_function("Downloading plugins...")
archive_path = await utilities.download_url(bot, plugins_repo, filename='plugins.zip')
await progress_function("Unpacking plugins...")
# Extract and return plugins list
update_directory = bot.path + '/temp/update/'
try:
shutil.rmtree(update_directory)
except Exception as e:
logger.warn("Failed to clear the update directory: %s", e)
shutil.unpack_archive(archive_path, update_directory)
update_directory += os.listdir(bot.path + '/temp/update/')[0]
available_updates = []
for entry in os.listdir(update_directory):
if os.path.isdir('{}/{}'.format(update_directory, entry)):
available_updates.append(entry + '.py')
await asyncio.sleep(1)
await progress_function("Plugins unpacked.")
return sorted(available_updates)
async def _update_plugins(bot, plugin_list, progress_function):
if bot.user.id == DEV_BOT_ID:
raise CBException("Dev bot - cancelled core update")
await progress_function('Updating plugins...')
config_changed = {}
update_name = os.listdir(bot.path + '/temp/update/')[0]
update_directory = bot.path + '/temp/update/{}/'.format(update_name)
plugins_directory = bot.path + '/plugins/'
config_directory = bot.path + '/config/'
for plugin in plugin_list:
directory = update_directory + plugin[:-3]
for entry in os.listdir(directory):
entry_path = directory + '/' + entry
if entry.lower() == 'requirements.txt': # Install plugin requirements
await progress_function('Installing requirements for {}...'.format(plugin))
pip.main(['install', '--upgrade', '-r', entry_path])
await asyncio.sleep(1)
continue
if entry in (plugin, 'plugin_data'): # plugin_data or plugin itself
if entry == 'plugin_data':
copy_tree(entry_path, plugins_directory + 'plugin_data')
else:
shutil.copy2(entry_path, plugins_directory)
elif entry.startswith(plugin[:-3] + '-'):
if entry in os.listdir(config_directory) and entry.endswith('-config.yaml'):
# Checks existing config files for changes
changes = _compare_config(bot, plugin, entry_path)
if changes:
config_changed[plugin] = changes
else: # Copy config over
shutil.copy2(entry_path, config_directory)
else:
logger.debug("Ignoring entry: %s", entry_path)
logger.debug('Updated ' + plugin)
await asyncio.sleep(1)
await progress_function('Plugins updated.')
return config_changed
async def update_menu(bot, context, response, result, timed_out):
if timed_out or (result and result[0].emoji == '❌'):
response.embed.clear_fields()
response.embed.add_field(name='Update cancelled', value='\u200b')
response.embed.set_footer(text='---')
await response.message.edit(embed=response.embed)
return False
elif not result:
return
async def _progress_function(status_message):
response.embed.set_footer(text=status_message)
try:
await response.message.edit(embed=response.embed)
except Exception as e:
logger.warn("Failed to update the update embed: %s", e)
selection = ['⬆', '⬇', '🇦', '🇧'].index(result[0].emoji)
if selection in (0, 1): # Navigation
if response.stage != 1: # Ignore selection
return
offset = 1 if selection else -1
response.selection_index = max(
min(response.selection_index + offset, len(response.updates)), 0)
else: # Action
if response.stage == 0: # First choice
if selection == 2: # Update core
response.stage = 10
changed = await _update_core(bot, _progress_function)
else: # Download plugins
response.stage = 1
plugins_repo = response.extra['repo'] or DEFAULT_PLUGINS_REPO
response.updates = await _download_plugins(bot, _progress_function, plugins_repo)
for index, update in enumerate(response.updates):
if update in response.plugin_list:
response.selected.append(index)
await asyncio.sleep(1)
elif response.stage == 1: # Plugins selected
if selection == 2: # Toggle selection
if response.selection_index in response.selected:
response.selected.remove(response.selection_index)
else:
response.selected.append(response.selection_index)
else: # Start update
if not response.selected: # None selected
return
response.stage = 2
update_list = [response.updates[it] for it in response.selected]
changed = await _update_plugins(bot, update_list, _progress_function)
await asyncio.sleep(1)
tooltip = None
if response.stage == 10: # Core finished updating
if changed:
if isinstance(changed, int):
title = 'A package failed to install'
result = 'Pip return code: {}\nCheck console output.'.format(changed)
else:
title = 'Core config file issue(s) detected'
result = '\n'.join(changed)
response.embed.set_footer(text='Core update interrupted.')
else:
title = 'Core updated'
result = 'No issues detected. Restart to complete update.'
elif response.stage == 1: # Select plugins
title = 'Select plugins'
result_list = []
for index, plugin in enumerate(response.updates):
arrow = '→ ' if index == response.selection_index else '\u200b\u3000 '
wrap, tick = ('**', '> ') if index in response.selected else ('', '')
result_list.append('{0}{1}`{2}{3}`{1}'.format(arrow, wrap, tick, plugin))
result = '\n'.join(result_list)
tooltip = ['Toggle selection', 'Update selected']
elif response.stage == 2: # Finished updating plugins
if changed:
title = 'Config file issue(s) detected'
result_list = []
for plugin, issues in changed.items():
                result_list.append('{}:\n\t{}'.format(plugin, '\n\t'.join(issues)))
result = '\n'.join(result_list)
else:
title = 'Plugins updated'
result = 'No issues detected. Restart to complete update.'
if title:
response.embed.set_field_at(0, name=title, value=result, inline=False)
if tooltip:
tooltip = ':regional_indicator_a: : {}\n:regional_indicator_b: : {}'.format(*tooltip)
else:
tooltip = '\u200b'
response.embed.set_field_at(1, name='\u200b', value=tooltip, inline=False)
await response.message.edit(embed=response.embed)
if response.stage in (10, 2): # Finish update
return False
async def botowner_wrapper(bot, context):
message, _, subcommand, options, arguments = context[:5]
response = Response()
if subcommand.index == 0: # Halt
await message.channel.send("Going down...")
bot.shutdown()
elif subcommand.index == 1: # Reload
response.content = "Reloading..."
response.message_type = MessageTypes.ACTIVE
response.extra_function = handle_active_message
response.extra = ('reload', arguments)
elif subcommand.index == 2: # IP
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80)) # Thanks Google
ip = s.getsockname()[0]
s.close()
response.content = "Local IP: " + ip
elif subcommand.index == 3: # Backup
utilities.make_backup(bot)
response.content = "Manual backup file:"
if not bot.docker_mode:
response.content = (
"**NOTE:** Database dumps are only available "
"in Docker mode.\n{}".format(response.content))
response.file = discord.File('{}/temp/backup1.zip'.format(bot.path))
elif subcommand.index == 4: # Restore
try:
location = await utilities.download_url(
bot, message.attachments[0].url, extension='zip')
except Exception as e:
raise CBException("Failed to download the file.", e=e)
utilities.restore_backup(bot, location)
response.content = "Restored backup file."
elif subcommand.index == 5: # DB Restore
if not bot.docker_mode:
raise CBException("Database restores can only be made in Docker mode.")
try:
location = await utilities.download_url(
bot, message.attachments[0].url, filename='db_dump')
except Exception as e:
raise CBException("Failed to download the file.", e=e)
tables = context.arguments if context.arguments[0] else list()
exit_code = utilities.restore_db_backup(bot, tables=tables)
response.content = "Restore exit code: `{}`".format(exit_code)
elif subcommand.index == 6: # Blacklist
if not arguments[0]:
blacklist = data.get(bot, 'core', 'blacklist')
if not blacklist:
raise CBException("The blacklist is empty.")
converted_users = [await data.fetch_member(bot, it, safe=True) for it in blacklist]
readable_list = []
            for user_id, user in zip(blacklist, converted_users):
                if user:
                    text = '{0.mention} ({0})'.format(user)
                else:
                    text = '{} [Not found]'.format(user_id)
readable_list.append(text)
response.embed = discord.Embed(
color=discord.Color(0xdd2e44), title='Blacklisted users',
description='\n'.join(readable_list))
else:
user = arguments[0]
blacklisted = data.list_data_toggle(bot, 'core', 'blacklist', user.id)
response.embed = discord.Embed(
color=discord.Color(0xdd2e44 if blacklisted else 0x77b255),
title='{}lacklisted user'.format('B' if blacklisted else 'Unb'),
description='{0.mention} ({0})'.format(user))
elif subcommand.index == 7: # Toggle feedback
status = data.get(bot, 'core', 'feedbackdisabled', default=False)
action = "enabled" if status else "disabled"
data.add(bot, 'core', 'feedbackdisabled', not status)
response.content = "Feedback has been {}.".format(action)
elif subcommand.index == 8: # Announcement
if arguments[0]:
data.add(bot, 'core', 'announcement', [arguments[0], int(time.time())])
response.content = "Announcement set!"
else:
data.remove(bot, 'core', 'announcement')
response.content = "Announcement cleared!"
elif subcommand.index == 9: # Update
response.embed = discord.Embed(
title=':arrow_up: Update', description='', colour=discord.Color(0x3b88c3))
tooltip = ':regional_indicator_a: : Update core\n:regional_indicator_b: : Update plugins'
response.embed.add_field(name='Update Wizard 95 ready', value='\u200b', inline=False)
response.embed.add_field(name='\u200b', value=tooltip, inline=False)
response.embed.set_footer(text='---')
response.message_type = MessageTypes.INTERACTIVE
response.extra_function = update_menu
response.extra = {'buttons': ['❌', '⬆', '⬇', '🇦', '🇧'], 'repo': arguments[0]}
response.selection_index = 0
response.plugin_list = list(bot.plugins)[1:]
response.updates = []
response.selected = []
response.stage = 0
elif subcommand.index == 10: # Maintenance
if bot.maintenance_mode:
bot.maintenance_mode = 0
bot.maintenance_message = ''
await bot.change_presence(status=discord.Status.online)
response.content = "Maintenance mode disabled."
else:
silent = 'silent' in options
bot.maintenance_mode = 2 if silent else 1
bot.maintenance_message = arguments[0]
game = discord.Game('⚠️ Maintenance mode{}'.format(
(': ' + arguments[0][:100]) if arguments[0] else ''))
await bot.change_presence(activity=game, status=discord.Status.dnd)
response.content = "Maintenance mode enabled{}.".format(' (silent)' if silent else '')
return response
async def debug_wrapper(bot, context):
message, _, subcommand, options, arguments, _, cleaned_content = context[:7]
response, message_type, extra = ('', MessageTypes.NORMAL, None)
global global_dictionary
if subcommand.index == 0: # List plugins
plugins = list(bot.plugins.keys())
plugins.sort()
response = '```\n{}```'.format(plugins)
elif subcommand.index == 1: # Plugin information
if options['plugin'] not in bot.plugins:
raise CBException(options['plugin'] + " not found.")
else:
plugin = bot.plugins[options['plugin']]
version = getattr(plugin, '__version__', 'Unknown')
has_flag = getattr(plugin, 'uses_configuration', False)
response = (
"```\nPlugin information for: {0}\n"
"Version: {1}\nConfig: {2}\nDir: {3}\n```").format(
options['plugin'], version, has_flag, dir(plugin))
elif subcommand.index == 2: # Latency
message_type = MessageTypes.ACTIVE
response = "Testing latency time..."
extra = ('ping', time.time() * 1000)
elif subcommand.index == 3: # Upload logs
await utilities.upload_logs(bot)
response = "Logs uploaded to the debug channel."
elif subcommand.index == 4: # Toggle debug mode
if bot.debug: # Remove handlers
to_remove = []
for handler in logging.root.handlers:
if 'jb_debug' in handler.get_name():
to_remove.append(handler)
for handler in to_remove:
logging.root.removeHandler(handler)
logging.root.setLevel(logging.WARN)
bot.debug = False
response = 'Debug mode is now off.'
else: # Add handlers
log_file = '{}/temp/debug_logs.txt'.format(bot.path)
if os.path.isfile(log_file):
shutil.copy2(log_file, '{}/temp/last_debug_logs.txt'.format(bot.path))
file_handler = RotatingFileHandler(log_file, maxBytes=5000000, backupCount=5)
file_handler.set_name('jb_debug_file')
stream_handler = logging.StreamHandler()
stream_handler.set_name('jb_debug_stream')
logging.basicConfig(level=logging.DEBUG, handlers=[file_handler, stream_handler])
bot.debug = True
response = 'Debug mode is now on.'
elif subcommand.index == 5: # Reset local dictionary
_setup_debug_environment(bot)
response = "Debug environment local dictionary reset."
elif subcommand.index == 6: # Repl thingy
global_dictionary['bot'] = bot
global_dictionary['message'] = message
global_dictionary['author'] = message.author
global_dictionary['channel'] = message.channel
global_dictionary['guild'] = message.guild
# Cleaning up input
arguments = cleaned_content[6:].lstrip()
if arguments.startswith('```py\n') and arguments.endswith('```'):
arguments = arguments[6:-3]
else:
arguments = arguments.strip('`')
pass_in = [arguments, global_dictionary]
# Check if the previous result should be sent as a file
if arguments in ('saf', 'file'):
await utilities.send_text_as_file(
message.channel, str(global_dictionary['_']), 'result')
else:
used_exec = False
try: # Try to execute arguments
if '\n' in arguments:
exec(*pass_in)
used_exec = True
else:
try:
if arguments.startswith('await '):
pass_in[0] = arguments[6:]
global_dictionary['_'] = await eval(*pass_in)
else:
global_dictionary['_'] = eval(*pass_in)
except SyntaxError: # May need to use exec
exec(*pass_in)
used_exec = True
except BotException as e:
response = str(e)
except Exception as e:
global_dictionary['last_exception'] = e
global_dictionary['last_traceback'] = traceback.format_exc()
response = '`{0}: {1}`'.format(type(e).__name__, e)
else: # Get response if it exists
if used_exec:
result = 'Executed.'
elif global_dictionary['_'] is None:
result = 'Evaluated. (returned None)'
else:
result = str(global_dictionary['_'])
if len(result) >= 1980:
raise CBException("Exec result is too long. (try 'file')")
if '\n' in result: # Better formatting
response = '```py\n{}```'.format(result)
else: # One line response
response = '`{}`'.format(result)
return Response(
content=response, message_type=message_type,
extra=extra, extra_function=handle_active_message)
async def help_menu(bot, context, response, result, timed_out):
invoker = utilities.get_invoker(bot, guild=None if response.destination else context.guild)
if timed_out:
response.embed.add_field(
name=":information_source: The menu timed out",
value="Type `{}help` to start again.".format(invoker), inline=False)
await response.message.edit(embed=response.embed)
return
elif not result:
return
selection = ['↩', '⬅', '➡', '1⃣', '2⃣', '3⃣', '4⃣', '5⃣'].index(result[0].emoji)
if selection == 0: # Back
if response.backtrack:
previous_entry = response.backtrack.pop()
else:
previous_entry = [None]*3 + [0]
response.current_state = previous_entry
embed_details = plugins.get_help(
bot, *previous_entry, guild=context.guild, elevation=context.elevation)
elif selection in (1, 2): # Page navigation
new_page = response.current_state[3] + (1 if selection == 2 else -1)
test_state = response.current_state[:3] + [new_page]
embed_details = plugins.get_help(
bot, *test_state, guild=context.guild, elevation=context.elevation)
else: # Entry selection
if response.current_state[2] is not None: # Subcommand index given
return # No more things to select
elif response.current_state[1] is not None: # Command index given
selection_type_index = 2 # Choose subcommand
elif response.current_state[0] is not None: # Category ID given
selection_type_index = 1
else: # Nothing given
selection_type_index = 0 # Choose category
page_compensation = response.current_state[3] * 5
test_state = response.current_state[:3] + [0]
test_state[selection_type_index] = page_compensation + selection - 3
embed_details = plugins.get_help(
bot, *test_state, guild=context.guild, elevation=context.elevation)
if embed_details: # Successful selection
response.backtrack.append(response.current_state)
response.current_state = test_state
if not embed_details:
return
embed_fields, page, total_pages = embed_details
response.current_state[3] = page
response.embed.clear_fields()
for name, value in embed_fields:
response.embed.add_field(name=name, value=value.format(invoker=invoker), inline=False)
response.embed.add_field(
value='Page [ {} / {} ]'.format(page+1, total_pages+1), name='\u200b', inline=False)
await response.message.edit(embed=response.embed)
async def manual_menu(bot, context, response, result, timed_out):
invoker_guild = None if response.destination else context.guild
invoker = utilities.get_invoker(bot, guild=invoker_guild)
if timed_out:
response.embed.add_field(
name=":information_source: The menu timed out",
value="Type `{}manual` to start again.".format(invoker), inline=False)
await response.message.edit(embed=response.embed)
return
elif not result:
return
selection = ['↩', '⬅', '➡', '1⃣', '2⃣', '3⃣', '4⃣', '5⃣'].index(result[0].emoji)
if selection == 0: # Back
if response.backtrack:
previous_entry = response.backtrack.pop()
else:
previous_entry = [None]*3
response.current_state = previous_entry
embed_details = plugins.get_manual(bot, *previous_entry, guild=invoker_guild)
elif selection in (1, 2): # Page navigation
new_page = response.current_state[2] + (1 if selection == 2 else -1)
test_state = response.current_state[:2] + [new_page]
embed_details = plugins.get_manual(bot, *test_state, guild=invoker_guild)
if not embed_details: # Ignore page change failure
return
else: # Entry selection
if response.current_state[1] is not None: # Topic given
return # No more things to select
elif response.current_state[0] is not None: # Subject given
selection_type_index = 1 # Choose topic
else: # Nothing given
selection_type_index = 0 # Choose subject
page_compensation = response.current_state[2] * 5
test_state = response.current_state[:2] + [0]
test_state[selection_type_index] = page_compensation + selection - 3
embed_details = plugins.get_manual(bot, *test_state, guild=invoker_guild)
if embed_details: # Successful selection
response.backtrack.append(response.current_state)
response.current_state = test_state
else: # Ignore selection failure
return
crumbs, text, page, total_pages = embed_details
response.current_state[2] = page
response.embed.set_field_at(0, name=crumbs, value=text, inline=False)
response.embed.set_field_at(
1, value='Page [ {} / {} ]'.format(page+1, total_pages+1), name='\u200b', inline=False)
await response.message.edit(embed=response.embed)
async def help_wrapper(bot, context):
message, _, subcommand, options, arguments = context[:5]
response = Response()
is_owner = data.is_owner(bot, message.author.id)
help_here = 'here' in options
invoker_guild = context.guild if (context.direct or help_here or bot.selfbot) else None
invoker = utilities.get_invoker(bot, guild=invoker_guild)
if subcommand.index == 0: # Manual
if not bot.manuals:
raise CBException("There are no manual entries.")
if arguments[0]: # Load from given state
try:
subject_test = int(arguments[0]) - 1
except:
subject_test = arguments[0]
if arguments[1] is not None:
arguments[1] -= 1
if arguments[2] is not None:
arguments[2] -= 1
state = [subject_test, arguments[1], arguments[2]]
embed_details = plugins.get_manual(bot, *state, safe=False, guild=invoker_guild)
else: # Load menu from scratch
embed_details = plugins.get_manual(bot, guild=invoker_guild)
state = [None]*3
crumbs, text, page, total_pages = embed_details
state[2] = page
response.backtrack = []
response.current_state = state
embed = discord.Embed(title=':page_facing_up: Manual', colour=discord.Colour(0xccd6dd))
embed.add_field(name=crumbs, value=text, inline=False)
embed.add_field(
value='Page [ {} / {} ]'.format(page+1, total_pages+1), name='\u200b', inline=False)
response.message_type = MessageTypes.INTERACTIVE
response.extra_function = manual_menu
response.extra = {'buttons': ['↩', '⬅', '➡', '1⃣', '2⃣', '3⃣', '4⃣', '5⃣']}
response.embed = embed
elif subcommand.index == 1: # All help
response.content = "Serving up all the help:"
base_list = []
for command in bot.commands.values():
if isinstance(command, Command):
if not command.hidden or context.elevation >= 3:
base_list.append(command)
base_list.sort()
help_list = ["### Command quick-reference ###\r\n"]
for command in base_list:
help_list.append(command.clean_quick_help.replace('\n', '\r\n'))
help_list.append("\r\n\r\n### Individual command reference ###")
for command in base_list:
help_list.append("\r\n# {} #".format(command.base))
help_list.append(
"\t" + command.clean_help_string.replace('\n', '\r\n\t'))
help_text = '\r\n'.join(help_list)
help_file = discord.File(utilities.get_text_as_file(help_text), filename='help.txt')
response.content = "Here is all of the help as a file:"
response.file = help_file
elif subcommand.index == 2: # Help
response.embed = discord.Embed(
title=':grey_question: Help', colour=discord.Colour(0xccd6dd))
if arguments[0]: # Specified help
guess = None
if arguments[1]:
try:
command = bot.commands[arguments[0].lower()]
index = int(arguments[1]) - 1
assert 0 <= index < len(command.subcommands)
guess = command.subcommands[index]
except: # Ignore invalid index or shortcut
pass
if guess is None:
text = arguments[0] + ' ' + arguments[1]
guess = await parser.guess_command(
bot, text, message, safe=False, substitute_shortcuts=False)
for name, value in guess.help_embed_fields:
response.embed.add_field(
name=name, value=value.format(invoker=invoker), inline=False)
help_here = True
else: # Help menu
state = [None]*3 + [0]
embed_fields, page, total_pages = plugins.get_help(
bot, *state, guild=message.guild, elevation=context.elevation)
for name, value in embed_fields:
response.embed.add_field(name=name, value=value, inline=False)
response.embed.add_field(
value='Page [ {} / {} ]'.format(page+1, total_pages+1),
name='\u200b', inline=False)
response.embed.set_footer(
text='Confused about the syntax? Read {}manual core 3'.format(invoker))
response.backtrack = []
response.current_state = state
response.message_type = MessageTypes.INTERACTIVE
response.extra_function = help_menu
response.extra = {'buttons': ['↩', '⬅', '➡', '1⃣', '2⃣', '3⃣', '4⃣', '5⃣']}
if not (context.direct or help_here or bot.selfbot):
try:
await message.add_reaction('📨') # Envelope reaction
except:
pass
response.destination = context.author
return response
async def get_ping(bot, context):
if context.arguments[0]:
return Response(content='Pong!\n{}'.format(
utilities.filter_everyone(context.arguments[0])))
else:
return Response(content='Pong! ({} ms)'.format(int(bot.latency * 1000)))
async def handle_active_message(bot, context, response):
"""
This function is called if the given message was marked as active
(message_type of 3).
"""
if response.extra[0] == 'ping':
latency_time = "Latency time: {:.2f} ms".format((time.time() * 1000) - response.extra[1])
await response.message.edit(content=latency_time)
elif response.extra[0] == 'reload':
# Preliminary check
plugins_to_reload = []
if response.extra[1][0]:
for plugin_name in response.extra[1]:
if plugin_name in bot.plugins:
plugins_to_reload.append(plugin_name)
else:
raise CBException("Invalid plugin.", plugin_name)
else:
plugins_to_reload = list(bot.plugins.keys())
plugins_to_reload.remove('core')
data.save_data(bot) # Safety save
logger.info("Reloading plugins and commands...")
for plugin_name in plugins_to_reload:
plugins.load_plugin(bot, plugin_name)
# TODO: Resetting volatile data can screw up other plugins
bot.volatile_data = {'global_users': {}, 'global_plugins': {}}
data.check_all(bot)
for plugin_name in plugins_to_reload:
plugin = bot.plugins[plugin_name]
if hasattr(plugin, 'bot_on_ready_boot'):
asyncio.ensure_future(plugin.bot_on_ready_boot(bot))
await response.message.edit(content='Reloaded {} plugin{}.'.format(
len(plugins_to_reload), '' if len(plugins_to_reload) == 1 else 's'))
def _setup_debug_environment(bot):
"""Resets the local dictionary for the debug command."""
global global_dictionary
import pprint
import jshbot
def say(*args, **kwargs):
message = global_dictionary.get('message')
task = asyncio.ensure_future(message.channel.send(*args, **kwargs))
def f(future):
global_dictionary['_'] = future.result()
task.add_done_callback(f)
async def asay(*args, **kwargs):
message = global_dictionary.get('message')
return await message.channel.send(*args, **kwargs)
global_dictionary = {
'bot': bot,
'inspect': inspect,
'traceback': traceback,
'last_traceback': None,
'last_exception': None,
'_': None,
'say': say,
'asay': asay,
'pformat': pprint.pformat,
'random': random,
'time': time,
're': re,
'discord': discord,
'jbu': utilities,
'jbd': data,
'jb': jshbot
}
@plugins.permissions_spawner
def setup_permissions(bot):
return {
'read_messages': "Standard.",
'send_messages': "Standard.",
'manage_messages': "Deletes messages and reactions of certain commands. (Framework)",
        'attach_files': "Uploads responses longer than 2000 characters as a text file.",
'read_message_history': "Gets chat context when bot moderators change settings.",
'connect': "Allows the bot to connect to voice channels. (Framework)",
'speak': "Allows the bot to speak. (Framework)",
'add_reactions': "Allows for interactive menus. (Framework)",
'embed_links': "Allows for embedded messages. (Framework)",
}
# Standard discord.py event functions
@plugins.listen_for('bot_on_ready_boot')
async def setup_debug_environment(bot):
"""Sets up the debug environment (and logger for the dev bot)."""
_setup_debug_environment(bot)
if not bot.use_verbose_output:
for handler in logger.handlers:
if handler.get_name() == 'jb_log_stream':
handler.setLevel(logging.INFO)
@plugins.listen_for('on_guild_join')
async def notify_server_owners(bot, guild):
"""Notifies server owners about the bot joining their guild."""
# Add guild to the list
logger.info("Joining guild")
data.add_guild(bot, guild)
if bot.selfbot: # Don't send DMs if in selfbot mode
return
invoker = utilities.get_invoker(bot)
text = (
"Hello! You are receiving this notification because this bot was "
"added to one of your servers, specifically '{0.name}' (ID: {0.id}). "
"If you are aware of this and approve of the addition, feel free to "
"continue and use the bot. However, if you did not approve of this "
"addition, it is highly recommended you kick or ban this bot as there "
"may be potential for users to use the bot to spam. Only users that "
"have the administrator permission can add bots to servers. "
"Unfortunately, there is no way to track who added the bot.\n\n"
"To read more about the functionality and usage of the bot, type "
"`{1}manual` to see a list of topics, and `{1}help` to see a list of "
"commands. **As a server owner, it is highly recommended that you "
"read `{1}manual core 5` and `{1}manual core 4` for moderating and "
"configuring the bot.**\n\nThat's all for now. If you have any questions, "
"please refer to the manual, or send the bot owners a message using "
"`{1}owner feedback <message>`.\n\nCheck out the Wiki for more: "
"https://github.com/jkchen2/JshBot/wiki").format(guild, invoker)
guild_owner = await data.fetch_member(bot, guild.owner_id, guild=guild)
await guild_owner.send(text)
@plugins.listen_for('on_message_edit')
async def check_edited_commands(bot, before, after):
"""Integrates with the core to handle edited messages."""
if before.content != after.content and before.id in bot.edit_dictionary:
message_reference = bot.edit_dictionary.pop(before.id)
await bot.on_message(after, replacement_message=message_reference)
@plugins.listen_for('on_error')
async def on_error(bot, event, *args, **kwargs):
"""Gets uncaught exceptions."""
logger.error(
"An exception was thrown that wasn't handled by the core. \n"
"Event: {0}\nargs: {1}\nkwargs: {2}".format(event, args, kwargs))
logger.error(traceback.format_exc())
|
the-stack_106_21615
|
# Copyright 2020 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""TF Ranking sample code for LETOR datasets in LibSVM format.
WARNING: All data sets are loaded into memory in this sample code. It is
for small data sets whose sizes are < 10G.
A note on the LibSVM format:
--------------------------------------------------------------------------
Due to the sparse nature of features utilized in most academic datasets for
learning to rank such as LETOR datasets, data points are represented in the
LibSVM format. In this setting, every line encapsulates features and a (graded)
relevance judgment of a query-document pair. The following illustrates the
general structure:
<relevance int> qid:<query_id int> [<feature_id int>:<feature_value float>]
For example:
1 qid:10 32:0.14 48:0.97 51:0.45
0 qid:10 1:0.15 31:0.75 32:0.24 49:0.6
2 qid:10 1:0.71 2:0.36 31:0.58 51:0.12
0 qid:20 4:0.79 31:0.01 33:0.05 35:0.27
3 qid:20 1:0.42 28:0.79 35:0.30 42:0.76
In the above example, the dataset contains two queries. Query "10" has 3
documents, two of which are relevant with grades 1 and 2. Similarly, query "20"
has 1 relevant document. Note that query-document pairs may have different
sets of zero-valued features and as such their feature vectors may only
partly overlap or not at all.
--------------------------------------------------------------------------
You can use TensorBoard to display the training results stored in $OUTPUT_DIR.
Notes:
* Use --alsologtostderr if the output is not printed to the screen.
* In addition, you can enable multi-objective learning by adding the following
flags: --secondary_loss=<the secondary loss key>.
"""
from absl import flags
import numpy as np
import six
import tensorflow as tf
import tensorflow_ranking as tfr
flags.DEFINE_string("train_path", None, "Input file path used for training.")
flags.DEFINE_string("vali_path", None, "Input file path used for validation.")
flags.DEFINE_string("test_path", None, "Input file path used for testing.")
flags.DEFINE_string("output_dir", None, "Output directory for models.")
flags.DEFINE_integer("train_batch_size", 32, "The batch size for training.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of steps for training.")
flags.DEFINE_float("learning_rate", 0.1, "Learning rate for optimizer.")
flags.DEFINE_float("dropout_rate", 0.5, "The dropout rate before output layer.")
flags.DEFINE_list("hidden_layer_dims", ["256", "128", "64"],
"Sizes for hidden layers.")
flags.DEFINE_integer("num_features", 136, "Number of features per document.")
flags.DEFINE_integer("list_size", 100, "List size used for training.")
flags.DEFINE_integer("group_size", 1, "Group size used in score function.")
flags.DEFINE_string("loss", "softmax_loss",
"The RankingLossKey for the primary loss function.")
flags.DEFINE_string(
"secondary_loss", None, "The RankingLossKey for the secondary loss for "
"multi-objective learning.")
flags.DEFINE_float(
"secondary_loss_weight", 0.5, "The weight for the secondary loss in "
"multi-objective learning.")
flags.DEFINE_bool("serank", False, "serank")
flags.DEFINE_bool("query_label_weight", False, "use query label weight")
flags.DEFINE_float('shrinkage', 2.0, 'se block shrinkage')
flags.DEFINE_bool("shrink_first", False, "se block with shrink first")
flags.DEFINE_bool("without_squeeze", False, "se block without squeeze operation")
flags.DEFINE_bool("without_excite", False, "se block without excite operation")
FLAGS = flags.FLAGS
_PRIMARY_HEAD = "primary_head"
_SECONDARY_HEAD = "secondary_head"
def _use_multi_head():
"""Returns True if using multi-head."""
return FLAGS.secondary_loss is not None
class IteratorInitializerHook(tf.estimator.SessionRunHook):
"""Hook to initialize data iterator after session is created."""
def __init__(self):
super(IteratorInitializerHook, self).__init__()
self.iterator_initializer_fn = None
def after_create_session(self, session, coord):
"""Initialize the iterator after the session has been created."""
del coord
self.iterator_initializer_fn(session)
def example_feature_columns(with_mask=False):
"""Returns the example feature columns."""
feature_names = ["{}".format(i + 1) for i in range(FLAGS.num_features)]
if with_mask:
feature_names.append('mask')
return {
name:
tf.feature_column.numeric_column(name, shape=(1,), default_value=0.0)
for name in feature_names
}
def load_libsvm_data(path, list_size):
"""Returns features and labels in numpy.array."""
def _parse_line(line):
"""Parses a single line in LibSVM format."""
tokens = line.split("#")[0].split()
assert len(tokens) >= 2, "Ill-formatted line: {}".format(line)
label = float(tokens[0])
qid = tokens[1]
kv_pairs = [kv.split(":") for kv in tokens[2:]]
features = {k: float(v) for (k, v) in kv_pairs}
return qid, features, label
tf.compat.v1.logging.info("Loading data from {}".format(path))
# The 0-based index assigned to a query.
qid_to_index = {}
# The number of docs seen so far for a query.
qid_to_ndoc = {}
# Each feature is mapped to an array of shape [num_queries, list_size, 1]. The
# label array has shape [num_queries, list_size]. We use a list for each of them
# because the number of queries is unknown up front (see the worked example
# after this function).
feature_map = {k: [] for k in example_feature_columns()}
label_list = []
total_docs = 0
discarded_docs = 0
with open(path, "rt") as f:
for line in f:
qid, features, label = _parse_line(line)
if qid not in qid_to_index:
# Create index and allocate space for a new query.
qid_to_index[qid] = len(qid_to_index)
qid_to_ndoc[qid] = 0
for k in feature_map:
feature_map[k].append(np.zeros([list_size, 1], dtype=np.float32))
label_list.append(np.ones([list_size], dtype=np.float32) * -1.)
total_docs += 1
batch_idx = qid_to_index[qid]
doc_idx = qid_to_ndoc[qid]
qid_to_ndoc[qid] += 1
# Keep the first 'list_size' docs only.
if doc_idx >= list_size:
discarded_docs += 1
continue
for k, v in six.iteritems(features):
assert k in feature_map, "Key {} not found in features.".format(k)
feature_map[k][batch_idx][doc_idx, 0] = v
label_list[batch_idx][doc_idx] = label
tf.compat.v1.logging.info("Number of queries: {}".format(len(qid_to_index)))
tf.compat.v1.logging.info(
"Number of documents in total: {}".format(total_docs))
tf.compat.v1.logging.info(
"Number of documents discarded: {}".format(discarded_docs))
# Convert everything to np.array.
for k in feature_map:
feature_map[k] = np.array(feature_map[k])
return feature_map, np.array(label_list)
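# Worked example of the parsing above, using the sample line from the module
# docstring (illustrative values, not real data):
#   _parse_line('1 qid:10 32:0.14 48:0.97 51:0.45')
#   -> ('qid:10', {'32': 0.14, '48': 0.97, '51': 0.45}, 1.0)
# With list_size=100, the loader then returns feature arrays of shape
# [num_queries, 100, 1] and a label array of shape [num_queries, 100], padded
# with -1. for missing documents.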
def get_train_inputs(features, labels, batch_size):
"""Set up training input in batches."""
iterator_initializer_hook = IteratorInitializerHook()
def _train_input_fn():
"""Defines training input fn."""
# mask out invalid samples for serank
features['mask'] = np.where(labels >= 0, 1.0, 0.0).astype(labels.dtype)
valid_label = np.where(labels > 0.0, labels, 0.0)
features['query_label_weight'] = np.sum(valid_label, axis=1, keepdims=True)
features_placeholder = {
k: tf.compat.v1.placeholder(v.dtype, v.shape)
for k, v in six.iteritems(features)
}
if _use_multi_head():
placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)
labels_placeholder = {
_PRIMARY_HEAD: placeholder,
_SECONDARY_HEAD: placeholder,
}
else:
labels_placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)
dataset = tf.data.Dataset.from_tensor_slices(
(features_placeholder, labels_placeholder))
dataset = dataset.shuffle(1000).repeat().batch(batch_size)
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
if _use_multi_head():
feed_dict = {
labels_placeholder[head_name]: labels
for head_name in labels_placeholder
}
else:
feed_dict = {labels_placeholder: labels}
feed_dict.update(
{features_placeholder[k]: features[k] for k in features_placeholder})
iterator_initializer_hook.iterator_initializer_fn = (
lambda sess: sess.run(iterator.initializer, feed_dict=feed_dict))
return iterator.get_next()
return _train_input_fn, iterator_initializer_hook
def get_eval_inputs(features, labels):
"""Set up eval inputs in a single batch."""
iterator_initializer_hook = IteratorInitializerHook()
def _eval_input_fn():
"""Defines eval input fn."""
# mask out invalid samples for serank
features['mask'] = np.where(labels >= 0, 1.0, 0.0).astype(labels.dtype)
features_placeholder = {
k: tf.compat.v1.placeholder(v.dtype, v.shape)
for k, v in six.iteritems(features)
}
if _use_multi_head():
placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)
labels_placeholder = {
_PRIMARY_HEAD: placeholder,
_SECONDARY_HEAD: placeholder,
}
else:
labels_placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)
dataset = tf.data.Dataset.from_tensors(
(features_placeholder, labels_placeholder))
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
if _use_multi_head():
feed_dict = {
labels_placeholder[head_name]: labels
for head_name in labels_placeholder
}
else:
feed_dict = {labels_placeholder: labels}
feed_dict.update(
{features_placeholder[k]: features[k] for k in features_placeholder})
iterator_initializer_hook.iterator_initializer_fn = (
lambda sess: sess.run(iterator.initializer, feed_dict=feed_dict))
return iterator.get_next()
return _eval_input_fn, iterator_initializer_hook
def make_serving_input_fn():
"""Returns serving input fn to receive tf.Example."""
feature_spec = tf.feature_column.make_parse_example_spec(
example_feature_columns().values())
return tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec)
def make_transform_fn():
"""Returns a transform_fn that converts features to dense Tensors."""
def _transform_fn(features, mode):
"""Defines transform_fn."""
if mode == tf.estimator.ModeKeys.PREDICT:
# We expect tf.Example as input during serving. In this case, group_size
# must be set to 1.
if FLAGS.group_size != 1:
raise ValueError(
"group_size should be 1 to be able to export model, but get %s" %
FLAGS.group_size)
context_features, example_features = (
tfr.feature.encode_pointwise_features(
features=features,
context_feature_columns=None,
example_feature_columns=example_feature_columns(),
mode=mode,
scope="transform_layer"))
else:
context_features, example_features = tfr.feature.encode_listwise_features(
features=features,
context_feature_columns=None,
example_feature_columns=example_feature_columns(with_mask=True),
mode=mode,
scope="transform_layer")
return context_features, example_features
return _transform_fn
def make_se_block_fn(list_size, shrinkage=1.0, shrink_first=False, without_squeeze=False, without_excite=False):
def squeeze(cur_layer, mask, last_dim):
# output shape: [batch_size, last_dim]
cur_layer = tf.reshape(cur_layer, [-1, list_size, last_dim])
if mask is None:
cur_layer = tf.reduce_mean(cur_layer, axis=1)
else:
# when training & eval, mask out padding records
mask = tf.reshape(mask, [-1, list_size, 1])
cur_layer = tf.reduce_sum(cur_layer * mask, axis=1) / tf.reduce_sum(mask + 1e-6, axis=1)
return cur_layer
def se_block_fn(input_layer, layer_width, mask):
dim = int(layer_width / shrinkage)
if shrink_first:
cur_layer = tf.compat.v1.layers.dense(input_layer, units=dim)
cur_layer = tf.nn.relu(cur_layer)
if not without_squeeze:
cur_layer = squeeze(cur_layer, mask, dim)
cur_layer = tf.reshape(tf.tile(cur_layer, [1, list_size]), [-1, list_size, dim])
else:
cur_layer = input_layer
if not without_squeeze:
cur_layer = squeeze(cur_layer, mask, layer_width)
cur_layer = tf.compat.v1.layers.dense(cur_layer, units=dim)
cur_layer = tf.nn.relu(cur_layer)
cur_layer = tf.reshape(tf.tile(cur_layer, [1, list_size]), [-1, list_size, dim])
cur_layer = tf.compat.v1.layers.dense(cur_layer, units=layer_width)
if without_excite:
cur_layer = tf.concat([input_layer, cur_layer], axis=-1)
else:
excitation = tf.reshape(tf.nn.sigmoid(cur_layer), [-1, layer_width])
cur_layer = input_layer * excitation
return cur_layer
return se_block_fn
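# Shape walk-through for the default path (shrink_first=False), assuming
# layer_width=64, shrinkage=2.0 and list_size=100: squeeze() pools the
# [batch * 100, 64] activations over each list into [batch, 64], a dense layer
# shrinks 64 -> 32, the result is tiled back across the list and projected to
# 64 again, and the sigmoid excitation of shape [batch * 100, 64] gates the
# input element-wise.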
def make_score_fn(se_block_fn=None):
"""Returns a groupwise score fn to build `EstimatorSpec`."""
def _score_fn(unused_context_features, group_features, mode, unused_params,
unused_config):
"""Defines the network to score a group of documents."""
with tf.compat.v1.name_scope("input_layer"):
group_input = [
tf.compat.v1.layers.flatten(group_features[name])
for name in sorted(example_feature_columns())
]
input_layer = tf.concat(group_input, 1)
tf.compat.v1.summary.scalar("input_sparsity",
tf.nn.zero_fraction(input_layer))
tf.compat.v1.summary.scalar("input_max",
tf.reduce_max(input_tensor=input_layer))
tf.compat.v1.summary.scalar("input_min",
tf.reduce_min(input_tensor=input_layer))
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
cur_layer = tf.compat.v1.layers.batch_normalization(
input_layer, training=is_training)
for i, layer_width in enumerate(int(d) for d in FLAGS.hidden_layer_dims):
cur_layer = tf.compat.v1.layers.dense(cur_layer, units=layer_width)
cur_layer = tf.compat.v1.layers.batch_normalization(
cur_layer, training=is_training)
cur_layer = tf.nn.relu(cur_layer)
tf.compat.v1.summary.scalar("fully_connected_{}_sparsity".format(i),
tf.nn.zero_fraction(cur_layer))
if se_block_fn:
cur_layer = se_block_fn(cur_layer, layer_width, group_features.get('mask'))
cur_layer = tf.compat.v1.layers.dropout(
cur_layer, rate=FLAGS.dropout_rate, training=is_training)
logits = tf.compat.v1.layers.dense(cur_layer, units=FLAGS.group_size)
if _use_multi_head():
# Duplicate the logits for both heads.
return {_PRIMARY_HEAD: logits, _SECONDARY_HEAD: logits}
else:
return logits
return _score_fn
def get_eval_metric_fns():
"""Returns a dict from name to metric functions."""
metric_fns = {}
metric_fns.update({
"metric/%s" % name: tfr.metrics.make_ranking_metric_fn(name) for name in [
tfr.metrics.RankingMetricKey.ARP,
tfr.metrics.RankingMetricKey.ORDERED_PAIR_ACCURACY,
]
})
metric_fns.update({
"metric/ndcg@%d" % topn: tfr.metrics.make_ranking_metric_fn(
tfr.metrics.RankingMetricKey.NDCG, topn=topn)
for topn in [1, 3, 5, 10]
})
return metric_fns
def train_and_eval():
"""Train and Evaluate."""
features, labels = load_libsvm_data(FLAGS.train_path, FLAGS.list_size)
train_input_fn, train_hook = get_train_inputs(features, labels,
FLAGS.train_batch_size)
features_vali, labels_vali = load_libsvm_data(FLAGS.vali_path,
FLAGS.list_size)
vali_input_fn, vali_hook = get_eval_inputs(features_vali, labels_vali)
features_test, labels_test = load_libsvm_data(FLAGS.test_path,
FLAGS.list_size)
test_input_fn, test_hook = get_eval_inputs(features_test, labels_test)
optimizer = tf.compat.v1.train.AdagradOptimizer(
learning_rate=FLAGS.learning_rate)
def _train_op_fn(loss):
"""Defines train op used in ranking head."""
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
minimize_op = optimizer.minimize(
loss=loss, global_step=tf.compat.v1.train.get_global_step())
train_op = tf.group([minimize_op, update_ops])
return train_op
weights_feature_name = 'query_label_weight' if FLAGS.query_label_weight else None
if _use_multi_head():
primary_head = tfr.head.create_ranking_head(
loss_fn=tfr.losses.make_loss_fn(FLAGS.loss, weights_feature_name=weights_feature_name),
eval_metric_fns=get_eval_metric_fns(),
train_op_fn=_train_op_fn,
name=_PRIMARY_HEAD)
secondary_head = tfr.head.create_ranking_head(
loss_fn=tfr.losses.make_loss_fn(FLAGS.secondary_loss, weights_feature_name=weights_feature_name),
eval_metric_fns=get_eval_metric_fns(),
train_op_fn=_train_op_fn,
name=_SECONDARY_HEAD)
ranking_head = tfr.head.create_multi_ranking_head(
[primary_head, secondary_head], [1.0, FLAGS.secondary_loss_weight])
else:
ranking_head = tfr.head.create_ranking_head(
loss_fn=tfr.losses.make_loss_fn(FLAGS.loss),
eval_metric_fns=get_eval_metric_fns(),
train_op_fn=_train_op_fn)
se_block_fn = None
if FLAGS.serank:
se_block_fn = make_se_block_fn(FLAGS.list_size,
shrinkage=FLAGS.shrinkage,
shrink_first=FLAGS.shrink_first,
without_squeeze=FLAGS.without_squeeze,
without_excite=FLAGS.without_excite)
estimator = tf.estimator.Estimator(
model_fn=tfr.model.make_groupwise_ranking_fn(
group_score_fn=make_score_fn(se_block_fn),
group_size=FLAGS.group_size,
transform_fn=make_transform_fn(),
ranking_head=ranking_head),
config=tf.estimator.RunConfig(
FLAGS.output_dir, save_checkpoints_steps=1000))
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn,
hooks=[train_hook],
max_steps=FLAGS.num_train_steps)
# Export model to accept tf.Example when group_size = 1.
if FLAGS.group_size == 1:
vali_spec = tf.estimator.EvalSpec(
input_fn=vali_input_fn,
hooks=[vali_hook],
steps=1,
exporters=tf.estimator.LatestExporter(
"latest_exporter",
serving_input_receiver_fn=make_serving_input_fn()),
start_delay_secs=0,
throttle_secs=30)
else:
vali_spec = tf.estimator.EvalSpec(
input_fn=vali_input_fn,
hooks=[vali_hook],
steps=1,
start_delay_secs=0,
throttle_secs=30)
# Train and validate
tf.estimator.train_and_evaluate(estimator, train_spec, vali_spec)
# Evaluate on the test data.
estimator.evaluate(input_fn=test_input_fn, hooks=[test_hook])
def main(_):
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
train_and_eval()
if __name__ == "__main__":
flags.mark_flag_as_required("train_path")
flags.mark_flag_as_required("vali_path")
flags.mark_flag_as_required("test_path")
flags.mark_flag_as_required("output_dir")
tf.compat.v1.app.run()
|
the-stack_106_21616
|
import pytest
import dask
import dask.multiprocessing
from dask import persist
import numpy as np
import dask.array as da
from dask_glm.algorithms import (newton, lbfgs, proximal_grad,
gradient_descent, admm)
from dask_glm.families import Logistic, Normal, Poisson
from dask_glm.regularizers import Regularizer
from dask_glm.utils import (sigmoid, make_y, maybe_to_cupy,
to_dask_cupy_array_xy)
def add_l1(f, lam):
def wrapped(beta, X, y):
return f(beta, X, y) + lam * (np.abs(beta)).sum()
return wrapped
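# For example, with lam = 0.1 and beta = [1.0, -2.0], the wrapped objective adds
# an L1 penalty of 0.1 * (|1.0| + |-2.0|) = 0.3 on top of f(beta, X, y).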
def make_intercept_data(N, p, seed=20009):
'''Given the desired number of observations (N) and
the desired number of variables (p), creates
random logistic data to test on.'''
# set the seeds
da.random.seed(seed)
np.random.seed(seed)
X = np.random.random((N, p + 1))
col_sums = X.sum(axis=0)
X = X / col_sums[None, :]
X[:, p] = 1
X = da.from_array(X, chunks=(N / 5, p + 1))
y = make_y(X, beta=np.random.random(p + 1))
return X, y
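# For instance, make_intercept_data(100, 2) yields a dask array X of shape
# (100, 3) (two column-normalized random features plus an all-ones intercept
# column, split into 5 row chunks) and labels y generated by make_y.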
@pytest.mark.parametrize('opt',
[lbfgs,
newton,
gradient_descent])
@pytest.mark.parametrize('N, p, seed,',
[(100, 2, 20009),
(250, 12, 90210),
(95, 6, 70605)])
@pytest.mark.parametrize('is_cupy', [True, False])
def test_methods(N, p, seed, opt, is_cupy):
X, y = make_intercept_data(N, p, seed=seed)
if is_cupy:
cupy = pytest.importorskip('cupy')
X, y = to_dask_cupy_array_xy(X, y, cupy)
coefs = opt(X, y)
p = sigmoid(X.dot(coefs).compute())
y_sum = y.compute().sum()
p_sum = p.sum()
assert np.isclose(y_sum, p_sum, atol=1e-1)
@pytest.mark.parametrize('func,kwargs', [
(newton, {'tol': 1e-5}),
(lbfgs, {'tol': 1e-8}),
(gradient_descent, {'tol': 1e-7}),
])
@pytest.mark.parametrize('N', [1000])
@pytest.mark.parametrize('nchunks', [1, 10])
@pytest.mark.parametrize('family', [Logistic, Normal, Poisson])
@pytest.mark.parametrize('is_cupy', [True, False])
def test_basic_unreg_descent(func, kwargs, N, nchunks, family, is_cupy):
beta = np.random.normal(size=2)
M = len(beta)
X = da.random.random((N, M), chunks=(N // nchunks, M))
y = make_y(X, beta=np.array(beta), chunks=(N // nchunks,))
if is_cupy:
cupy = pytest.importorskip('cupy')
X, y = to_dask_cupy_array_xy(X, y, cupy)
X, y = persist(X, y)
result = func(X, y, family=family, **kwargs)
test_vec = np.random.normal(size=2)
test_vec = maybe_to_cupy(test_vec, X)
opt = family.pointwise_loss(result, X, y).compute()
test_val = family.pointwise_loss(test_vec, X, y).compute()
assert opt < test_val
@pytest.mark.parametrize('func,kwargs', [
(admm, {'abstol': 1e-4}),
(proximal_grad, {'tol': 1e-7}),
])
@pytest.mark.parametrize('N', [1000])
@pytest.mark.parametrize('nchunks', [1, 10])
@pytest.mark.parametrize('family', [Logistic, Normal, Poisson])
@pytest.mark.parametrize('lam', [0.01, 1.2, 4.05])
@pytest.mark.parametrize('reg', [r() for r in Regularizer.__subclasses__()])
@pytest.mark.parametrize('is_cupy', [True, False])
def test_basic_reg_descent(func, kwargs, N, nchunks, family, lam, reg, is_cupy):
beta = np.random.normal(size=2)
M = len(beta)
X = da.random.random((N, M), chunks=(N // nchunks, M))
y = make_y(X, beta=np.array(beta), chunks=(N // nchunks,))
if is_cupy:
cupy = pytest.importorskip('cupy')
X, y = to_dask_cupy_array_xy(X, y, cupy)
X, y = persist(X, y)
result = func(X, y, family=family, lamduh=lam, regularizer=reg, **kwargs)
test_vec = np.random.normal(size=2)
test_vec = maybe_to_cupy(test_vec, X)
f = reg.add_reg_f(family.pointwise_loss, lam)
opt = f(result, X, y).compute()
test_val = f(test_vec, X, y).compute()
assert opt < test_val
@pytest.mark.parametrize('func,kwargs', [
(admm, {'max_iter': 2}),
(proximal_grad, {'max_iter': 2}),
(newton, {'max_iter': 2}),
(gradient_descent, {'max_iter': 2}),
])
@pytest.mark.parametrize('scheduler', [
'synchronous',
'threading',
'multiprocessing'
])
@pytest.mark.parametrize('is_cupy', [True, False])
def test_determinism(func, kwargs, scheduler, is_cupy):
X, y = make_intercept_data(1000, 10)
if is_cupy:
cupy = pytest.importorskip('cupy')
X, y = to_dask_cupy_array_xy(X, y, cupy)
with dask.config.set(scheduler=scheduler):
a = func(X, y, **kwargs)
b = func(X, y, **kwargs)
assert (a == b).all()
try:
from distributed import Client
from distributed.utils_test import cluster, loop # flake8: noqa
except ImportError:
pass
else:
@pytest.mark.parametrize('func,kwargs', [
(admm, {'max_iter': 2}),
(proximal_grad, {'max_iter': 2}),
(newton, {'max_iter': 2}),
(gradient_descent, {'max_iter': 2}),
])
def test_determinism_distributed(func, kwargs, loop):
with cluster() as (s, [a, b]):
with Client(s['address'], loop=loop) as c:
X, y = make_intercept_data(1000, 10)
a = func(X, y, **kwargs)
b = func(X, y, **kwargs)
assert (a == b).all()
def broadcast_lbfgs_weight():
with cluster() as (s, [a, b]):
with Client(s['address'], loop=loop) as c:
X, y = make_intercept_data(1000, 10)
coefs = lbfgs(X, y, dask_distributed_client=c)
p = sigmoid(X.dot(coefs).compute())
y_sum = y.compute().sum()
p_sum = p.sum()
assert np.isclose(y_sum, p_sum, atol=1e-1)
|
the-stack_106_21617
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from itertools import product, chain
from operator import add, sub
import numpy as np
import tensorflow as tf
from dotenv import load_dotenv
from annotation.piece import Piece
from annotation.direction import (Direction, get_eight_directions,
get_cross_directions)
from ..naive_long import BlackNaiveLongEffectLayer
__author__ = 'Yasuhiro'
__date__ = '2018/3/18'
class TestWhiteLongEffectHi(tf.test.TestCase):
@classmethod
def setUpClass(cls):
dotenv_path = Path(__file__).parents[3] / '.env'
load_dotenv(str(dotenv_path))
cls.data_format = os.environ.get('DATA_FORMAT')
cls.use_cudnn = bool(os.environ.get('USE_CUDNN'))
def test_effect1(self):
"""
Test that the HI (rook) long-range effect is present.
Blocking by other pieces is not considered in this test.
:return:
"""
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
board = np.empty(shape, dtype=np.int32)
ph = tf.placeholder(tf.int32, shape=shape)
for direction in chain(get_eight_directions(),
[Direction.RIGHT_UP_UP, Direction.LEFT_UP_UP]):
if direction in (Direction.RIGHT_UP_UP, Direction.LEFT_UP_UP):
# A long-range effect in a knight-move direction is impossible, so this must raise an error
with self.assertRaises(ValueError):
BlackNaiveLongEffectLayer(
direction=direction, data_format=self.data_format,
use_cudnn=self.use_cudnn
)(ph)
continue
white_effect = BlackNaiveLongEffectLayer(
direction=direction, data_format=self.data_format,
use_cudnn=self.use_cudnn
)(ph)
# Squeeze out the channel dimension to simplify handling
white_effect = tf.squeeze(white_effect)
with self.test_session() as sess:
for i, j in product(range(9), repeat=2):
with self.subTest(direction=direction, i=i, j=j):
board[:] = Piece.EMPTY
if self.data_format == 'NCHW':
board[0, 0, i, j] = Piece.BLACK_HI
else:
board[0, i, j, 0] = Piece.BLACK_HI
effect = sess.run(white_effect, feed_dict={ph: board})
self.assertTupleEqual(effect.shape, (9, 9))
if direction not in get_cross_directions():
# No square should have an effect
self.assertFalse(np.any(effect))
continue
if direction == Direction.RIGHT:
edge = i == 0
elif direction == Direction.LEFT:
edge = i == 8
elif direction == Direction.UP:
edge = j == 0
elif direction == Direction.DOWN:
edge = j == 8
else:
raise ValueError(direction)
if edge:
# The piece is on the edge of the board, so there is no effect inside the board
self.assertFalse(np.any(effect))
continue
if direction == Direction.RIGHT:
self.assertTrue(np.all(effect[:i, j]))
effect[:i, j] = False
elif direction == Direction.LEFT:
self.assertTrue(np.all(effect[i + 1:, j]))
effect[i + 1:, j] = False
elif direction == Direction.UP:
self.assertTrue(np.all(effect[i, :j]))
effect[i, :j] = False
elif direction == Direction.DOWN:
self.assertTrue(np.all(effect[i, j + 1:]))
effect[i, j + 1:] = False
else:
raise ValueError(direction)
self.assertFalse(np.any(effect))
def test_effect2(self):
"""
Test that the HI (rook) effect is blocked when another piece is placed in each of the four effect directions.
:return:
"""
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
board = np.empty(shape, dtype=np.int32)
# Build the list of candidate blocking pieces.
# Every direction is tested, so only pieces without long-range effects are candidates.
block_piece_list = [
p for p in Piece if p not in (
Piece.BLACK_KY, Piece.BLACK_KA, Piece.BLACK_HI,
Piece.BLACK_UM, Piece.BLACK_RY, Piece.EMPTY, Piece.SIZE
)
]
ph = tf.placeholder(tf.int32, shape=shape)
for direction in get_cross_directions():
white_effect = BlackNaiveLongEffectLayer(
direction=direction, data_format=self.data_format,
use_cudnn=self.use_cudnn
)(ph)
# Squeeze out the channel dimension to simplify handling
white_effect = tf.squeeze(white_effect)
with self.test_session() as sess:
for i, j, k, l in product(range(9), range(9), range(9),
[0, 1]):
# Place HI at (i, j) and the blocking piece at
# (i * l + k * (1 - l), j * (1 - l) + k * l)
x = i * l + k * (1 - l)
y = j * (1 - l) + k * l
if i == x and j == y:
continue
with self.subTest(direction=direction, i=i, j=j, k=k, l=l):
board[:] = Piece.EMPTY
block_piece = np.random.choice(block_piece_list)
if self.data_format == 'NCHW':
board[0, 0, i, j] = Piece.BLACK_HI
board[0, 0, x, y] = block_piece
else:
board[0, i, j, 0] = Piece.BLACK_HI
board[0, x, y, 0] = block_piece
effect = sess.run(white_effect, feed_dict={ph: board})
self.assertTupleEqual(effect.shape, (9, 9))
# The blocking piece always lies in one of the four directions, so comparing x or y is sufficient
if direction == Direction.RIGHT:
block = x < i
u, v = slice(x, i), j
s, t = slice(None, i), j
elif direction == Direction.LEFT:
block = x > i
u, v = slice(i + 1, x + 1), j
s, t = slice(i + 1, None), j
elif direction == Direction.UP:
block = y < j
u, v = i, slice(y, j)
s, t = i, slice(None, j)
elif direction == Direction.DOWN:
block = y > j
u, v = i, slice(j + 1, y + 1)
s, t = i, slice(j + 1, None)
else:
raise ValueError(direction)
if block:
self.assertTrue(np.all(effect[u, v]))
effect[u, v] = False
else:
self.assertTrue(np.all(effect[s, t]))
effect[s, t] = False
self.assertFalse(np.any(effect))
|
the-stack_106_21618
|
# --------------------------------------------------------
# Flow-Guided Feature Aggregation
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
import logging
import numpy as np
from mxnet import context as ctx
from mxnet import ndarray as nd
from mxnet.io import DataDesc
from mxnet.executor_manager import _split_input_slice
def _load_general(data, targets, major_axis):
"""Load a list of arrays into a list of arrays specified by slices"""
for d_src, d_targets in zip(data, targets):
if isinstance(d_targets, nd.NDArray):
d_src.copyto(d_targets)
elif isinstance(d_src, (list, tuple)):
for src, dst in zip(d_src, d_targets):
src.copyto(dst)
else:
raise NotImplementedError
def _load_data(batch, targets, major_axis):
"""Load data into sliced arrays"""
_load_general(batch.data, targets, major_axis)
def _load_label(batch, targets, major_axis):
"""Load label into sliced arrays"""
_load_general(batch.label, targets, major_axis)
def _merge_multi_context(outputs, major_axis):
"""Merge outputs that lives on multiple context into one, so that they look
like living on one context.
"""
rets = []
for tensors, axis in zip(outputs, major_axis):
if axis >= 0:
rets.append(nd.concatenate(tensors, axis=axis, always_copy=False))
else:
# A negative axis means there is no batch_size axis, and all the
# results should be the same on each device. We simply take the
# first one, without checking they are actually the same
rets.append(tensors[0])
return rets
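# Illustration: with outputs = [[out1_dev1, out1_dev2]] and major_axis = [0],
# the per-device tensors are concatenated along the batch axis into a single
# NDArray; with major_axis = [-1], only the first device's copy is returned.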
class DataParallelExecutorGroup(object):
"""DataParallelExecutorGroup is a group of executors that lives on a group of devices.
This is a helper class used to implement data parallelization. Each mini-batch will
be split and run on the devices.
Parameters
----------
symbol : Symbol
The common symbolic computation graph for all executors.
contexts : list
A list of contexts.
workload : list
If not `None`, this can be a list of numbers that specify the workload to be assigned
to the different contexts. Larger numbers indicate heavier workloads.
data_shapes : list
Should be a list of (name, shape) tuples, for the shapes of data. Note the order is
important and should be the same as the order in which the `DataIter` provides the data.
label_shapes : list
Should be a list of (name, shape) tuples, for the shapes of label. Note the order is
important and should be the same as the order in which the `DataIter` provides the label.
param_names : list
A list of strings, indicating the names of parameters (e.g. weights, filters, etc.)
in the computation graph.
for_training : bool
Indicates whether the executors should be bound for training. When not training,
memory for gradients will not be allocated.
inputs_need_grad : bool
Indicates whether gradients with respect to the input data should be computed. This is
currently not used; it will be useful for implementing composition of modules.
shared_group : DataParallelExecutorGroup
Default is `None`. This is used in bucketing. When not `None`, it should be an executor
group corresponding to a different bucket. In other words, it will correspond to a different
symbol with the same set of parameters (e.g. unrolled RNNs with different lengths).
In this case, much of the memory will be shared.
logger : Logger
Default is `logging`.
fixed_param_names: list of str
Indicates parameters to be fixed during training. No gradient space is allocated for
parameters in this list, and no gradient calculation is performed for them.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
def __init__(self, symbol, contexts, workload, data_shapes, label_shapes, param_names,
for_training, inputs_need_grad, shared_group=None, logger=logging,
fixed_param_names=None, grad_req='write', state_names=None):
self.param_names = param_names
self.arg_names = symbol.list_arguments()
self.aux_names = symbol.list_auxiliary_states()
self.symbol = symbol
self.contexts = contexts
self.workload = workload
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.logger = logger
# In the future we should have a better way to profile memory per device (haibin)
# self._total_exec_bytes = 0
self.fixed_param_names = fixed_param_names
if self.fixed_param_names is None:
self.fixed_param_names = []
self.state_names = state_names
if self.state_names is None:
self.state_names = []
if not for_training:
grad_req = 'null'
# data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
# if label_shapes is not None:
# label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
data_names = [x.name for x in data_shapes[0]]
if isinstance(grad_req, str):
self.grad_req = {}
for k in self.arg_names:
if k in self.param_names:
self.grad_req[k] = 'null' if k in self.fixed_param_names else grad_req
elif k in data_names:
self.grad_req[k] = grad_req if self.inputs_need_grad else 'null'
else:
self.grad_req[k] = 'null'
elif isinstance(grad_req, (list, tuple)):
assert len(grad_req) == len(self.arg_names)
self.grad_req = dict(zip(self.arg_names, grad_req))
elif isinstance(grad_req, dict):
self.grad_req = {}
for k in self.arg_names:
if k in self.param_names:
self.grad_req[k] = 'null' if k in self.fixed_param_names else 'write'
elif k in data_names:
self.grad_req[k] = 'write' if self.inputs_need_grad else 'null'
else:
self.grad_req[k] = 'null'
self.grad_req.update(grad_req)
else:
raise ValueError("grad_req must be one of str, list, tuple, or dict.")
if shared_group is not None:
self.shared_data_arrays = shared_group.shared_data_arrays
else:
self.shared_data_arrays = [{} for _ in contexts]
# initialize some instance variables
self.batch_size = len(data_shapes)
self.slices = None
self.execs = []
self._default_execs = None
self.data_arrays = None
self.label_arrays = None
self.param_arrays = None
self.state_arrays = None
self.grad_arrays = None
self.aux_arrays = None
self.input_grad_arrays = None
self.data_shapes = None
self.label_shapes = None
self.data_layouts = None
self.label_layouts = None
self.output_layouts = [DataDesc.get_batch_axis(self.symbol[name].attr('__layout__'))
for name in self.symbol.list_outputs()]
self.bind_exec(data_shapes, label_shapes, shared_group)
def decide_slices(self, data_shapes):
"""Decide the slices for each context according to the workload.
Parameters
----------
data_shapes : list
list of (name, shape) specifying the shapes for the input data or label.
"""
assert len(data_shapes) > 0
major_axis = [DataDesc.get_batch_axis(x.layout) for x in data_shapes]
for (name, shape), axis in zip(data_shapes, major_axis):
if axis == -1:
continue
batch_size = shape[axis]
if self.batch_size is not None:
assert batch_size == self.batch_size, ("all data must have the same batch size: "
+ ("batch_size = %d, but " % self.batch_size)
+ ("%s has shape %s" % (name, shape)))
else:
self.batch_size = batch_size
self.slices = _split_input_slice(self.batch_size, self.workload)
return major_axis
def _collect_arrays(self):
"""Collect internal arrays from executors."""
# convenient data structures
self.data_arrays = [[e.arg_dict[name] for name, _ in self.data_shapes[0]] for e in self.execs]
self.state_arrays = [[e.arg_dict[name] for e in self.execs]
for name in self.state_names]
if self.label_shapes is not None:
self.label_arrays = [[e.arg_dict[name] for name, _ in self.label_shapes[0]] for e in self.execs]
else:
self.label_arrays = None
self.param_arrays = [[exec_.arg_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
if self.for_training:
self.grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
else:
self.grad_arrays = None
data_names = [x[0] for x in self.data_shapes]
if self.inputs_need_grad:
self.input_grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in data_names]
else:
self.input_grad_arrays = None
self.aux_arrays = [[exec_.aux_arrays[i] for exec_ in self.execs]
for i in range(len(self.aux_names))]
def bind_exec(self, data_shapes, label_shapes, shared_group=None, reshape=False):
"""Bind executors on their respective devices.
Parameters
----------
data_shapes : list
label_shapes : list
shared_group : DataParallelExecutorGroup
reshape : bool
"""
assert reshape or not self.execs
for i in range(len(self.contexts)):
data_shapes_i = data_shapes[i]
if label_shapes is not None:
label_shapes_i = label_shapes[i]
else:
label_shapes_i = []
if reshape:
self.execs[i] = self._default_execs[i].reshape(
allow_up_sizing=True, **dict(data_shapes_i + label_shapes_i))
else:
self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i,
shared_group))
self.data_shapes = data_shapes
self.label_shapes = label_shapes
self._collect_arrays()
def reshape(self, data_shapes, label_shapes):
"""Reshape executors.
Parameters
----------
data_shapes : list
label_shapes : list
"""
if self._default_execs is None:
self._default_execs = [i for i in self.execs]
for i in range(len(self.contexts)):
self.execs[i] = self._default_execs[i].reshape(
allow_up_sizing=True, **dict(data_shapes[i] + (label_shapes[i] if label_shapes is not None else []))
)
self.data_shapes = data_shapes
self.label_shapes = label_shapes
self._collect_arrays()
def set_params(self, arg_params, aux_params):
"""Assign, i.e. copy parameters to all the executors.
Parameters
----------
arg_params : dict
A dictionary of name to `NDArray` parameter mapping.
aux_params : dict
A dictionary of name to `NDArray` auxiliary variable mapping.
"""
for exec_ in self.execs:
exec_.copy_params_from(arg_params, aux_params)
def get_params(self, arg_params, aux_params):
""" Copy data from each executor to `arg_params` and `aux_params`.
Parameters
----------
arg_params : list of NDArray
target parameter arrays
aux_params : list of NDArray
target aux arrays
Notes
-----
- This function updates the NDArrays in arg_params and aux_params in place.
"""
for name, block in zip(self.param_names, self.param_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(arg_params[name].dtype).copyto(arg_params[name])
for name, block in zip(self.aux_names, self.aux_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(aux_params[name].dtype).copyto(aux_params[name])
def forward(self, data_batch, is_train=None):
"""Split `data_batch` according to workload and run forward on each devices.
Parameters
----------
data_batch : DataBatch
Or could be any object implementing similar interface.
is_train : bool
The hint for the backend, indicating whether we are during training phase.
Default is `None`, then the value `self.for_training` will be used.
Returns
-------
"""
_load_data(data_batch, self.data_arrays, self.data_layouts)
if is_train is None:
is_train = self.for_training
if self.label_arrays is not None:
assert not is_train or data_batch.label
if data_batch.label:
_load_label(data_batch, self.label_arrays, self.label_layouts)
for exec_ in self.execs:
exec_.forward(is_train=is_train)
def get_outputs(self, merge_multi_context=True):
"""Get outputs of the previous forward computation.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that the
collected results should be merged so that they look as if they came from a
single executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
elements are `NDArray`.
"""
outputs = [[exec_.outputs[i] for exec_ in self.execs]
for i in range(len(self.execs[0].outputs))]
if merge_multi_context:
outputs = _merge_multi_context(outputs, self.output_layouts)
return outputs
def get_states(self, merge_multi_context=True):
"""Get states from all devices
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the states
will be collected from multiple devices. A `True` value indicates that the
collected results should be merged so that they look as if they came from a
single executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
elements are `NDArray`.
"""
assert not merge_multi_context, \
"merge_multi_context=True is not supported for get_states yet."
return self.state_arrays
def set_states(self, states=None, value=None):
"""Set value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like [[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]].
value : number
a single scalar value for all state arrays.
"""
if states is not None:
assert value is None, "Only one of states & value can be specified."
_load_general(states, self.state_arrays, (0,)*len(states))
else:
assert value is not None, "At least one of states & value must be specified."
assert states is None, "Only one of states & value can be specified."
for d_dst in self.state_arrays:
for dst in d_dst:
dst[:] = value
def get_input_grads(self, merge_multi_context=True):
"""Get the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that the
collected results should be merged so that they look as if they came from a
single executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[grad1, grad2]`. Otherwise, it
is like `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`. All the output
elements are `NDArray`.
"""
assert self.inputs_need_grad
if merge_multi_context:
return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
return self.input_grad_arrays
def backward(self, out_grads=None):
"""Run backward on all devices. A backward should be called after
a call to the forward function. Backward cannot be called unless
`self.for_training` is `True`.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert self.for_training, 're-bind with for_training=True to run backward'
if out_grads is None:
out_grads = []
for i, exec_ in enumerate(self.execs):
out_grads_slice = []
exec_.backward(out_grads=out_grads_slice)
def update_metric(self, eval_metric, labels):
"""Accumulate the performance according to `eval_metric` on all devices.
Parameters
----------
eval_metric : EvalMetric
The metric used for evaluation.
labels : list of NDArray
Typically comes from `label` of a `DataBatch`.
"""
for texec, labels in zip(self.execs, labels):
eval_metric.update(labels, texec.outputs)
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
"""Internal utility function to bind the i-th executor.
"""
shared_exec = None if shared_group is None else shared_group.execs[i]
context = self.contexts[i]
shared_data_arrays = self.shared_data_arrays[i]
input_shapes = dict(data_shapes)
if label_shapes is not None:
input_shapes.update(dict(label_shapes))
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None, "shape inference failed"
input_types = {x.name: x.dtype for x in data_shapes}
if label_shapes is not None:
input_types.update({x.name: x.dtype for x in label_shapes})
arg_types, _, aux_types = self.symbol.infer_type(**input_types)
assert arg_types is not None, "type inference failed"
arg_arrays = []
grad_arrays = {} if self.for_training else None
def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger):
"""Internal helper to get a memory block or re-use by re-shaping"""
if name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape):
# nice, we can directly re-use this data blob
assert arg_arr.dtype == arg_type
arg_arr = arg_arr.reshape(arg_shape)
else:
logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape)) +
(', which is larger than already allocated ') +
('shape %s' % (arg_arr.shape,)) +
('. Need to re-allocate. Consider putting ') +
('default_bucket_key to') +
(' be the bucket taking the largest input for better ') +
('memory sharing.'))
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
# replace existing shared array because the new one is bigger
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
shared_data_arrays[name] = arg_arr
return arg_arr
# create or borrow arguments and gradients
for j in range(len(self.arg_names)):
name = self.arg_names[j]
if name in self.param_names: # model parameters
if shared_exec is None:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
if self.grad_req[name] != 'null':
grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
grad_arrays[name] = grad_arr
else:
arg_arr = shared_exec.arg_dict[name]
assert arg_arr.shape == arg_shapes[j]
assert arg_arr.dtype == arg_types[j]
if self.grad_req[name] != 'null':
grad_arrays[name] = shared_exec.grad_dict[name]
else: # data, label, or states
arg_arr = _get_or_reshape(name, shared_data_arrays, arg_shapes[j], arg_types[j],
context, self.logger)
# data might also need grad if inputs_need_grad is True
if self.grad_req[name] != 'null':
grad_arrays[name] = _get_or_reshape('grad of ' + name, shared_data_arrays,
arg_shapes[j], arg_types[j], context,
self.logger)
arg_arrays.append(arg_arr)
# create or borrow aux variables
if shared_exec is None:
aux_arrays = [nd.zeros(s, context, dtype=t) for s, t in zip(aux_shapes, aux_types)]
else:
for j, arr in enumerate(shared_exec.aux_arrays):
assert aux_shapes[j] == arr.shape
assert aux_types[j] == arr.dtype
aux_arrays = shared_exec.aux_arrays[:]
executor = self.symbol.bind(ctx=context, args=arg_arrays,
args_grad=grad_arrays, aux_states=aux_arrays,
grad_req=self.grad_req, shared_exec=shared_exec)
# Get the total bytes allocated for this executor
return executor
def _sliced_shape(self, shapes, i, major_axis):
"""Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : list of (str, tuple)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
"""
sliced_shapes = []
for desc, axis in zip(shapes, major_axis):
shape = list(desc.shape)
if axis >= 0:
shape[axis] = self.slices[i].stop - self.slices[i].start
sliced_shapes.append(DataDesc(desc.name, tuple(shape), desc.dtype, desc.layout))
return sliced_shapes
def install_monitor(self, mon):
"""Install monitor on all executors"""
for exe in self.execs:
mon.install(exe)
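# Typical flow, sketched with illustrative names (the owning module normally
# constructs this group with its symbol, one context per device and per-context
# data_shapes, then drives each batch roughly as follows):
#
#   group.forward(data_batch, is_train=True)
#   group.backward()
#   # a kvstore/updater then applies group.grad_arrays to group.param_arrays
#   group.update_metric(eval_metric, data_batch.label)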
|
the-stack_106_21619
|
import logging
import voluptuous as vol
from homeassistant.helpers import config_validation as cv
from .const import DOMAIN, LANGUAGE_CODES
from .model.kind import TraktKind
def build_config_schema():
return vol.Schema(
{DOMAIN: build_config_domain_schema()},
extra=vol.ALLOW_EXTRA,
)
def build_config_domain_schema():
return vol.Schema(
{
"sensors": vol.Schema(
{
vol.Required("upcoming"): build_config_upcoming_schema(),
}
),
vol.Required("language"): vol.In(LANGUAGE_CODES),
}
)
def build_config_upcoming_schema():
subschemas = {}
for trakt_kind in TraktKind:
subschemas[trakt_kind.value.identifier] = vol.Schema(
{
vol.Required("days_to_fetch", default=90): cv.positive_int,
vol.Required("max_medias", default=3): cv.positive_int,
}
)
return vol.Schema(subschemas)
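# A YAML configuration shaped roughly as follows would satisfy the schema built
# above. The top-level key comes from DOMAIN and the keys under "upcoming" come
# from the TraktKind identifiers, so the concrete names shown here are only
# assumptions:
#
#   trakt_tv:
#     language: en
#     sensors:
#       upcoming:
#         show:
#           days_to_fetch: 90
#           max_medias: 3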
|
the-stack_106_21620
|
#!/usr/bin/env python
"""
SlipStream Client
=====
Copyright (C) 2014 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import sys
from slipstream.command.CommandBase import CommandBase
from slipstream.HttpClient import HttpClient
import slipstream.util as util
class MainProgram(CommandBase):
'''A command-line program to show/list module definition(s).'''
def __init__(self, argv=None):
self.module = ''
self.endpoint = None
super(MainProgram, self).__init__(argv)
def parse(self):
usage = '''usage: %prog [options] [<module-uri>]
<module-uri> Name of the module to list or show.
For example Public/Tutorials/HelloWorld/client_server'''
self.parser.usage = usage
self.addEndpointOption()
self.options, self.args = self.parser.parse_args()
self._checkArgs()
def _checkArgs(self):
if len(self.args) == 1:
self.module = self.args[0]
if len(self.args) > 1:
self.usageExitTooManyArguments()
def doWork(self):
client = HttpClient()
client.verboseLevel = self.verboseLevel
uri = util.MODULE_RESOURCE_PATH
if self.module:
uri += '/' + self.module
url = self.options.endpoint + uri
_, content = client.get(url)
print(content)
if __name__ == "__main__":
try:
MainProgram()
except KeyboardInterrupt:
print('\n\nExecution interrupted by the user... goodbye!')
sys.exit(-1)
|
the-stack_106_21621
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for the Hyper-V driver and related APIs.
"""
import os
import platform
import shutil
import sys
import uuid
from nova.compute import power_state
from nova import context
from nova import db
from nova import flags
from nova.image import glance
from nova.tests import fake_network
from nova.tests.hyperv import basetestcase
from nova.tests.hyperv import db_fakes
from nova.tests.hyperv import hypervutils
from nova.tests.hyperv import mockproxy
import nova.tests.image.fake as fake_image
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
from nova.virt.hyperv import vmutils
from nova.virt import images
class HyperVAPITestCase(basetestcase.BaseTestCase):
"""Unit tests for Hyper-V driver calls."""
def setUp(self):
super(HyperVAPITestCase, self).setUp()
self._user_id = 'fake'
self._project_id = 'fake'
self._instance_data = None
self._image_metadata = None
self._dest_server = None
self._fetched_image = None
self._update_image_raise_exception = False
self._post_method_called = False
self._recover_method_called = False
self._volume_target_portal = '192.168.1.112:3260'
self._volume_id = '10958016-e196-42e3-9e7f-5d8927ae3099'
self._context = context.RequestContext(self._user_id, self._project_id)
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
vswitch_name='external')
self._hypervutils = hypervutils.HyperVUtils()
self._conn = driver_hyperv.HyperVDriver()
def _setup_stubs(self):
db_fakes.stub_out_db_instance_api(self.stubs)
fake_image.stub_out_image_service(self.stubs)
def fake_fetch(context, image_id, target, user, project):
self._fetched_image = target
if not os.path.exists(target):
self._hypervutils.create_vhd(target)
self.stubs.Set(images, 'fetch', fake_fetch)
def fake_get_remote_image_service(context, name):
class FakeGlanceImageService(object):
def update(self_fake, context, image_id, image_metadata, f):
if self._update_image_raise_exception:
raise vmutils.HyperVException(
"Simulated update failure")
self._image_metadata = image_metadata
return (FakeGlanceImageService(), 1)
self.stubs.Set(glance, 'get_remote_image_service',
fake_get_remote_image_service)
# Modules to mock
modules_to_mock = [
'wmi',
'os',
'shutil',
'uuid',
'time',
'subprocess',
'multiprocessing',
'_winreg'
]
# Modules in which the mocks are going to be injected
from nova.virt.hyperv import baseops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import snapshotops
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
modules_to_test = [
driver_hyperv,
baseops,
vmops,
vmutils,
volumeops,
volumeutils,
snapshotops,
livemigrationops,
hypervutils,
sys.modules[__name__]
]
self._inject_mocks_in_modules(modules_to_mock, modules_to_test)
if isinstance(snapshotops.wmi, mockproxy.Mock):
from nova.virt.hyperv import ioutils
import StringIO
def fake_open(name, mode):
return StringIO.StringIO("fake file content")
self.stubs.Set(ioutils, 'open', fake_open)
def tearDown(self):
try:
if self._instance_data and self._hypervutils.vm_exists(
self._instance_data["name"]):
self._hypervutils.remove_vm(self._instance_data["name"])
if self._dest_server and \
self._hypervutils.remote_vm_exists(self._dest_server,
self._instance_data["name"]):
self._hypervutils.remove_remote_vm(self._dest_server,
self._instance_data["name"])
self._hypervutils.logout_iscsi_volume_sessions(self._volume_id)
shutil.rmtree(flags.FLAGS.instances_path, True)
fake_image.FakeImageService_reset()
finally:
super(HyperVAPITestCase, self).tearDown()
def test_get_available_resource(self):
dic = self._conn.get_available_resource()
self.assertEquals(dic['hypervisor_hostname'], platform.node())
def test_list_instances(self):
num_vms = self._hypervutils.get_vm_count()
instances = self._conn.list_instances()
self.assertEquals(len(instances), num_vms)
def test_get_info(self):
self._spawn_instance(True)
info = self._conn.get_info(self._instance_data)
self.assertEquals(info["state"], str(power_state.RUNNING))
def test_spawn_cow_image(self):
self._test_spawn_instance(True)
def test_spawn_no_cow_image(self):
self._test_spawn_instance(False)
def test_spawn_no_vswitch_exception(self):
# Set flag to a non existing vswitch
self.flags(vswitch_name=str(uuid.uuid4()))
self.assertRaises(vmutils.HyperVException, self._spawn_instance, True)
self.assertFalse(self._hypervutils.vm_exists(
self._instance_data["name"]))
def _test_vm_state_change(self, action, from_state, to_state):
self._spawn_instance(True)
if from_state:
self._hypervutils.set_vm_state(self._instance_data["name"],
from_state)
action(self._instance_data)
vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
self.assertEquals(vmstate, to_state)
def test_pause(self):
self._test_vm_state_change(self._conn.pause, None,
constants.HYPERV_VM_STATE_PAUSED)
def test_pause_already_paused(self):
self._test_vm_state_change(self._conn.pause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_PAUSED)
def test_unpause(self):
self._test_vm_state_change(self._conn.unpause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_ENABLED)
def test_unpause_already_running(self):
self._test_vm_state_change(self._conn.unpause, None,
constants.HYPERV_VM_STATE_ENABLED)
def test_suspend(self):
self._test_vm_state_change(self._conn.suspend, None,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_suspend_already_suspended(self):
self._test_vm_state_change(self._conn.suspend,
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_resume(self):
self._test_vm_state_change(self._conn.resume,
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_ENABLED)
def test_resume_already_running(self):
self._test_vm_state_change(self._conn.resume, None,
constants.HYPERV_VM_STATE_ENABLED)
def test_power_off(self):
self._test_vm_state_change(self._conn.power_off, None,
constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_already_powered_off(self):
self._test_vm_state_change(self._conn.suspend,
constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_DISABLED)
def test_power_on(self):
self._test_vm_state_change(self._conn.power_on,
constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_ENABLED)
def test_power_on_already_running(self):
self._test_vm_state_change(self._conn.power_on, None,
constants.HYPERV_VM_STATE_ENABLED)
def test_reboot(self):
self._spawn_instance(True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
self._conn.reboot(self._instance_data, network_info, None)
vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
self.assertEquals(vmstate, constants.HYPERV_VM_STATE_ENABLED)
def test_destroy(self):
self._spawn_instance(True)
(vhd_paths, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self._conn.destroy(self._instance_data)
self.assertFalse(self._hypervutils.vm_exists(
self._instance_data["name"]))
self._instance_data = None
for vhd_path in vhd_paths:
self.assertFalse(os.path.exists(vhd_path))
def test_live_migration(self):
self.flags(limit_cpu_features=True)
self._spawn_instance(False)
# Existing server
self._dest_server = "HV12RCTest1"
self._live_migration(self._dest_server)
instance_name = self._instance_data["name"]
self.assertFalse(self._hypervutils.vm_exists(instance_name))
self.assertTrue(self._hypervutils.remote_vm_exists(self._dest_server,
instance_name))
self.assertTrue(self._post_method_called)
self.assertFalse(self._recover_method_called)
def test_live_migration_with_target_failure(self):
self.flags(limit_cpu_features=True)
self._spawn_instance(False)
dest_server = "nonexistingserver"
exception_raised = False
try:
self._live_migration(dest_server)
except Exception:
exception_raised = True
# Cannot use assertRaises with pythoncom.com_error on Linux
self.assertTrue(exception_raised)
instance_name = self._instance_data["name"]
self.assertTrue(self._hypervutils.vm_exists(instance_name))
self.assertFalse(self._post_method_called)
self.assertTrue(self._recover_method_called)
def _live_migration(self, dest_server):
def fake_post_method(context, instance_ref, dest, block_migration):
self._post_method_called = True
def fake_recover_method(context, instance_ref, dest, block_migration):
self._recover_method_called = True
self._conn.live_migration(self._context, self._instance_data,
dest_server, fake_post_method, fake_recover_method)
def test_pre_live_migration_cow_image(self):
self._test_pre_live_migration(True)
def test_pre_live_migration_no_cow_image(self):
self._test_pre_live_migration(False)
def _test_pre_live_migration(self, cow):
self.flags(use_cow_images=cow)
instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
instance_data = db_fakes.get_fake_instance_data(instance_name,
self._project_id, self._user_id)
block_device_info = None
self._conn.pre_live_migration(self._context, instance_data,
block_device_info, network_info)
if cow:
self.assertTrue(not self._fetched_image is None)
else:
self.assertTrue(self._fetched_image is None)
def test_snapshot_with_update_failure(self):
self._spawn_instance(True)
self._update_image_raise_exception = True
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
self._context, self._instance_data, snapshot_name)
# assert VM snapshots have been removed
self.assertEquals(self._hypervutils.get_vm_snapshots_count(
self._instance_data["name"]), 0)
def test_snapshot(self):
self._spawn_instance(True)
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
self._conn.snapshot(self._context, self._instance_data, snapshot_name)
self.assertTrue(self._image_metadata and
"disk_format" in self._image_metadata and
self._image_metadata["disk_format"] == "vhd")
# assert VM snapshots have been removed
self.assertEquals(self._hypervutils.get_vm_snapshots_count(
self._instance_data["name"]), 0)
def _spawn_instance(self, cow, block_device_info=None):
self.flags(use_cow_images=cow)
instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
self._instance_data = db_fakes.get_fake_instance_data(instance_name,
self._project_id, self._user_id)
instance = db.instance_create(self._context, self._instance_data)
image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
self._conn.spawn(self._context, instance, image,
injected_files=[], admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
def _test_spawn_instance(self, cow):
self._spawn_instance(cow)
self.assertTrue(self._hypervutils.vm_exists(
self._instance_data["name"]))
vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
self.assertEquals(vmstate, constants.HYPERV_VM_STATE_ENABLED)
(vhd_paths, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(vhd_paths), 1)
parent_path = self._hypervutils.get_vhd_parent_path(vhd_paths[0])
if cow:
self.assertTrue(not parent_path is None)
self.assertEquals(self._fetched_image, parent_path)
else:
self.assertTrue(parent_path is None)
self.assertEquals(self._fetched_image, vhd_paths[0])
def _attach_volume(self):
self._spawn_instance(True)
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
self._conn.attach_volume(connection_info,
self._instance_data["name"], '/dev/sdc')
def test_attach_volume(self):
self._attach_volume()
(_, volumes_paths) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(volumes_paths), 1)
sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
self._volume_id)
self.assertTrue(sessions_exist)
def test_detach_volume(self):
self._attach_volume()
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
self._conn.detach_volume(connection_info,
self._instance_data["name"], '/dev/sdc')
(_, volumes_paths) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(volumes_paths), 0)
sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
self._volume_id)
self.assertFalse(sessions_exist)
def test_boot_from_volume(self):
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
self._spawn_instance(False, block_device_info)
(_, volumes_paths) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(volumes_paths), 1)
sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
self._volume_id)
self.assertTrue(sessions_exist)
def test_attach_volume_with_target_connection_failure(self):
self._spawn_instance(True)
target = 'nonexistingtarget:3260'
connection_info = db_fakes.get_fake_volume_info_data(target,
self._volume_id)
self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
connection_info, self._instance_data["name"], '/dev/sdc')
|
the-stack_106_21622
|
import nltk
import nltk.tokenize as nltk_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords, wordnet
from typing import List
from collections import Counter
import itertools
from threading import Lock
import unicodedata
import sys
import string
import datetime
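# Every Unicode code point whose category is punctuation, merged with ASCII
# string.punctuation, collapsed into a single character set for str.strip().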
unicode_punctuation = ''.join(list(set(chr(i) for i in range(sys.maxunicode)
if unicodedata.category(chr(i)).startswith('P')) | set(string.punctuation)))
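# Full and abbreviated month/weekday names (lower-cased below) used as extra stop tokens.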
months_filter = list(itertools.chain(*[(datetime.date(2020, i, 1).strftime('%B'),
datetime.date(2020, i, 1).strftime('%b')) for i in range(1,13)]))
day_filter = list(itertools.chain(*[(datetime.date(2020, 1, i).strftime('%A'),
datetime.date(2020, 1, i).strftime('%a')) for i in range(1,8)]))
date_filter = list(map(lambda x: x.lower(), months_filter + day_filter))
common_words = ['say', 'go', 'time', 'make', 'said', 'news']
class Tokenizer:
def __init__(self):
pass
def tokenize(self, text):
pass
def tokenize_as_sentences(self, text):
pass
def _filter_empty_string(self, token):
return len(token) > 0
def _lower(self, token: str):
if self.lower_case:
return token.lower()
return token
class TokenProcessor:
def __init__(self, tokenizer: Tokenizer):
self.tokenizer = tokenizer
def process(self, text):
pass
class TokenFilter:
def __init__(self, filter_tokens: List[str]=stopwords.words() + date_filter + common_words):
self.filter_tokens = Counter(filter_tokens)
def add_tokens(self, tokens: List[str]):
        self.filter_tokens.update(tokens)
# ToDo(KMG): Make lower() optional?
def filter(self, token: str) -> bool:
result = token.lower() not in self.filter_tokens
return result
class Stripper:
def __init__(self):
pass
def strip(self, token: str) -> str:
pass
class NoopStripper:
def __init__(self):
pass
def strip(self, token: str) -> str:
return token
class StripCharacters(Stripper):
def __init__(self, strip_characters: str = unicode_punctuation):
super().__init__()
self.strip_characters = strip_characters
def strip(self, token: str) -> str:
return token.strip(self.strip_characters)
class NLTKSentenceTokenizer(Tokenizer):
def __init__(self, language='english'):
super().__init__()
self.language = language
def tokenize(self, text):
return nltk_tokenize.sent_tokenize(text, self.language)
def tokenize_as_sentences(self, text):
return nltk_tokenize.sent_tokenize(text, self.language)
class NLTKWordTokenizer(Tokenizer):
def __init__(self, language='english', token_filter: TokenFilter=TokenFilter(),
strip_characters: Stripper=NoopStripper(), sentence_processor=NLTKSentenceTokenizer(),
lower_case=True, min_tokens=3):
super().__init__()
self.language = language
self.token_filter = token_filter
self.strip_characters = strip_characters
self.sentence_processor = sentence_processor
        self.lower_case = lower_case
self.min_tokens = min_tokens
def tokenize(self, text):
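        # Lazy pipeline: sentence-split, word-tokenize, lower-case, strip
        # punctuation, then drop stop words and empty tokens.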
tokens = itertools.chain(*[nltk.word_tokenize(sent)
for sent in self.sentence_processor.tokenize(text)])
tokens = map(self._lower, tokens)
tokens = map(self.strip_characters.strip, tokens)
tokens = filter(self.token_filter.filter, tokens)
tokens = filter(self._filter_empty_string, tokens)
return tokens
def tokenize_as_sentences(self, text):
original_sentences = self.sentence_processor.tokenize(text)
        sentences = [nltk.word_tokenize(sent) for sent in original_sentences]
filtered_sentences = []
for tokens in sentences:
tokens = map(self._lower, tokens)
tokens = map(self.strip_characters.strip, tokens)
tokens = filter(self.token_filter.filter, tokens)
tokens = list(filter(self._filter_empty_string, tokens))
if len(tokens) < self.min_tokens:
tokens = []
filtered_sentences.append(tokens)
return filtered_sentences, original_sentences
class NLTKWordLemmatizer(Tokenizer):
def __init__(self, language='english', token_filter: TokenFilter=TokenFilter(),
strip_characters: Stripper=NoopStripper(), sentence_processor=NLTKSentenceTokenizer(),
lower_case=True):
        super().__init__()
        self.language = language
        # ToDo: Need to map the specified language to the tagger language (ISO 639)
        self.tagger_language = "eng"
        self.token_filter = token_filter
        self.strip_characters = strip_characters
        self.sentence_processor = sentence_processor
        self.lower_case = lower_case
self.lemmatizer = WordNetLemmatizer()
self.tagger = nltk.pos_tag
self.lock = Lock()
def _lemmatize(self, tagged_token):
token, tag = tagged_token
#
# NLTK is not thread safe, so need to hold lock for Lemmatization
# ref: https://github.com/nltk/nltk/issues/803
#
with self.lock:
lemmatized_token = self.lemmatizer.lemmatize(token, self._get_wordnet_pos(tag))
return lemmatized_token
# This uses the default, pretrained POS tagger (Treebank corpus). Need to translate to
# the wordnet POS tags
def _get_wordnet_pos(self, treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('S'):
return wordnet.ADJ_SAT
elif treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R'):
return wordnet.ADV
else:
# ToDo(KMG): Is this right? Looks like Treebank has a lot of tags... many more than wordnet
# Also, apparently, the default lemmatization pos tag is noun, so this is likely fine for this case.
return wordnet.NOUN
def tokenize(self, text):
        # ToDo(KMG): What effect does upper/lower have on lemmatization? I assume we want to preserve capitalization
# because it may refer to proper nouns... Maybe best to do post-processing after we tag (we'll know nouns,
# etc.)
# untagged_tokens = map(self._lower, untagged_tokens)
tagged_tokens = itertools.chain(*[self.tagger(nltk.word_tokenize(sent), None, self.tagger_language)
for sent in self.sentence_processor.tokenize(text)])
tokens = map(self._lemmatize, tagged_tokens)
tokens = map(self.strip_characters.strip, tokens)
tokens = filter(self.token_filter.filter, tokens)
tokens = filter(self._filter_empty_string, tokens)
return tokens
def tokenize_as_sentences(self, text):
original_sentences = self.sentence_processor.tokenize(text)
tagged_sentences = [self.tagger(nltk.word_tokenize(sent), None, self.tagger_language)
for sent in original_sentences]
filtered_sentences = []
for tagged_tokens in tagged_sentences:
tokens = map(self._lemmatize, tagged_tokens)
tokens = map(self.strip_characters.strip, tokens)
tokens = filter(self.token_filter.filter, tokens)
tokens = filter(self._filter_empty_string, tokens)
filtered_sentences.append(list(tokens))
return [filtered_sentences, original_sentences]
|
the-stack_106_21623
|
# pylint: disable=missing-function-docstring
import unittest
from unittest.mock import MagicMock
from dealership_review.core.review_sorter import sort_reviews, SortType
class TestSortReviews(unittest.TestCase):
"""
Tests for the sort_review function
"""
def setUp(self) -> None:
self.highest_score_review = MagicMock(score=0)
self.medium_score_review = MagicMock(score=5)
self.lowest_score_review = MagicMock(score=10)
self.reviews = [
self.medium_score_review,
self.lowest_score_review,
self.highest_score_review,
]
def test_sort_reviews_ascending(self):
expected_result = [
self.highest_score_review,
self.medium_score_review,
self.lowest_score_review,
]
self.assertEqual(
sort_reviews(self.reviews, sort_type=SortType.ASC),
expected_result
)
def test_sort_reviews_descending(self):
expected_result = [
self.lowest_score_review,
self.medium_score_review,
self.highest_score_review,
]
self.assertEqual(
sort_reviews(self.reviews, sort_type=SortType.DESC),
expected_result
)
|
the-stack_106_21624
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="contourcarpet.line", **kwargs):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style+colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
the-stack_106_21625
|
from pathlib import Path
import os
import shutil
class Node:
def __init__(self, filename):
self.filename = filename
self.items = []
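# Zero-pad single-digit indices so generated file/folder names sort in ToC order (1 -> "01_").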
def number_prefix(num):
    result = str(num) + "_"
    if num < 10:
        result = "0" + result
    return result
# return an array of Nodes that are included in page's ToC section
def extract_table_of_contents(readfile):
appending = False
toc = []
tocItems = {}
for line in readfile.readlines():
if appending and len(line) > 4 and "::" not in line and "newpage" not in line:
filename = line.replace(" ", "").replace("\n", "")
if not filename in tocItems:
toc.append(Node(filename))
tocItems[filename] = "1"
if "maxdepth" in line:
appending = True
return toc
# recursive function for building ToC tree
def scan_node_for_toc(path, root):
g = open(path, "r")
toc = extract_table_of_contents(g)
if len(toc) > 0:
for idx, item in enumerate(toc):
item_path = root + item.filename + ".rst"
subToc = scan_node_for_toc(item_path, root)
if len(subToc) > 0:
toc[idx].items = subToc
return toc
def count_tree_items(tree):
total = len(tree.items)
for leaf in tree.items:
total += count_tree_items(leaf)
return total
def print_tree_items(tree, depth):
print(">" * depth + " " + tree.filename)
for leaf in tree.items:
print_tree_items(leaf, depth + 1)
# use ToC to move files to correct folder, building a new one if necessary
def process_node(node, root_path, result_path, index):
source = root_path + node.filename + ".mdx"
if len(node.items) == 0:
destination = result_path + number_prefix(index) + node.filename.replace("%", "") + ".mdx"
dest = shutil.copyfile(source, destination)
else:
folder_path = result_path + number_prefix(index) + node.filename
os.mkdir(folder_path)
destination = folder_path + "/index.mdx"
dest = shutil.copyfile(source, destination)
idx = 1
for sub_node in node.items:
process_node(sub_node, root_path, folder_path + "/", idx)
idx += 1
for path in Path('content').rglob('index.rst'):
root_path = str(path.parents[0]) + '/'
content_path = str(path.parents[2]) + '/'
f = path.open()
# get top level of ToC
toc = extract_table_of_contents(f)
# get sub-levels of ToC
for idx, item in enumerate(toc):
item_path = root_path + item.filename + ".rst"
subToc = scan_node_for_toc(item_path, root_path)
if len(subToc) > 0:
toc[idx].items = subToc
# Print ToC structure and check to see how many files logged in ToC
total = len(toc)
for node in toc:
print_tree_items(node, 0)
total += count_tree_items(node)
print(str(total) + " files logged in ToC")
result_path = content_path + "content_build"
dest_path = result_path + "/" + path.parts[-2]
# clear out previous results, if any
if os.path.exists(dest_path):
shutil.rmtree(dest_path)
# create build folder, if needed
if not os.path.exists(result_path):
try:
os.mkdir(result_path)
except OSError:
print ("Creation of the directory %s failed" % result_path)
else:
print ("Successfully created the directory %s " % result_path)
# create destination folder within build folder
try:
os.mkdir(dest_path)
except OSError:
print ("Creation of the directory %s failed" % dest_path)
else:
print ("Successfully created the directory %s " % dest_path)
# copy images folder to destination folder
if os.path.exists(root_path + "images"):
shutil.copytree(root_path + "images", dest_path + "/images")
# copy index over
shutil.copyfile(root_path + "index.mdx", dest_path + "/index.mdx")
# process nodes in ToC to move mdx files to correct folder in destination folder
idx = 1
for node in toc:
process_node(node, root_path, dest_path + "/", idx)
idx += 1
# remove conclusion
conclusion_path = '{0}/{1}conclusion.mdx'.format(dest_path, number_prefix(idx-1))
if os.path.exists(conclusion_path):
print("removed conclusion.mdx")
os.remove(conclusion_path)
|
the-stack_106_21628
|
from collections import Counter
class Solution:
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
n = len(s)
start = end = 0
dict_t = Counter(t)
missing = sum(dict_t.values())
dict_s = {}
min_distance, substring = n+1, ""
while end < n:
letter = s[end]
if letter in dict_t:
dict_t[letter] -= 1
missing -= dict_t[letter] >= 0
if missing == 0:
while missing == 0:
front_letter = s[start]
while front_letter not in dict_t:
start += 1
front_letter = s[start]
dict_t[front_letter] += 1
missing += dict_t[front_letter] > 0
# missing still zero need to find next letter
start += missing == 0
if end - start + 1 < min_distance:
min_distance = end - start + 1
substring = s[start:end+1]
start += 1
end += 1
return substring
|
the-stack_106_21630
|
import os
import random
import numpy as np
import json
import sys
from glob import glob
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# sys.path.append(ROOT_DIR)
from guv import GUVDataset, GUVConfig
from frcnn.utils import extract_bboxes
import frcnn.model as modellib
import frcnn.utils as utils
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
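# Assumed location of the pre-trained COCO weights (filename follows the upstream
# Mask R-CNN samples); only needed when the script is run with --weights=coco.
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")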
class GUVBBDataset(GUVDataset):
def __init__(self):
super(self.__class__, self).__init__()
self.IMAGE_HEIGHT = 128
self.IMAGE_WIDTH = 128
    def load_GUV(self, dataset_dir, subset, labels_json="guv_InstSeg_BB.json"):
# Add classes. We have only one class to add.
self.add_class("GUV", 1, "GUV")
# Train or validation dataset?
assert subset in ["train", "val","test"]
dataset_dir = os.path.join(dataset_dir, subset)
# Load annotations
# "image_4.png":{"file_attributes":{},
# "filename":"image_4.png",
# "regions":[{"shape_attributes":{"name":"rect","x":int,"y":int,"width":int,"height":int},"region_attributes":{}},...},
# "size":26819}
# We mostly care about the x and y coordinates of each region
#
if subset == "train" or subset == "val":
annotations = json.load(open(os.path.join(dataset_dir, labels_json)))
annotations = list(annotations.values()) # don't need the dict keys
annotations = list(annotations[1].values())
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
#print(annotations)
annotations = [a for a in annotations if a['regions']]
# Add images
for a in annotations:
# x1,y1 should be top left corner and x2 y2 the bottom right.
# x2,y2 should not be part of the box so increment by 1
#
#print(a['regions'])
if type(a['regions']) is dict:
rects = [r['shape_attributes'] for r in a['regions'].values()]
else:
rects = [r['shape_attributes'] for r in a['regions']]
# Unfortunately, VIA doesn't include it in JSON, so we must read
# the image. This is only managable since the dataset is tiny.
# However, in this guv bounding box project we expect to use the same size of image so we just get it from the config.
image_path = os.path.join(dataset_dir, a['filename'])
#if
# image = skimage.io.imread(image_path)
# height, width = image.shape[:2]
#else:
height = self.IMAGE_HEIGHT
width = self.IMAGE_WIDTH
self.add_image(
"GUV",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
rects=rects)
else:
image_paths = glob(os.path.join(dataset_dir,"*.png"))
height = 128
width = 128
#image number can be either a 1 digit, 2 digit number or 3 digit number
for image_path in image_paths:
num = image_path[image_path.find('_')+1:image_path.find('.')]
self.add_image(
"GUV",
image_id = image_path[-(11+len(num)-1):],
path = image_path,
width = width, height = height
)
def load_bboxes(self,image_id):
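        # Return (boxes, class_ids) for the image: boxes are [y1, x1, y2, x2]
        # int32 rows built from the labelled rects; every box is class 1 (GUV).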
# If not a GUV dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "GUV":
return super(self.__class__, self).load_bbox(image_id)
info = self.image_info[image_id]
#initialise bounding boxes
boxes = np.zeros([len(info["rects"]), 4], dtype=np.int32)
for i, r in enumerate(info["rects"]):
x1 = r['x']
y1 = r['y']
x2 = x1+r['width']
y2 = y1+r['height']
boxes[i] = np.array([y1,x1,y2,x2])
class_ids = np.ones([boxes.shape[0]],dtype=np.int32)
return boxes, class_ids
def add_labelled_image(self,dataset_dir,image_id,instances,json_path = "guv_InstSeg_BB.json"):
######################################################
# dataset_dir (string): path to directory of training data
# image_id (int): internal id of image passed through network
# instances (np.ndarray) (height,width,N): detection instance masks
# json_path (str): via.html labelling tool project json file path
######################################################
# need save append to the "regions" element of the json the bbox in the following format
# {"shape_attributes":{"name":"rect","x":26,"y":59,"width":14,"height":13},"region_attributes":{}}, ...
assert dataset_dir[-5:] == "train"
labelled_data_dict = json.load(open(os.path.join(dataset_dir,json_path)))
labelled_data= list(labelled_data_dict.values())
# save the list of image names in metadata
image_id_list = labelled_data[4]
labelled_data = labelled_data[1]
#get bounding boxes from masks
bboxes = extract_bboxes(instances)
#bboxes has shape (Ninst,4) -> [n_inst,np.array([y1,x1,y2,x2])]
regions = []
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
#contours,_ = cv2.findContours(instances[:,:,i].astype(np.uint8),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# json encoding can't handle numpy integer types therefore need to convert to in built python int
#x = []
#y = []
#for index in range(len(contours[0][:,0,0])):
# x.append(int(contours[0][index,0,0]))
# y.append(int(contours[0][index,0,1]))
#convert bbox opposite corners to top corner + width and height
x1 = bbox[1]
y1 = bbox[0]
w = abs(bbox[3]-bbox[1])
h = abs(bbox[2]-bbox[0])
region = {"shape_attributes":{"name":"rect","x":int(x1),"y":int(y1),"width":int(w),"height":int(h)},"region_attributes":{}}
regions.append(region)
#create dictionary in format for labelled data
labelled_image = {'file_attributes': {},'filename': self.image_info[image_id]['id'],'regions': regions,'size': 26493}
labelled_data[self.image_info[image_id]['id']] = labelled_image
image_id_list.append(self.image_info[image_id]['id'])
#save the training data back in place in the dictionary
labelled_data_dict['_via_img_metadata'] = labelled_data
labelled_data_dict['_via_image_id_list'] = image_id_list
with open(os.path.join(dataset_dir,json_path),'w') as outfile:
json.dump(labelled_data_dict,outfile)
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = GUVBBDataset()
dataset_train.load_GUV(args.dataset, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = GUVBBDataset()
dataset_val.load_GUV(args.dataset, "val")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=40,
layers='heads')
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect GUVs.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'splash' or 'detect'")
parser.add_argument('--dataset', required=False,
metavar="/path/to/balloon/dataset/",
help='Directory of the GUV dataset')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--image', required=False,
metavar="path or URL to image",
help='Image to apply the color splash effect on')
parser.add_argument('--video', required=False,
metavar="path or URL to video",
help='Video to apply the color splash effect on')
args = parser.parse_args()
# Validate arguments
if args.command == "train":
assert args.dataset, "Argument --dataset is required for training"
elif args.command == "splash":
assert args.image or args.video,\
"Provide --image or --video to apply color splash"
elif args.command == "detect":
assert args.dataset, "Argument --dataset is required for detection test"
print("Weights: ", args.weights)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
# Configurations
if args.command == "train":
config = GUVConfig()
else:
class InferenceConfig(GUVConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
GUV_DIR = args.dataset
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.weights.lower() == "coco":
weights_path = COCO_WEIGHTS_PATH
# Download weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
elif args.weights.lower() == "last":
# Find last trained weights
weights_path = model.find_last()
elif args.weights.lower() == "imagenet":
# Start from ImageNet trained weights
weights_path = model.get_imagenet_weights()
else:
weights_path = args.weights
# Load weights
print("Loading weights ", weights_path)
if args.weights.lower() == "coco":
# Exclude the last layers because they require a matching
# number of classes
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
# Train or evaluate
if args.command == "train":
train(model)
elif args.command == "splash":
detect_and_color_splash(model, image_path=args.image,
video_path=args.video)
elif args.command == "detect":
# Load validation dataset
dataset = GUVBBDataset()
dataset.load_GUV(GUV_DIR, "test")
# Must call before using the dataset
dataset.prepare()
print("Images: {}\nClasses: {}".format(len(dataset.image_ids), dataset.class_names))
image_id = random.choice(dataset.image_ids)
if config.GENERATE_MASK:
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
else:
            image, image_meta, gt_class_id, gt_bbox = \
modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
info = dataset.image_info[image_id]
print("image ID: {}.{} ({}) {}".format(info["source"], info["id"], image_id,
dataset.image_reference(image_id)))
# Run object detection
results = model.detect([image], verbose=1)
# Display results
#ax = get_ax(1)
#r = results[0]
#visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
# dataset.class_names, r['scores'], ax=ax,
# title="Predictions")
#log("gt_class_id", gt_class_id)
#log("gt_bbox", gt_bbox)
#log("gt_ma`sk", gt_mask)
else:
print("'{}' is not recognized. "
"Use 'train' or 'splash'".format(args.command))
|
the-stack_106_21631
|
import inspect
import json
import os
from copy import deepcopy
import empty_files
def get_adjacent_file(name: str) -> str:
calling_file = inspect.stack(1)[1][1]
directory = os.path.dirname(os.path.abspath(calling_file))
filename = os.path.join(directory, name)
return filename
def write_to_temporary_file(expected: str, name: str):
import tempfile
with tempfile.NamedTemporaryFile(
mode="w+b", suffix=".txt", prefix=name, delete=False
) as temp:
temp.write(expected.encode("utf-8-sig"))
return temp.name
def to_json(object) -> str:
return json.dumps(
object,
sort_keys=True,
indent=4,
separators=(",", ": "),
default=lambda o: o.__dict__,
ensure_ascii=True,
)
def deserialize_json_fields(a_dict: dict) -> dict:
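    # Return a deep copy in which any string value that looks like embedded JSON
    # (starts with '{') is parsed; values that fail to parse are left unchanged,
    # and nested dicts are processed recursively.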
a_dict = deepcopy(a_dict)
for key, val in a_dict.items():
if isinstance(val, str) and val.startswith('{'):
try:
deserialized_val = json.loads(val)
except:
# leave field unchanged on exception
pass
else:
a_dict[key] = deserialized_val
elif isinstance(val, dict):
a_dict[key] = deserialize_json_fields(val)
return a_dict
def is_windows_os() -> bool:
return os.name == "nt"
def create_empty_file(file_path: str) -> None:
from empty_files.empty_files import create_empty_file
create_empty_file(file_path)
def ensure_file_exists(approved_path: str) -> None:
if not os.path.isfile(approved_path):
create_empty_file(approved_path)
def create_directory_if_needed(received_file: str) -> None:
directory = os.path.dirname(received_file)
if directory and not os.path.exists(directory):
os.makedirs(directory)
|
the-stack_106_21633
|
import requests
import csv
from bs4 import BeautifulSoup
url = "http://api.irishrail.ie/realtime/realtime.asmx/getCurrentTrainsXML"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'xml')
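# Each train appears as an <objTrainPositions> element in the feed; the tags
# below are the fields of interest.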
retrieveTags=['TrainStatus',
'TrainLatitude',
'TrainLongitude',
'TrainCode',
'TrainDate',
'PublicMessage',
'Direction'
]
with open('week03_train.csv', mode='w', newline='') as train_file:
train_writer = csv.writer(train_file, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
listings = soup.findAll("objTrainPositions")
for listing in listings:
#print(listing)
print(listing.TrainLatitude.string)
# or
# print(listing.find('TrainLatitude').string)
        lat = float(listing.TrainLatitude.string)
if (lat < 53.4):
entryList = []
entryList.append(listing.find('TrainLatitude').string)
train_writer.writerow(entryList)
#print (soup.prettify())
|
the-stack_106_21634
|
"""Secrets Provider for AWS Secrets Manager."""
import base64
import json
try:
import boto3
from botocore.exceptions import ClientError
except (ImportError, ModuleNotFoundError):
boto3 = None
from django import forms
from nautobot.utilities.forms import BootstrapMixin
from nautobot.extras.secrets import exceptions, SecretsProvider
__all__ = ("AWSSecretsManagerSecretsProvider",)
class AWSSecretsManagerSecretsProvider(SecretsProvider):
"""A secrets provider for AWS Secrets Manager."""
slug = "aws-secrets-manager"
name = "AWS Secrets Manager"
is_available = boto3 is not None
class ParametersForm(BootstrapMixin, forms.Form):
"""Required parameters for AWS Secrets Manager."""
name = forms.CharField(
required=True,
help_text="The name of the AWS Secrets Manager secret",
)
region = forms.CharField(
required=True,
help_text="The region name of the AWS Secrets Manager secret",
)
key = forms.CharField(
required=True,
help_text="The key of the AWS Secrets Manager secret",
)
@classmethod
def get_value_for_secret(cls, secret, obj=None, **kwargs):
"""Return the secret value by name and region."""
# Extract the parameters from the Secret.
parameters = secret.rendered_parameters(obj=obj)
secret_name = parameters.get("name")
secret_key = parameters.get("key")
region_name = parameters.get("region")
# Create a Secrets Manager client.
session = boto3.session.Session()
client = session.client(service_name="secretsmanager", region_name=region_name)
# This is based on sample code to only handle the specific exceptions for the 'GetSecretValue' API.
# See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
# We rethrow the exception by default.
try:
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
except ClientError as err:
if err.response["Error"]["Code"] == "DecryptionFailureException": # pylint: disable=no-else-raise
# Secrets Manager can't decrypt the protected secret text using the provided KMS key.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretProviderError(secret, cls, str(err))
elif err.response["Error"]["Code"] == "InternalServiceErrorException":
# An error occurred on the server side.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretProviderError(secret, cls, str(err))
elif err.response["Error"]["Code"] == "InvalidParameterException":
# You provided an invalid value for a parameter.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretParametersError(secret, cls, str(err))
elif err.response["Error"]["Code"] == "InvalidRequestException":
# You provided a parameter value that is not valid for the current state of the resource.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretProviderError(secret, cls, str(err))
elif err.response["Error"]["Code"] == "ResourceNotFoundException":
# We can't find the resource that you asked for.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretValueNotFoundError(secret, cls, str(err))
else:
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
if "SecretString" in get_secret_value_response:
secret_value = get_secret_value_response["SecretString"]
else:
# TODO(jathan): Do we care about this? Let's figure out what to do about a binary value?
secret_value = base64.b64decode(get_secret_value_response["SecretBinary"]) # noqa
# If we get this far it should be valid JSON.
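            # e.g. a SecretString of '{"password": "s3cr3t"}' with key="password"
            # returns "s3cr3t" (names here are purely illustrative).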
data = json.loads(secret_value)
# Retrieve the value using the key or complain loudly.
try:
return data[secret_key]
except KeyError as err:
msg = f"The secret value could not be retrieved using key {err}"
raise exceptions.SecretValueNotFoundError(secret, cls, msg) from err
|
the-stack_106_21635
|
from datetime import datetime, date, timedelta
from django.test import TestCase
from selvbetjening.core.user.models import SUser
from selvbetjening.core.events.models import Attend, Event, OptionGroup
from selvbetjening.core.events.options.dynamic_selections import dynamic_selections_form_factory, _pack_id, SCOPE, \
dynamic_selections, dynamic_selections_formset_factory
import models
class Database(object):
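    # Test-data factory: builds events, users, attendances and option groups
    # with auto-generated unique names.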
_id = 0
@classmethod
def new_id(cls):
cls._id += 1
return str(cls._id)
@classmethod
def new_event(cls,
move_to_accepted_policy=None,
start_date=None):
kwargs = {}
if move_to_accepted_policy is not None:
kwargs['move_to_accepted_policy'] = move_to_accepted_policy
return models.Event.objects.create(title=cls.new_id(),
startdate=start_date if start_date is not None else date.today(),
enddate=date.today(),
registration_open=True,
**kwargs)
@classmethod
def new_user(cls, id=None):
if id is None:
id = cls.new_id()
        return SUser.objects.create_user(id, '%s@example.com' % id, id)
@classmethod
def attend(cls, user, event):
return models.Attend.objects.create(user=user, event=event)
@classmethod
def new_optiongroup(cls, event, min_select=0, max_select=0, freeze_time=None):
if freeze_time is None:
freeze_time = datetime.now() + timedelta(days=1)
return models.OptionGroup.objects.create(event=event,
name=cls.new_id(),
minimum_selected=min_select,
maximum_selected=max_select,
freeze_time=freeze_time)
@classmethod
def new_option(cls, optiongroup, name=None, order=0, id=None):
if name is None:
name = cls.new_id()
kwargs = {'group': optiongroup, 'name': name, 'order': order}
if id is not None:
kwargs['id'] = id
return models.Option.objects.create(**kwargs)
class AttendModelTestCase(TestCase):
def test_is_new(self):
user = Database.new_user()
event = Database.new_event()
attend = models.Attend.objects.create(user=user, event=event)
self.assertTrue(attend.is_new)
class EventModelTestCase(TestCase):
fixtures = ['sdemo-example-site.json']
def test_attendee_order(self):
event = Database.new_event()
self.userarray = []
for i in range(30):
self.userarray.append(SUser.objects.create_user('suser%s' % i, '[email protected]', ''))
models.Attend.objects.create(event=event, user=self.userarray[i])
for i in range(30):
self.assertEqual(event.attendees[i].user, self.userarray[i])
def test_remove_attendee(self):
user = Database.new_user()
event = Database.new_event()
attend = Database.attend(user, event)
self.assertTrue(event.is_attendee(user))
attend.delete()
self.assertFalse(event.is_attendee(user))
def test_copy(self):
event = Event.objects.get(pk=2)
old_pk = event.pk
self.assertNotEqual(len(event.optiongroups), 0)
event.copy_and_mutate_self()
self.assertNotEqual(old_pk, event.pk)
self.assertNotEqual(len(event.optiongroups), 0)
options = 0
for group in event.optiongroup_set.all():
options += group.option_set.all().count()
self.assertNotEqual(options, 0)
class DynamicSelectionsTestCase(TestCase):
fixtures = ['formbuilder_test_fixture.json']
def test_scopes(self):
event = Event.objects.get(pk=1)
attendee = Attend.objects.all()[0]
self.assertEqual(len(dynamic_selections(SCOPE.VIEW_REGISTRATION, attendee)), 4)
self.assertEqual(len(dynamic_selections(SCOPE.EDIT_REGISTRATION, attendee)), 4)
def test_ordering(self):
event = Event.objects.get(pk=1)
attendee = Attend.objects.get(pk=1)
selections = dynamic_selections(SCOPE.VIEW_REGISTRATION, attendee)
# correct ordering
# option group 1
# option 1
# option 2
# option group 2
# option 3
# option 4
self.assertEqual(selections[0][0].group.pk, 1)
self.assertEqual(selections[0][0].pk, 1)
self.assertEqual(selections[1][0].pk, 2)
self.assertEqual(selections[2][0].group.pk, 2)
self.assertEqual(selections[2][0].pk, 3)
self.assertEqual(selections[3][0].pk, 4)
class FormBuilderTestCase(TestCase):
fixtures = ['formbuilder_test_fixture.json']
def test_basic_form_building(self):
instance = OptionGroup.objects.all()[0]
user = SUser.objects.get(pk=1)
form_class = dynamic_selections_form_factory(SCOPE.SADMIN, instance)
form = form_class(user=user)
self.assertEqual(len(form.fields), 2)
def test_saving_selections_to_existing_attendee(self):
option_group = OptionGroup.objects.all()[0]
attendee = Attend.objects.all()[0]
OptionGroupSelectionsForm = dynamic_selections_form_factory(SCOPE.SADMIN, option_group)
form = OptionGroupSelectionsForm({}, user=attendee.user, attendee=attendee)
self.assertTrue(form.is_valid())
self.assertTrue(hasattr(form, 'cleaned_data'))
form.save()
for option, selected in dynamic_selections(SCOPE.VIEW_REGISTRATION, attendee, option_group=option_group):
self.assertFalse(selected)
post = {
_pack_id('option', 1): "1",
_pack_id('option', 2): "1"
}
form = OptionGroupSelectionsForm(post, user=attendee.user, attendee=attendee)
self.assertTrue(form.is_valid())
self.assertTrue(hasattr(form, 'cleaned_data'))
self.assertTrue(form.cleaned_data[_pack_id('option', 1)])
self.assertTrue(form.cleaned_data[_pack_id('option', 2)])
form.save()
for option, selected in dynamic_selections(SCOPE.VIEW_REGISTRATION,
attendee,
option_group=option_group):
self.assertTrue(selected)
def test_saving_selections_to_new_attendee(self):
option_group = OptionGroup.objects.all()[0]
attendee = Attend.objects.all()[0]
OptionGroupSelectionsForm = dynamic_selections_form_factory(SCOPE.SADMIN, option_group)
post = {
_pack_id('option', 1): "1",
_pack_id('option', 2): "1"
}
form = OptionGroupSelectionsForm(post, user=attendee.user)
self.assertTrue(form.is_valid())
self.assertTrue(hasattr(form, 'cleaned_data'))
self.assertTrue(form.cleaned_data[_pack_id('option', 1)])
self.assertTrue(form.cleaned_data[_pack_id('option', 2)])
form.save(attendee=attendee)
for option, selected in dynamic_selections(SCOPE.VIEW_REGISTRATION,
attendee,
option_group=option_group):
self.assertTrue(selected)
def test_delete_existing_selections(self):
option_group = OptionGroup.objects.all()[0]
attendee = Attend.objects.get(pk=2)
for option, selection in dynamic_selections(SCOPE.VIEW_REGISTRATION,
attendee,
option_group=option_group):
self.assertIsNotNone(selection)
OptionGroupSelectionsForm = dynamic_selections_form_factory(SCOPE.SADMIN, option_group)
form = OptionGroupSelectionsForm({}, user=attendee.user)
self.assertTrue(form.is_valid())
self.assertTrue(hasattr(form, 'cleaned_data'))
self.assertFalse(form.cleaned_data[_pack_id('option', 1)])
self.assertFalse(form.cleaned_data[_pack_id('option', 2)])
form.save(attendee=attendee)
for option, selected in dynamic_selections(SCOPE.VIEW_REGISTRATION, attendee, option_group=option_group):
self.assertFalse(selected)
def test_text_option_type(self):
option_group = OptionGroup.objects.get(pk=2)
attendee = Attend.objects.get(pk=2)
post = {
_pack_id("option", 3): "some text",
}
OptionGroupSelectionsForm = dynamic_selections_form_factory(SCOPE.SADMIN, option_group)
form = OptionGroupSelectionsForm(post, user=attendee.user, attendee=attendee)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data[_pack_id('option', 3)], "some text")
form.save()
selections = dynamic_selections(SCOPE.VIEW_REGISTRATION,
attendee,
option_group=option_group,
as_dict=True)
option, selection = selections[3]
self.assertIsNotNone(selection)
self.assertEqual(selection.text, "some text")
def test_choice_option_type(self):
option_group = OptionGroup.objects.get(pk=2)
attendee = Attend.objects.get(pk=2)
post = {
_pack_id("option", 4): "suboption_1",
}
OptionGroupSelectionsForm = dynamic_selections_form_factory(SCOPE.SADMIN, option_group)
form = OptionGroupSelectionsForm(post, user=attendee.user, attendee=attendee)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data[_pack_id('option', 4)], "suboption_1")
form.save()
selections = dynamic_selections(SCOPE.VIEW_REGISTRATION, attendee,
option_group=option_group,
as_dict=True)
option, selection = selections[4]
self.assertIsNotNone(selection)
self.assertIsNotNone(selection.suboption)
self.assertEqual(selection.suboption.pk, 1)
def test_option_scope(self):
event = Event.objects.filter(pk=2)
user = SUser.objects.get(pk=1)
# Tests visibility in the different edit scopes.
# The event has one group and 9 options, one for each possible visibility bit
# SCOPE: SADMIN - all visible
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.SADMIN, event)
form = OptionGroupSelectionsFormSet(user=user)
self.assertEqual(len(form), 1)
self.assertEqual(len(form[0].fields), 9)
# SCOPE: EDIT_MANAGE_WAITING - show in_scope_edit_manage_waiting and in_scope_view_manage (readonly)
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_MANAGE_WAITING, event)
form = OptionGroupSelectionsFormSet(user=user)
self.assertEqual(len(form), 1)
self.assertEqual(len(form[0].fields), 2)
# SCOPE: EDIT_MANAGE_ACCEPTED
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_MANAGE_ACCEPTED, event)
form = OptionGroupSelectionsFormSet(user=user)
self.assertEqual(len(form), 1)
self.assertEqual(len(form[0].fields), 2)
# SCOPE: EDIT_MANAGE_ATTENDED
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_MANAGE_ATTENDED, event)
form = OptionGroupSelectionsFormSet(user=user)
self.assertEqual(len(form), 1)
self.assertEqual(len(form[0].fields), 2)
# SCOPE: EDIT_REGISTRATION
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_REGISTRATION, event)
form = OptionGroupSelectionsFormSet(user=user)
self.assertEqual(len(form), 1)
self.assertEqual(len(form[0].fields), 2)
class FormSubmitTestCase(TestCase):
fixtures = ['form_submit_test_fixture.json']
def test_required_checkbox_sadmin(self):
event = Event.objects.get(pk=1)
user = SUser.objects.get(pk=1)
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.SADMIN, event)
form = OptionGroupSelectionsFormSet({
}, user=user)
self.assertTrue(form.is_valid())
def test_required_checkbox(self):
event = Event.objects.get(pk=1)
user = SUser.objects.get(pk=1)
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_REGISTRATION, event)
form = OptionGroupSelectionsFormSet({
}, user=user)
self.assertFalse(form.is_valid())
form = OptionGroupSelectionsFormSet({
'option_1': 'checked'
}, user=user)
self.assertTrue(form.is_valid())
def test_dependency(self):
event = Event.objects.get(pk=2)
user = SUser.objects.get(pk=1)
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.SADMIN, event)
form = OptionGroupSelectionsFormSet({
'option_2': 'checked',
'option_3': 'checked'
}, user=user)
self.assertTrue(form.is_valid())
self.assertTrue('option_2' in form[0].cleaned_data)
self.assertTrue('option_3' in form[0].cleaned_data)
# We should remove dependent selections automatically - even in sadmin scope
form = OptionGroupSelectionsFormSet({
'option_3': 'checked'
}, user=user)
self.assertTrue(form.is_valid())
self.assertFalse(form[0].cleaned_data.get('option_2', False))
self.assertFalse(form[0].cleaned_data.get('option_3', False))
def test_dependency_required_all_selected(self):
event = Event.objects.get(pk=3)
user = SUser.objects.get(pk=1)
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_REGISTRATION, event)
# Both selected
form = OptionGroupSelectionsFormSet({
'option_4': 'checked',
'option_5': 'checked'
}, user=user)
self.assertTrue(form.is_valid())
self.assertTrue(form[0].cleaned_data.get('option_4', False))
self.assertTrue(form[0].cleaned_data.get('option_5', False))
def test_dependency_required_dependency_not_selected(self):
event = Event.objects.get(pk=3)
user = SUser.objects.get(pk=1)
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_REGISTRATION, event)
# Dependency not selected, so both should be deselected
form = OptionGroupSelectionsFormSet({
'option_5': 'checked'
}, user=user)
self.assertTrue(form.is_valid())
self.assertFalse(form[0].cleaned_data.get('option_4', False))
self.assertFalse(form[0].cleaned_data.get('option_5', False))
def test_dependency_required_dependency_none_selected(self):
event = Event.objects.get(pk=3)
user = SUser.objects.get(pk=1)
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_REGISTRATION, event)
# None selected, but still valid
form = OptionGroupSelectionsFormSet({
}, user=user)
self.assertTrue(form.is_valid())
self.assertFalse(form[0].cleaned_data.get('option_4', False))
self.assertFalse(form[0].cleaned_data.get('option_5', False))
def test_dependency_required_dependency_dependency_selected(self):
event = Event.objects.get(pk=3)
user = SUser.objects.get(pk=1)
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_REGISTRATION, event)
# Fail if the dependency is checked, but the required child is not
form = OptionGroupSelectionsFormSet({
'option_4': 'checked'
}, user=user)
self.assertFalse(form.is_valid())
def test_minimum_selected(self):
event = Event.objects.get(pk=2)
user = SUser.objects.get(pk=1)
group = OptionGroup.objects.get(pk=2)
group.minimum_selected = 2
group.save()
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_REGISTRATION, event)
form = OptionGroupSelectionsFormSet({
'option_2': 'checked',
'option_3': 'checked'
}, user=user)
self.assertTrue(form.is_valid())
form = OptionGroupSelectionsFormSet({
'option_2': 'checked'
}, user=user)
self.assertFalse(form.is_valid())
def test_maximum_selected(self):
event = Event.objects.get(pk=2)
user = SUser.objects.get(pk=1)
group = OptionGroup.objects.get(pk=2)
group.maximum_selected = 1
group.save()
OptionGroupSelectionsFormSet = dynamic_selections_formset_factory(SCOPE.EDIT_REGISTRATION, event)
form = OptionGroupSelectionsFormSet({
'option_2': 'checked',
'option_3': 'checked'
}, user=user)
self.assertFalse(form.is_valid())
form = OptionGroupSelectionsFormSet({
'option_2': 'checked'
}, user=user)
self.assertTrue(form.is_valid())
|
the-stack_106_21639
|
import json
import codecs
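# Number of spaces used for each nesting level when rendering the menu tree.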
MENU_INDENTATION_LEVEL = 4
def get_menu_item_by_id(menu_list, menu_id):
menu = [menu for menu in menu_list if menu['id'] == menu_id]
return menu[0]
def build_tree_string(menu_id, title, indent_level, options, tree_trunk="", program_name=""):
if indent_level != 0:
indent_str = list(indent_level * MENU_INDENTATION_LEVEL * " ")
for i in range(0, indent_level):
change_index = i * MENU_INDENTATION_LEVEL
indent_str[change_index + 1] = u"┃"
indent_str = "".join(indent_str)
else:
indent_str = ""
if menu_id != "":
menu_id = " [" + menu_id + "] "
else:
menu_id = " "
base_str = "{0} {1} {2}{3}{4}".format(indent_str, tree_trunk, options, menu_id, title.strip())[3:]
if program_name != "":
width = 70 - len(base_str)
program_name = "".rjust(width, ".") + ("| PROGRAM: " + program_name)
base_str = base_str + program_name
return base_str + "\n"
def build_menu_tree(menu_list, menu_id="EQ", title="Master Menu", indent=0, options="", tree_trunk=""):
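    # Depth-first walk of the menu: each child inherits the dash-joined option
    # path (e.g. "1-2"), and leaf items are rendered with their program name.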
menu = get_menu_item_by_id(menu_list, menu_id)
result_str = build_tree_string(menu["id"], title, indent, options, tree_trunk)
for i, item in enumerate(menu["items"]):
sub_menu = item["subMenu"]
new_options = [item["option"]]
if len(options) > 0:
new_options.insert(0, options)
new_options = "-".join(new_options)
item_title = item["title"]
next_indent_level = indent + 1
tree_trunk_symb = u"┝" if i != len(menu["items"]) - 1 else u"┕"
if len(sub_menu) > 0:
result_str += build_menu_tree(menu_list, sub_menu, item_title, next_indent_level, new_options,
tree_trunk_symb)
else:
program = item.get("program", {
"name": ""
})
result_str += build_tree_string("", item_title, next_indent_level, new_options, tree_trunk_symb, program["name"])
return result_str
with open("menu.json") as json_data, codecs.open("list.txt", "w", "utf-8") as menu_list_file:
data = json.load(json_data)
menu_list_file.write(build_menu_tree(data))
|
the-stack_106_21640
|
### ENVIRONMENT ====
### . modules ----
import openeo
import georaster
import matplotlib.pyplot as plt
import numpy as np
### . openeo ----
connection = openeo.connect("https://openeo.vito.be").authenticate_basic("test", "test123")
### PROCESSING ====
### . ndwi ----
sentinel2_data_cube = connection.load_collection(
"SENTINEL2_L2A_SENTINELHUB"
, spatial_extent = {'west':5.151836872,'east':5.1533818244,'south':51.181925592,'north':51.184696360,'crs':4326}
, temporal_extent = ["2020-05-02", "2020-05-02"]
, bands = ['B08','B12']
)
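# NDWI computed as the normalized difference of NIR (B08) and SWIR (B12):
# (B08 - B12) / (B08 + B12).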
B08 = sentinel2_data_cube.band('B08')
B12 = sentinel2_data_cube.band('B12')
ndwi_cube = (B08 - B12) / (B08 + B12)
ndwi_cube.download("data/ndwi.tif", format = "GTiff")
# Use SingleBandRaster() if image has only one band
img = georaster.SingleBandRaster('data/ndwi.tif')
# img.r gives the raster in [height, width, band] format
# band no. starts from 0
plt.imshow(img.r)
plt.show()
plt.clf()
### . scl ----
s2_scl = connection.load_collection(
"SENTINEL2_L2A_SENTINELHUB"
, spatial_extent = {'west':5.151836872,'east':5.1533818244,'south':51.181925592,'north':51.184696360,'crs':4326}
, temporal_extent = ["2020-05-02", "2020-05-02"]
, bands = ['SCL']
)
mask = s2_scl.band('SCL')
mask.download("data/scl.tif", format = "GTiff")
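# SCL (scene classification) value 4 corresponds to vegetation; the mask is True
# for every non-vegetation pixel, which is then masked out of the NDWI cube.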
mask = mask != 4
ndwi_cube_masked = ndwi_cube.mask(mask) # .resample_cube_spatial(ndwi_cube)
ndwi_cube_masked.download("data/ndwi_masked.tif", format = "GTiff")
img1 = georaster.SingleBandRaster('data/ndwi_masked.tif')
plt.imshow(img1.r)
plt.show()
plt.clf()
georaster.MultiBandRaster
img - img1
|
the-stack_106_21645
|
from pygments.style import Style
from pygments.token import (
Comment, Error, Keyword, Literal, Name, Number, Operator, String, Text
)
class Base16Style(Style):
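    # Base16 palette slots base00-base0f; this scheme maps all of them to
    # shades of green for a "green screen" look.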
base00 = '#001100'
base01 = '#003300'
base02 = '#005500'
base03 = '#007700'
base04 = '#009900'
base05 = '#00bb00'
base06 = '#00dd00'
base07 = '#00ff00'
base08 = '#007700'
base09 = '#009900'
base0a = '#007700'
base0b = '#00bb00'
base0c = '#005500'
base0d = '#009900'
base0e = '#00bb00'
base0f = '#005500'
default_style = ''
background_color = base00
highlight_color = base02
styles = {
Text: base05,
Error: base08, # .err
Comment: base03, # .c
Comment.Preproc: base0f, # .cp
Comment.PreprocFile: base0b, # .cpf
Keyword: base0e, # .k
Keyword.Type: base08, # .kt
Name.Attribute: base0d, # .na
Name.Builtin: base0d, # .nb
Name.Builtin.Pseudo: base08, # .bp
Name.Class: base0d, # .nc
Name.Constant: base09, # .no
Name.Decorator: base09, # .nd
Name.Function: base0d, # .nf
Name.Namespace: base0d, # .nn
Name.Tag: base0e, # .nt
Name.Variable: base0d, # .nv
Name.Variable.Instance: base08, # .vi
Number: base09, # .m
Operator: base0c, # .o
Operator.Word: base0e, # .ow
Literal: base0b, # .l
String: base0b, # .s
String.Interpol: base0f, # .si
String.Regex: base0c, # .sr
String.Symbol: base09, # .ss
}
from string import capwords # noqa: E402
Base16Style.__name__ = 'Base16{}Style'.format(
capwords('greenscreen', '-').replace('-', '')
)
globals()[Base16Style.__name__] = globals()['Base16Style']
del globals()['Base16Style']
del capwords
|
the-stack_106_21647
|
#!/usr/bin/env python3
from itertools import combinations
import pickle
from random import choice, shuffle
from remi import gui, start, App
from survey_utils import (ExperimentType, User, Experiment, TlxComponent, Tlx,
Question, Survey)
class MyApp(App):
def __init__(self, *args):
super().__init__(*args)
def main(self):
self.users = []
self.save_location = "data.pickle"
container = gui.Widget(width=500, margin="0px auto")
menu = gui.Menu(width="100%", height="30px")
menu_file = gui.MenuItem("File", width=100, height=30)
menu_load = gui.MenuItem("Load...", width=100, height=30)
menu_load.set_on_click_listener(self.cbk_select_pickle)
menu_save = gui.MenuItem("Save", width=100, height=30)
menu_save.set_on_click_listener(self.cbk_save)
menu_save_as = gui.MenuItem("Save As...", width=100, height=30)
menu_save_as.set_on_click_listener(self.cbk_save_as)
menu.append(menu_file)
menu_file.append(menu_load)
menu_file.append(menu_save)
menu_file.append(menu_save_as)
menubar = gui.MenuBar(width="100%", height="30px")
menubar.append(menu)
self.uploader = gui.FileUploader("./", margin="10px")
self.uploader.set_on_success_listener(self.cbk_load)
self.save_location_label = gui.Label(f"Saving to {self.save_location}", margin="10px")
self.table = gui.Table.new_from_list([("ID", "Name", "Onboard", "SPIRIT", "Both")],
width=300, margin="10px")
select_user_label = gui.Label("Select a user:", margin="10px")
self.user_list = gui.ListView(margin="10px", width=300)
self.user_list.set_on_selection_listener(self.cbk_user_selected)
add_user_button = gui.Button("Add user", width=200, height=30, margin="10px")
add_user_button.set_on_click_listener(self.cbk_add_user)
save_button = gui.Button("Save", width=200, height=30, margin="10px")
save_button.set_on_click_listener(self.cbk_save)
try:
self._load(self.save_location)
except FileNotFoundError:
pass
self.update_table()
self.update_user_list()
container.append(menubar)
container.append(self.uploader)
container.append(self.save_location_label)
container.append(self.table)
container.append(select_user_label)
container.append(self.user_list)
container.append(add_user_button)
container.append(save_button)
return container
def update_user_list(self):
self.user_list.empty()
for user in self.users:
self.user_list.append(str(user), key=user)
def update_table(self):
self.table.empty(keep_title=True)
self.table.append_from_list([
(user.id_, user.name,
len([x for x in user.experiments if x.type_ == ExperimentType.Onboard]),
len([x for x in user.experiments if x.type_ == ExperimentType.Spirit]),
len([x for x in user.experiments if x.type_ == ExperimentType.Combined])
) for user in self.users])
def cbk_add_user(self, widget):
self.dialog = gui.GenericDialog(title="New user", message="Click Ok to save the user", width="500px")
self.dname = gui.TextInput(width=200, height=30)
self.dialog.add_field_with_label("dname", "Name", self.dname)
self.dage = gui.TextInput(width=200, height=30)
self.dialog.add_field_with_label("dage", "Age", self.dage)
self.dgender = gui.DropDown.new_from_list(["Female", "Male", "Other"], width=200, height=30)
self.dgender.select_by_value("Male")
self.dialog.add_field_with_label("dgender", "Gender", self.dgender)
self.dteleop = gui.TextInput(width=200, height=30)
self.dteleop.set_value("0")
self.dialog.add_field_with_label("dteleop", "Total hours flying teleoperated UAVs", self.dteleop)
self.dflying = gui.TextInput(width=200, height=30)
self.dflying.set_value("0")
self.dialog.add_field_with_label("dflying", "Total hours flying other vehicles", self.dflying)
self.dialog.set_on_confirm_dialog_listener(self.add_user_dialog_confirm)
self.dialog.show(self)
def cbk_user_selected(self, widget, user):
self.dialog = gui.GenericDialog(title="User page", message="Click Ok to return", width="500px")
self.dialog.add_field_with_label("dname", "ID", gui.Label(f"{user.id_}"))
self.dialog.add_field_with_label("dname", "Name", gui.Label(f"{user.name}"))
self.dnex = gui.Label(len(user.experiments))
self.dialog.add_field_with_label("dnex", "Number of experiments", self.dnex)
run_random_experiment_button = gui.Button("Run random experiment")
run_random_experiment_button.set_on_click_listener(self.run_random_experiment, user)
self._user_tlx_tables = {}
self.update_tabs(user)
self.dialog.add_field("drandom", run_random_experiment_button)
self.dialog.add_field("dtabs", self.tab_box)
self.dialog.set_on_confirm_dialog_listener(self.done_user_confirm)
self.dialog.show(self)
def update_tabs(self, user):
self.tab_box = gui.TabBox(width="80%")
for type_ in ExperimentType:
self.widget = gui.Widget(width="100%")
button = gui.Button(f"Run {type_.name} view experiment", margin="10px")
button.set_on_click_listener(self.run_experiment, user, type_)
self.widget.append(button)
if type_ in self._user_tlx_tables:
print("Found it!")
self.widget.append(self._user_tlx_tables[type_])
self.tab_box.add_tab(self.widget, f"{type_.name} view", None)
def add_user_dialog_confirm(self, widget):
name = self.dialog.get_field("dname").get_value()
age = self.dialog.get_field("dage").get_value()
gender = self.dialog.get_field("dgender").get_value()
teleop = self.dialog.get_field("dteleop").get_value()
flying = self.dialog.get_field("dflying").get_value()
new_user = User(name, age, gender, teleop, flying)
self.users.append(new_user)
self.update_table()
self.update_user_list()
def done_user_confirm(self, widget):
self.update_table()
def run_random_experiment(self, widget, user):
ran_types = {experiment.type_ for experiment in user.experiments}
if (ExperimentType.Onboard in ran_types) and (ExperimentType.Spirit in ran_types):
self.run_experiment(widget, user, choice(list(ExperimentType)))
elif (ExperimentType.Onboard not in ran_types) and (ExperimentType.Spirit not in ran_types):
self.run_experiment(widget, user, choice([ExperimentType.Onboard, ExperimentType.Spirit]))
elif ExperimentType.Onboard not in ran_types:
self.run_experiment(widget, user, ExperimentType.Onboard)
else:
self.run_experiment(widget, user, ExperimentType.Spirit)
def run_experiment(self, widget, user, type_):
self.dialog = gui.GenericDialog(title="Experiment", message=f"{user.name}, please run a {type_.name} view experiment.", width="600px")
tlx_button = gui.Button("NASA TLX")
tlx_button.set_on_click_listener(self.do_tlx, user, type_)
self.dialog.add_field("dtlxbutton", tlx_button)
survey_button = gui.Button("Survey")
survey_button.set_on_click_listener(self.do_survey, user, type_)
self.dialog.add_field("dsurveybutton", survey_button)
self.survey = Survey()
self.survey_sliders = {}
self.tlx = Tlx()
self.tlx_sliders = {}
experiment = Experiment(user, type_, self.survey, self.tlx)
self.dialog.set_on_confirm_dialog_listener(self.add_experiment, user, experiment)
self.dialog.show(self)
def do_tlx(self, widget, user, type_):
self.dialog = gui.GenericDialog(title="NASA-TLX", message=f"NASA Task Load Index for the {type_.name} view experiment performed by {user.name}. How much did each component contribute to your task load? (scale from 0 to 20)", width="600px")
for component in self.tlx.components.values():
self.dialog.add_field(component.code, gui.Label(f"{component.name}: {component.description}", margin="10px"))
slider = gui.Slider(component.score, 0, 20, 1, width="80%")
slider.set_oninput_listener(self.tlx_slider_changed, component.code)
slider_value = gui.Label(slider.get_value(), margin="10px")
self.tlx_sliders[component.code] = (slider, slider_value)
box = gui.Widget(width="100%", layout_orientation=gui.Widget.LAYOUT_HORIZONTAL, height=50)
box.append(slider_value)
box.append(slider)
self.dialog.add_field(component.code + "_score", box)
self.dialog.set_on_confirm_dialog_listener(self.tlx_done, user, type_)
self.dialog.show(self)
def tlx_slider_changed(self, widget, value, code):
self.tlx_sliders[code][1].set_text(value)
def tlx_done(self, widget, user, type_):
for code, (slider, slider_value) in self.tlx_sliders.items():
self.tlx.components[code].score = int(slider_value.get_text())
self._tlx_weighting(user, type_)
def do_survey(self, widget, user, type_):
self.dialog = gui.GenericDialog(title="Survey", message=f"Survey for the {type_.name} view experiment performed by {user.name}. How would you rate each item? (scale from 1 to 7)", width="600px")
for question in self.survey.questions.values():
self.dialog.add_field(question.code, gui.Label(f"{question.description}", margin="10px"))
slider = gui.Slider(question.score, 1, 7, 1, width="80%")
slider.set_oninput_listener(self.survey_slider_changed, question.code)
slider_value = gui.Label(slider.get_value(), margin="10px")
self.survey_sliders[question.code] = (slider, slider_value)
box = gui.Widget(width="100%", layout_orientation=gui.Widget.LAYOUT_HORIZONTAL, height=50)
box.append(slider_value)
box.append(slider)
self.dialog.add_field(question.code + "_score", box)
self.longform = gui.TextInput(single_line=False, hint="What other things would you like to say?", height="100px", margin="10px")
self.dialog.add_field("dlongformlabel", gui.Label("Other feedback:", margin="10px"))
self.dialog.add_field("dlongform", self.longform)
self.dialog.set_on_confirm_dialog_listener(self.survey_done, user, type_)
self.dialog.show(self)
def survey_slider_changed(self, widget, value, code):
self.survey_sliders[code][1].set_text(value)
def survey_done(self, widget, user, type_):
for code, (slider, slider_value) in self.survey_sliders.items():
self.survey.questions[code].score = int(slider_value.get_text())
self.survey.longform = self.longform.get_text()
def _tlx_weighting(self, user, type_):
self.all_combos = list(list(pair) for pair in combinations(self.tlx.components.keys(), 2))
shuffle(self.all_combos)
self.weights = {k: 0 for k in self.tlx.components.keys()}
self.weight_index = 0
self.pair = ["", ""]
self.dialog = gui.GenericDialog(title="NASA-TLX Weighting", message=f"NASA Task Load Index for the {type_.name} view experiment performed by {user.name}. Which component do you feel contributed more to your task load?", width="300px")
self.weight_progress_label = gui.Label(f"1/{len(self.all_combos)}")
self.dialog.add_field("dweightprogress", self.weight_progress_label)
box = gui.HBox(width="100%", height=50, margin="10px")
self.button_left = gui.Button("", margin="10px")
self.button_right = gui.Button("", margin="10px")
box.append(self.button_left)
box.append(self.button_right)
self.dialog.add_field("dweightbox", box)
self.pair = self.all_combos[self.weight_index]
shuffle(self.pair)
self.button_left.set_text(self.tlx.components[self.pair[0]].name)
self.button_right.set_text(self.tlx.components[self.pair[1]].name)
self.button_left.set_on_click_listener(self.weight_button_pressed, self.pair[0])
self.button_right.set_on_click_listener(self.weight_button_pressed, self.pair[1])
self.dialog.set_on_confirm_dialog_listener(self.weighting_done)
self.dialog.show(self)
def weighting_done(self, widget):
for code, weight in self.weights.items():
self.tlx.components[code].weight = weight
def weight_button_pressed(self, widget, code):
        self.weights[code] += 1
        self.weight_index += 1
        # all pairwise comparisons done (len(self.all_combos) == 15 for the six TLX components)
        if self.weight_index >= len(self.all_combos):
            self.dialog.confirm_dialog()
            return
        self.weight_progress_label.set_text(f"{self.weight_index + 1} / {len(self.all_combos)}")
self.pair = self.all_combos[self.weight_index]
shuffle(self.pair)
self.button_left.set_text(self.tlx.components[self.pair[0]].name)
self.button_right.set_text(self.tlx.components[self.pair[1]].name)
self.button_left.set_on_click_listener(self.weight_button_pressed, self.pair[0])
self.button_right.set_on_click_listener(self.weight_button_pressed, self.pair[1])
def add_experiment(self, widget, user, experiment):
user.experiments.append(experiment)
self.update_tabs(user)
self.dnex.set_text(len(user.experiments))
def cbk_save(self, widget):
self._save()
def _save(self):
with open(self.save_location, "wb") as fout:
pickle.dump(self.users, fout)
self.notification_message("Saved", f"Data saved successfully to {self.save_location}")
def _get_new_save_location(self, save_as=False):
self.input_dialog = gui.InputDialog("Save location", "Path", width=500, height=160)
self.input_dialog.set_on_confirm_value_listener(self.change_save_location, save_as)
self.input_dialog.show(self)
def change_save_location(self, widget, value, save_as):
self.save_location = value
self.save_location_label.set_text(f"Saving to {value}")
if save_as:
self._save()
def cbk_save_as(self, widget):
self._get_new_save_location(save_as=True)
def cbk_select_pickle(self, widget):
file_selector = gui.FileSelectionDialog("File Selection Dialog", "Select data pickle.", False, ".")
file_selector.set_on_confirm_value_listener(self.cbk_load)
file_selector.show(self)
def _load(self, filename):
with open(filename, "rb") as fin:
self.users = pickle.load(fin)
User.count = max(user.id_ for user in self.users) + 1
self.update_table()
self.update_user_list()
def cbk_load(self, widget, filenames):
if isinstance(filenames, list):
filenames = filenames[0]
self._load(filenames)
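# --- Hedged illustration (not part of the original app) ---
# A minimal sketch of how an overall weighted NASA-TLX score could be derived from the Tlx
# object gathered above, assuming each component exposes the .score (0-20 slider value) and
# .weight (pairwise-comparison tally) attributes set by tlx_done() and weighting_done().
# The normalization choice is an assumption, not part of the original code.
def weighted_tlx_score(tlx):
    total_weight = sum(c.weight for c in tlx.components.values())  # 15 when every comparison is counted
    if total_weight == 0:
        return 0.0
    return sum(c.score * c.weight for c in tlx.components.values()) / total_weight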
if __name__ == "__main__":
start(MyApp, title="Dashboard | SPIRIT")
|
the-stack_106_21649
|
import pandas as pd
import numpy as np
def construct_freq_df(df_copy):
'''
    Construct a dataframe whose indices are separated by a delta of 1 min from the Market Data
    and put it in a format from which Markov matrices can be obtained via the pd.crosstab() method
'''
#This is here in case user passes the actual dataframe, we do not want to modify the actual dataframe
df = df_copy.copy()
#Blank dataframe placeholder
frames = pd.DataFrame()
#Set the index to timestamp and convert it to pd timestamp
#The datatype of the timestamp column should be string
df.set_index('timestamp', inplace=True)
df.index = pd.to_datetime(df.index)
    #We need to get customer behaviour from entry to checkout for each unique customer
for customer in df['customer_no'].unique():
#get customer
temp_df = df[df['customer_no'] == customer]
        #expand timestamp index such that delta T is 1 min, and forward fill aisles
temp_df = temp_df.asfreq('T',method='ffill')
        #insert 'entry' 1 min before first aisle
#re sort index so that times make sense
#(WE MIGHT NEED TO SKIP THIS NOT SURE IF ENTRY STATE IS REQUIRED)
temp_df.loc[temp_df.index[0] - pd.to_timedelta('1min')] = [customer,'entry']
temp_df.sort_index(inplace=True)
#after is simply a shift(-1) of current location
#checkout location does not have an after, so drop the NA's here
temp_df['after'] = temp_df['location'].shift(-1)
temp_df.dropna(inplace=True)
#join the frequency table for each customer
frames = pd.concat([frames, temp_df], axis=0)
#return the frequency frame
return frames
def generate_markov_matrix(df_copy):
'''
    Generate the Markov matrix for a Market Data dataframe structured by the construct_freq_df() function
    NOTE: Columns indicate the current state, rows indicate the next ('after') state; probabilities are read as
    current -> after transition probabilities, so each column should sum to 1
'''
df = df_copy.copy()
return pd.crosstab(df['after'], df['location'], normalize=1)
class CustomerOld:
def __init__(self, idn, state, transition_mat):
self.id = idn
self.state = state
self.transition_mat = transition_mat
def __repr__(self):
"""
Returns a csv string for that customer.
"""
return f'{self.id};{self.state}'
def is_active(self):
"""
Returns True if the customer has not reached the checkout
for the second time yet, False otherwise.
"""
if self.state != 'checkout':
return True
if self.state == 'checkout':
return False
def next_state(self):
"""
Propagates the customer to the next state
using a weighted random choice from the transition probabilities
conditional on the current state.
Returns nothing.
"""
# Below are just dummy probas for testing purposes
#self.state = np.random.choice(['Spices', 'Drinks', 'Fruits', 'Dairy', 'Checkout'], p=[0.2, 0.2, 0.1, 0.2, 0.3])
dairy_array = self.transition_mat[0,:]
drinks_array = self.transition_mat[1,:]
entry_array = self.transition_mat[2,:]
fruit_array = self.transition_mat[3,:]
spices_array = self.transition_mat[4,:]
        # elif chain so that a freshly assigned state is not immediately re-evaluated in the same call
        if self.state == 'dairy':
            self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=dairy_array)
        elif self.state == 'drinks':
            self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=drinks_array)
        elif self.state == 'entry':
            self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=entry_array)
        elif self.state == 'fruit':
            self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=fruit_array)
        elif self.state == 'spices':
            self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=spices_array)
class Customer:
def __init__(self, idn, state, transition_mat):
self.id = idn
self.state = state
self.transition_mat = transition_mat
self.tr_array_dict = {
'dairy' : self.transition_mat[0,:],
'drinks' : self.transition_mat[1,:],
'entry' : self.transition_mat[2,:],
'fruit' : self.transition_mat[3,:],
'spices' : self.transition_mat[4,:]
}
def __repr__(self):
"""
Returns a csv string for that customer.
"""
return f'{self.id};{self.state}'
def is_active(self):
"""
Returns True if the customer has not reached the checkout
for the second time yet, False otherwise.
"""
if self.state != 'checkout':
return True
if self.state == 'checkout':
return False
def next_state(self):
"""
Propagates the customer to the next state
using a weighted random choice from the transition probabilities
conditional on the current state.
Returns nothing.
"""
self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])
class SuperMarket:
"""manages multiple Customer instances that are currently in the market.
"""
def __init__(self,transition_matrix):
#List contains the customer objects
self.customers = []
        #Timing stuff set to some defaults; open and close time get their values from the simulate() method when called
self.open_time = pd.to_datetime('08:00',format='%H:%M')
self.close_time = pd.to_datetime('17:00',format='%H:%M')
self.current_time = pd.to_datetime('08:00',format='%H:%M')
#Customer id counter, so that we can consistently assign ids to new customers
self.last_id = 0
        #current and total state during a simulation; total_state is initiated like this because it becomes the header of the dataframe
        #returned from the results() method, and it needs to be in 1x3 shape for np.vstack() to work in update_total_state()
self.current_state = np.array([])
self.total_state = np.array(['timestamp','customer_id','customer_location'])
#transition matrix is assigned when initiating the SuperMarket object
self.transition_matrix = transition_matrix
def __repr__(self):
pass
def write_current_state(self):
"""
writes the current state during a simulation. Makes rows with current time, customer.id and customer.state of current customers in the market
"""
self.current_state = np.array([[self.current_time, customer.id, customer.state] for customer in self.customers])
def update_total_state(self):
"""
        updates the total state. It is extended with the current state throughout a simulation, which yields the final
        simulation data; this can be accessed directly or returned as a neat dataframe by the results() method
"""
self.total_state = np.vstack((self.total_state,self.current_state))
def next_minute(self):
"""propagates all customers to the next state. Adds one minute to current time and updates all customers in the market to their next state
"""
self.current_time += pd.Timedelta(1,'min')
#self.customers = [customer.next_state() for customer in self.customers]
for customer in self.customers:
customer.next_state()
#return get_time()
def add_new_customers(self, n_customers):
"""randomly creates new customers. Adds n_customer number of customers to the current list, they all start at the entry, and assigned
an id that is +1 of the current id. Afterwards updates the last id by the latest customer
"""
self.customers = self.customers + [Customer(self.last_id + 1 + i, 'entry', self.transition_matrix) for i in range(n_customers)]
self.last_id = self.customers[-1].id
def remove_exiting_customers(self):
"""removes every customer that is not active any more. Goes through the customer list and if they are active keeps them,
the ones in checkout are dropped
"""
self.customers = [customer for customer in self.customers if customer.is_active() == True]
def count_checkout(self):
"""
        counts the number of customers that are at checkout in the current_state. This would be easier if current_state were a dataframe,
        but since it is a numpy matrix we take the submatrix of rows whose 3rd column is 'checkout' and "pseudo count" them via its shape
"""
row_mask = (self.current_state[:,2] == 'checkout')
return self.current_state[row_mask,:].shape[0]
def simulate(self,initial_customers=20,open_time='8:00',close_time='8:10'):
"""
Simulates the SuperMarket. Gets initial customers, opening time and closing time from the user
"""
self.current_state = np.array([])
self.total_state = np.array(['timestamp','customer_id','customer_location'])
#Timing stuff
self.open_time = pd.to_datetime(open_time,format='%H:%M')
self.close_time = pd.to_datetime(close_time,format='%H:%M')
self.current_time = self.open_time
#We first add initial_customers of customers at the entry
self.add_new_customers(initial_customers)
        #We simulate until the market closes
while self.current_time <= self.close_time:
#write the current state and update the total state
self.write_current_state()
self.update_total_state()
#get the number of customers at checkout
n_checkout = self.count_checkout()
#remove the customers who are at checkout
self.remove_exiting_customers()
#advance to next minute (also updates the states of the current customers)
self.next_minute()
            #POTENTIAL BUG: this is kind of weird, we should not need the if statement, but if it is not there it somehow adds new customers;
            #needs a revisit
if n_checkout > 0:
self.add_new_customers(n_checkout)
def results(self):
'''
Returns Simulation results in a DataFrame. Simply converts the total_state numpy matrix to a more friendly dataframe
'''
data = self.total_state[1:,1:]
index = self.total_state[1:,0]
columns = self.total_state[0,1:]
return pd.DataFrame(data=data,index=index,columns=columns)
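# --- Hedged usage sketch (not part of the original module) ---
# Shows how the pieces above are meant to fit together: build the 1-minute frequency frame
# from raw market data, derive the transition matrix, and run a simulation. The CSV path and
# separator are placeholders, and the transpose assumes pd.crosstab's alphabetical ordering
# lines up with the row/column order hard-coded in Customer.tr_array_dict.
if __name__ == "__main__":
    raw = pd.read_csv("market_data.csv", sep=";")       # hypothetical input file
    freq = construct_freq_df(raw)
    markov_df = generate_markov_matrix(freq)            # columns (current state) sum to 1
    transition_matrix = markov_df.T.to_numpy()          # rows become the current state, as Customer expects
    market = SuperMarket(transition_matrix)
    market.simulate(initial_customers=20, open_time="8:00", close_time="9:00")
    print(market.results().head())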
|
the-stack_106_21650
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.10 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' variables were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "v"
parentdir_prefix = "gr4j-"
versionfile_source = "gr4j/_version.py"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded variables.
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
except NameError:
return default
return (versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
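# --- Hedged usage sketch (not part of the generated file) ---
# In a project using this versioneer snippet, version info is usually obtained like this;
# the "gr4j" package name comes from versionfile_source above, everything else is illustrative.
if __name__ == "__main__":
    info = get_versions(verbose=True)
    print("version: %s" % info["version"])
    print("full revision: %s" % info["full"])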
|
the-stack_106_21651
|
from . import Databricks
class Dashboards(Databricks.Databricks):
def __init__(self, url, token=None):
super().__init__(token)
self._url = url
self._api_type = 'preview/sql'
def listDashboards(self, page_size=None, page=None, order=None, q=None):
if order and (order not in ("name", "created_at")):
return "Order by " + str(order) + " not supported"
endpoint = 'dashboards?'
if page_size: endpoint = endpoint + "page_size="+str(page_size) + "&"
if page: endpoint = endpoint + "page=" + str(page) + "&"
if order: endpoint = endpoint + "order="+order + "&"
if q: endpoint = endpoint + "q="+q + "&"
url = self._set_url(self._url, self._api_type, endpoint)
return self._get(url)
def cloneDashboard(self, dashboard_definition):
endpoint = 'dashboards'
url = self._set_url(self._url, self._api_type, endpoint)
return self._post(url, dashboard_definition)
def updateDashboard(self, dashboard_id, dashboard_definition):
endpoint = 'dashboards/' + dashboard_id
url = self._set_url(self._url, self._api_type, endpoint)
return self._post(url, dashboard_definition)
def createDashboard(self, name, layout, dashboard_filters_enabled, widgets, is_trashed, is_draft, tags):
endpoint = 'dashboards'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {
"name": name
}
if layout: payload["layout"]=layout
if dashboard_filters_enabled == True:
payload["dashboard_filters_enabled"]=True
else:
payload["dashboard_filters_enabled"]=False
if widgets: payload["widgets"]=widgets
if is_trashed == True:
payload["is_trashed"]=True
else:
payload["is_trashed"]=False
if is_draft == True:
payload["is_draft"]=True
else:
payload["is_draft"]=False
if tags: payload["tags"]=tags
return self._post(url, payload)
def getDashboard(self, dashboard_id):
endpoint = 'dashboards/'+str(dashboard_id)
url = self._set_url(self._url, self._api_type, endpoint)
return self._get(url)
def getDashboardPermissions(self, dashboard_id):
endpoint = 'permissions/dashboards/'+str(dashboard_id)
url = self._set_url(self._url, self._api_type, endpoint)
return self._get(url)
def updateDashboardPermissions(self, dashboard_id, acl):
endpoint = 'permissions/dashboards/'+str(dashboard_id)
url = self._set_url(self._url, self._api_type, endpoint)
return self._post(url, acl)
def transferDashboard(self, dashboard_id, new_owner):
endpoint = 'permissions/dashboard/'+str(dashboard_id)+'/transfer'
url = self._set_url(self._url, self._api_type, endpoint)
payload = {"new_owner": new_owner}
return self._post(url, payload)
def deleteDashboard(self, dashboard_id):
endpoint = 'dashboards/'+str(dashboard_id)
url = self._set_url(self._url, self._api_type, endpoint)
return self._delete(url)
def restoreDashboard(self, dashboard_id):
endpoint = 'dashboards/trash/'+str(dashboard_id)
url = self._set_url(self._url, self._api_type, endpoint)
return self._post(url)
def createWidget(self, widget_definition):
endpoint = 'widgets'
url = self._set_url(self._url, self._api_type, endpoint)
return self._post(url, widget_definition)
def createVisualization(self, vis_definition):
endpoint = 'visualizations'
url = self._set_url(self._url, self._api_type, endpoint)
return self._post(url, vis_definition)
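# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how this wrapper might be driven; the workspace URL, token, and the exact URL
# format expected by the inherited _set_url/_get/_post helpers are assumptions here.
if __name__ == "__main__":
    client = Dashboards("https://example.cloud.databricks.com", token="dapi-XXXX")
    print(client.listDashboards(page_size=10, page=1, order="created_at"))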
|
the-stack_106_21652
|
#
# Copyright 2021 W. Beck Andrews
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
from mpi4py import MPI
import sys
import muSpectre as msp
import muFFT
import muGrid
class isotropic_tc():
def __init__(self):
self.name = "isotropic_tc"
def initialize_material(self,obj):
material = msp.material.MaterialLinearElastic4_2d.make(obj.cell, "material_small")
interp = obj.interp.energy(obj.phi.array())
for pixel, Cxval in np.ndenumerate(obj.Cx.array()):
pixel_id = np.ravel_multi_index(pixel, obj.fftengine.nb_subdomain_grid_pts, order='F')
material.add_pixel(pixel_id, obj.Cx.array()[tuple(pixel)]*interp[tuple(pixel)], obj.Poisson)
return material
def update_material(self, obj):
### set current material properties
interp = obj.interp.energy(obj.phi.array())
for pixel, Cxval in np.ndenumerate(obj.Cx.array()):
pixel_id = np.ravel_multi_index(pixel, obj.fftengine.nb_subdomain_grid_pts, order='C')
obj.material.set_youngs_modulus(pixel_id,Cxval*interp[tuple(pixel)])
def get_elastic_coupling(self, obj, strain_result):
lamb_factor = obj.Poisson/(1+obj.Poisson)/(1-2*obj.Poisson)
mu_factor = 1/2/(1+obj.Poisson)
for pixel, Cxval in np.ndenumerate(obj.Cx.array()):
pixel_id = np.ravel_multi_index(pixel, obj.fftengine.nb_subdomain_grid_pts, order='F')
strain = np.reshape(strain_result.grad[pixel_id*4:(pixel_id+1)*4],(obj.dim,obj.dim))
obj.strain.array()[:,0,pixel[0],pixel[1]] = strain.flatten()
trace = 0.0
for k in range(0,obj.dim):
trace += strain[k,k]
obj.straineng.array()[tuple(pixel)] = 0.5*obj.Cx.array()[tuple(pixel)]*(2.0*mu_factor*(strain**2).sum()
+ lamb_factor*trace**2)
def get_elastic_energy(self, obj):
return obj.straineng.array()*obj.interp.energy(obj.phi.array())
class anisotropic_tc():
def __init__(self):
self.name = "anisotropic_tc"
def initialize_material(self,obj):
material = msp.material.MaterialPhaseFieldFracture_2d.make(obj.cell, "material_small",obj.ksmall)
for pixel, Cxval in np.ndenumerate(obj.Cx.array()):
pixel_id = np.ravel_multi_index(pixel, obj.fftengine.nb_subdomain_grid_pts, order='F')
material.add_pixel(pixel_id, obj.Cx.array()[tuple(pixel)],
obj.Poisson, obj.phi.array()[tuple(pixel)])
return material
def update_material(self,obj):
### set current material properties
for pixel, Cxval in np.ndenumerate(obj.Cx.array()):
pixel_id = np.ravel_multi_index(pixel, obj.fftengine.nb_subdomain_grid_pts, order='C')
obj.material.set_phase_field(pixel_id, obj.phi.array()[tuple(pixel)])
### key parts of test system
def get_elastic_coupling(self,obj,strain_result):
lamb_factor = obj.Poisson/(1+obj.Poisson)/(1-2*obj.Poisson)
mu_factor = 1/2/(1+obj.Poisson)
for pixel, Cxval in np.ndenumerate(obj.Cx.array()):
pixel_id = np.ravel_multi_index(pixel, obj.fftengine.nb_subdomain_grid_pts, order='F')
strain = np.reshape(strain_result.grad[pixel_id*obj.dim**2:(pixel_id+1)*obj.dim**2],(obj.dim,obj.dim))
obj.strain.array()[:,0,pixel[0],pixel[1]] = strain.flatten()
pstrains = np.linalg.eigvalsh(strain)
obj.straineng.array()[tuple(pixel)] = Cxval*(np.maximum(np.sum(pstrains),0)**2*
lamb_factor*0.5 + np.sum(np.maximum(pstrains,0)**2)*mu_factor)
def get_compressive_energy(self,obj):
lamb_factor = obj.Poisson/(1+obj.Poisson)/(1-2*obj.Poisson)
mu_factor = 1/2/(1+obj.Poisson)
compressive_energy = np.zeros_like(obj.straineng.array())
for pixel, Cxval in np.ndenumerate(obj.Cx.array()):
pixel_id = np.ravel_multi_index(pixel, obj.fftengine.nb_subdomain_grid_pts, order='F')
strain = np.reshape(obj.strain.array()[:,0,pixel[0],pixel[1]],(obj.dim,obj.dim))
pstrains = np.linalg.eigvalsh(strain)
compressive_energy[tuple(pixel)] = Cxval*(np.minimum(np.sum(pstrains),0)**2*lamb_factor*0.5 +
np.sum(np.minimum(pstrains,0)**2)*mu_factor)
return compressive_energy
def get_elastic_energy(self, obj):
return (obj.interp.energy(obj.phi.array())*obj.straineng.array() + self.get_compressive_energy(obj))
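# --- Hedged illustration (not part of the original module) ---
# Plain-numpy restatement of the per-pixel isotropic strain-energy density computed in
# isotropic_tc.get_elastic_coupling above, i.e. 0.5*C*(2*mu_factor*eps:eps + lamb_factor*tr(eps)^2)
# with the same Poisson-ratio prefactors; the example strain values are arbitrary.
def _isotropic_strain_energy(C, poisson, strain):
    lamb_factor = poisson / (1 + poisson) / (1 - 2 * poisson)
    mu_factor = 1 / 2 / (1 + poisson)
    trace = np.trace(strain)
    return 0.5 * C * (2.0 * mu_factor * (strain ** 2).sum() + lamb_factor * trace ** 2)
# e.g. _isotropic_strain_energy(1.0, 0.3, np.array([[1e-3, 0.0], [0.0, -2e-4]]))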
|
the-stack_106_21657
|
#!/usr/bin/env python
#####################################################################################
#
# Copyright 2022 Quantinuum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#####################################################################################
"""Functions to generate error models as qiskit NoiseModel."""
import numpy as np
from scipy.linalg import expm
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise import pauli_error, depolarizing_error, coherent_unitary_error
sigmaz = np.array([[1,0],[0,-1]])
def arbitrary_noise(error_dict: dict,
nqubits: int,
act_outcomes: bool = False) -> NoiseModel:
"""
Make arbitrary error model for QV simulations.
Notes:
        - All error magnitudes are defined in terms of avg. fidelity
          except for 'meas', which is a failure probability.
Args:
        error_dict: dict of errors to include; key=name, value=avg. fidelity
nqubits: number of qubits (crosstalk only)
        act_outcomes: If True, also attach the measurement error to identity gates
Returns:
(NoiseModel) qiskit noise model
"""
sq_errors = []
tq_errors = []
# Coherent errors
if 'sq_coh' in error_dict and error_dict['sq_coh'] != 0:
theta = 2*np.arccos(np.sqrt((2 - 3*error_dict['sq_coh'])/2))
uni = expm(-1j * theta * sigmaz/2)
sq_errors.append(coherent_unitary_error(uni))
if 'tq_coh' in error_dict and error_dict['tq_coh'] != 0:
theta = 2*np.arccos(np.sqrt((4 - 5*error_dict['tq_coh'])/4))
uni = expm(-1j * theta * np.kron(sigmaz, sigmaz)/2)
tq_errors.append(coherent_unitary_error(uni))
# Depolarizing errors
if 'sq_dep' in error_dict and error_dict['sq_dep'] != 0:
sq_errors.append(depolarizing_error(2*error_dict['sq_dep'], 1))
if 'tq_dep' in error_dict and error_dict['tq_dep'] != 0:
tq_errors.append(depolarizing_error(4*error_dict['tq_dep']/3, 2))
    # Dephasing errors
if 'sq_dph' in error_dict and error_dict['sq_dph'] != 0:
dph = 3*error_dict['sq_dph']/2
sq_errors.append(pauli_error([('Z', dph), ('I', 1 - dph)]))
if 'tq_dph' in error_dict and error_dict['tq_dph'] != 0:
dph = 1 - np.sqrt(1 - (5/4)*error_dict['tq_dph'])
sq_channel = pauli_error([('Z', dph), ('I', 1 - dph)])
tq_errors.append(sq_channel.tensor(sq_channel))
# Prep errors
if 'prep' in error_dict and error_dict['prep'] != 0:
prep_error = pauli_error(
[('X', error_dict['prep']), ('I', 1 - error_dict['prep'])]
)
# Measurement errors
if 'meas' in error_dict and error_dict['meas'] != 0:
meas_error = pauli_error(
[('X', error_dict['meas']), ('I', 1 - error_dict['meas'])
])
# make noise model
noise_model = NoiseModel()
try:
total_sq = sq_errors[0]
for err in sq_errors[1:]:
total_sq = total_sq.compose(err)
noise_model.add_all_qubit_quantum_error(total_sq, ['u2', 'u3'])
except IndexError:
pass
try:
total_tq = tq_errors[0]
for err in tq_errors[1:]:
total_tq = total_tq.compose(err)
noise_model.add_all_qubit_quantum_error(total_tq, ['cx', 'cz'])
except IndexError:
pass
try:
noise_model.add_all_qubit_quantum_error(meas_error, ['measure'])
except UnboundLocalError:
pass
try:
noise_model.add_all_qubit_quantum_error(prep_error, ['u1'])
except UnboundLocalError:
pass
if act_outcomes and error_dict['meas'] != 0:
noise_model.add_all_qubit_quantum_error(meas_error, 'id')
# include crosstalk errors
if 'sq_cross' in error_dict and error_dict['sq_cross'] != 0:
dep = depolarizing_error(2*error_dict['sq_cross'], 1)
for n in range(nqubits):
noise_model.add_nonlocal_quantum_error(
dep,
['u2', 'u3'],
[n],
[(n + 1) % nqubits,
(n - 1) % nqubits]
)
if 'tq_cross' in error_dict and error_dict['tq_cross'] != 0:
dep = depolarizing_error(2*error_dict['tq_cross'], 1)
for n in range(nqubits):
for m in range(nqubits):
adjacent_list = [
(n+1)%nqubits,
(n-1)%nqubits,
(m+1)%nqubits,
(m-1)%nqubits
]
adjacent_list = [a for a in adjacent_list if a != n and a != m]
noise_model.add_nonlocal_quantum_error(
dep,
['cx', 'cz'],
[n, m],
adjacent_list
)
return noise_model
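# --- Hedged usage sketch (not part of the original module) ---
# Example of building a noise model from this helper; the chosen error channels and
# magnitudes are illustrative values, not recommended settings.
if __name__ == "__main__":
    example_errors = {
        'sq_dep': 2e-5,   # single-qubit depolarizing contribution
        'tq_dep': 2e-3,   # two-qubit depolarizing contribution
        'meas': 3e-3,     # measurement bit-flip probability
    }
    model = arbitrary_noise(example_errors, nqubits=4)
    print(model)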
|
the-stack_106_21658
|
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import globalClockDelta
from direct.distributed.ClockDelta import NetworkTimePrecision
import random
from direct.task.Task import Task
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directutil import Mopath
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
class Train(DirectObject):
notify = directNotify.newCategory('Train')
nameId = 0
Sfx_TrainPass = 'phase_10/audio/sfx/CBHQ_TRAIN_pass.ogg'
Sfx_TrainStopStart = 'phase_10/audio/sfx/CBHQ_TRAIN_stopstart.ogg'
LocomotiveFile = 'phase_10/models/cogHQ/CashBotLocomotive'
CarFiles = ['phase_10/models/cogHQ/CashBotBoxCar', 'phase_10/models/cogHQ/CashBotTankCar', 'phase_10/models/cogHQ/CashBotFlatCar']
CarLength = 88
MarkDelta = 15
def __init__(self, trackStartPos, trackEndPos, trackNum, numTotalTracks):
self.trackStartPos = trackStartPos
self.trackEndPos = trackEndPos
self.numCars = len(self.CarFiles)
self.locomotive = loader.loadModel(self.LocomotiveFile)
self.cars = []
self.trainPassingSfx = base.loader.loadSfx(self.Sfx_TrainPass)
self.trainStopStartSfx = base.loader.loadSfx(self.Sfx_TrainStopStart)
self.trainId = trackNum
self.bFlipped = False
if trackStartPos[0] < trackEndPos[0]:
self.locomotive.setHpr(180, 0, 0)
self.bFlipped = True
self.collNodeName = 'CollNode-%s' % self.trainId
self.firstMark = self.MarkDelta / numTotalTracks * trackNum
currentTime = self.__networkTimeInSeconds()
currentRun = int((currentTime - self.firstMark) / self.MarkDelta)
self.lastMark = currentRun * self.MarkDelta + self.firstMark
self.doNextRun(True)
self.hide()
def hide(self):
if self.locomotive:
self.locomotive.reparentTo(hidden)
def show(self):
if self.locomotive:
self.locomotive.reparentTo(render)
def __networkTimeInSeconds(self):
time = globalClockDelta.getRealNetworkTime(bits=32) / NetworkTimePrecision
return time
def doNextRun(self, bFirstRun = False):
if self.locomotive:
if bFirstRun:
nextMark = self.lastMark
else:
nextMark = self.lastMark + self.MarkDelta
self.nextRun.finish()
self.notify.debug('Next mark %s' % nextMark)
currentTime = self.__networkTimeInSeconds()
timeTillNextMark = nextMark - currentTime
self.notify.debug('Time diff %s' % timeTillNextMark)
runNumber = int((nextMark - self.firstMark) / self.MarkDelta)
S = random.getstate()
random.seed(self.trainId + runNumber)
self.nextRun = self.__getNextRun()
random.setstate(S)
self.__startNextRun(timeTillNextMark)
self.lastMark = nextMark
return Task.done
def __startNextRun(self, timeTillMark):
if self.locomotive:
self.__disableCollisions()
if timeTillMark > 0:
self.nextRun = Sequence(Wait(timeTillMark), self.nextRun)
self.nextRun.start()
else:
self.nextRun.start(-1 * timeTillMark)
self.__enableCollisions()
return Task.done
def __cleanupCars(self):
self.__disableCollisions()
for car in self.cars:
car.removeNode()
self.cars = []
def __getCars(self):
self.__cleanupCars()
numCarsThisRun = random.randrange(1, 10)
for nCar in range(numCarsThisRun):
carType = random.randrange(0, self.numCars)
car = loader.loadModel(self.CarFiles[carType])
car.reparentTo(self.locomotive)
car.setPos(self.CarLength * (nCar + 1), 0, 0)
self.cars.append(car)
def __showStart(self):
self.notify.debug('Starting train %s at %s.' % (self.trainId, self.__networkTimeInSeconds()))
def __getNextRun(self):
self.__getCars()
trainShouldStop = random.randrange(0, 4)
nextRun = Sequence(Func(self.__showStart))
if trainShouldStop == 0:
waitTime = 3
totalTime = random.randrange(4, (self.MarkDelta - waitTime) / 2)
sfxStopTime = 4.3
halfway = (self.trackStartPos + self.trackEndPos) / 2
halfway.setX(150)
nextRun.append(Parallel(Sequence(Wait(totalTime - sfxStopTime), SoundInterval(self.trainStopStartSfx, volume=0.5)), Sequence(LerpPosInterval(self.locomotive, totalTime, halfway, self.trackStartPos, blendType='easeInOut'), WaitInterval(waitTime), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, halfway, blendType='easeIn'))))
else:
totalTime = random.randrange(6, self.MarkDelta - 1)
sfxTime = 7
sfxStartTime = totalTime / 2 - sfxTime / 2
if self.bFlipped:
sfxStartTime -= 1
else:
sfxStartTime += 1
nextRun.append(Parallel(Sequence(Wait(sfxStartTime), SoundInterval(self.trainPassingSfx, volume=0.5)), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, self.trackStartPos)))
nextRun.append(Func(self.doNextRun))
return nextRun
def delete(self):
self.__cleanupCars()
self.locomotive.removeNode()
self.locomotive = None
self.nextRun.finish()
self.nextRun = None
del self.trainPassingSfx
del self.trainStopStartSfx
return
def uniqueName(self, name):
Train.nameId += 1
return name + '-%d' % Train.nameId
def __enableCollisions(self):
allColls = self.locomotive.findAllMatches('**/+CollisionNode')
for car in self.cars:
carColls = car.findAllMatches('**/+CollisionNode')
allColls += carColls
for collNode in allColls:
collNode.setName(self.collNodeName)
collNode.setCollideMask(ToontownGlobals.WallBitmask)
self.accept('enter' + self.collNodeName, self.__handleCollisionSphereEnter)
def __disableCollisions(self):
self.ignore('enter' + self.collNodeName)
def __handleCollisionSphereEnter(self, collEntry = None):
base.localAvatar.b_squish(10)
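# --- Hedged illustration (not part of the original module) ---
# The run scheduling in __init__ and doNextRun boils down to modular arithmetic on network time:
# each track gets a phase offset (firstMark) inside the shared MarkDelta period, and the most
# recent mark is recovered from the current time. Plain-Python restatement with arbitrary numbers.
def _last_mark(current_time, track_num, num_total_tracks, mark_delta=15):
    first_mark = mark_delta / num_total_tracks * track_num
    current_run = int((current_time - first_mark) / mark_delta)
    return current_run * mark_delta + first_mark
# e.g. _last_mark(100.0, track_num=2, num_total_tracks=4) -> 97.5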
|
the-stack_106_21659
|
import argparse
import rebuild_categories as rbldc_ctg
import render_categories as rndr_ctg
# Definition of arguments program
parser = argparse.ArgumentParser(description='An eBay category tree displayer')
parser.add_argument('--rebuild', action='store_true', default=False, \
help='Downloads a category tree from eBay and store it locally.')
parser.add_argument('--render', nargs=1, action='store', \
help='Renders a category tree view.')
args = parser.parse_args()
if args.rebuild:
from_api = rbldc_ctg.RebuildCategories()
from_api.rebuild()
if args.render:
display = rndr_ctg.RenderCategories()
display.render(args.render[0])
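# --- Hedged usage notes (not part of the original script) ---
# Typical invocations, assuming this file is saved as categories_cli.py (the filename is a
# placeholder) and that the rebuild/render modules are importable; "0" is an illustrative
# category id for the root of the tree.
#   python categories_cli.py --rebuild
#   python categories_cli.py --render 0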
|
the-stack_106_21660
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader
import monai
from monai.data import CSVSaver, NiftiDataset
from monai.transforms import AddChannel, Compose, Resize, ScaleIntensity, ToTensor
def main():
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
images = [
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI607-Guys-1097-T1.nii.gz"]),
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI175-HH-1570-T1.nii.gz"]),
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI385-HH-2078-T1.nii.gz"]),
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI344-Guys-0905-T1.nii.gz"]),
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI409-Guys-0960-T1.nii.gz"]),
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI584-Guys-1129-T1.nii.gz"]),
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI253-HH-1694-T1.nii.gz"]),
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI092-HH-1436-T1.nii.gz"]),
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI574-IOP-1156-T1.nii.gz"]),
os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI585-Guys-1130-T1.nii.gz"]),
]
# 2 binary labels for gender classification: man and woman
labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
# Define transforms for image
val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()])
# Define nifti dataset
val_ds = NiftiDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False)
# create a validation data loader
val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())
# Create DenseNet121
device = torch.device("cuda:0")
model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)
model.load_state_dict(torch.load("best_metric_model.pth"))
model.eval()
with torch.no_grad():
num_correct = 0.0
metric_count = 0
saver = CSVSaver(output_dir="./output")
for val_data in val_loader:
val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
val_outputs = model(val_images).argmax(dim=1)
value = torch.eq(val_outputs, val_labels)
metric_count += len(value)
num_correct += value.sum().item()
saver.save_batch(val_outputs, val_data[2])
metric = num_correct / metric_count
print("evaluation metric:", metric)
saver.finalize()
if __name__ == "__main__":
main()
|
the-stack_106_21663
|
'''
Created on Apr 15, 2016
Evaluate the performance of Top-K recommendation:
Protocol: leave-1-out evaluation
Measures: Hit Ratio and NDCG
(more details are in: Xiangnan He, et al. Fast Matrix Factorization for Online Recommendation with Implicit Feedback. SIGIR'16)
@author: hexiangnan
'''
import math
import heapq # for retrieval topK
import multiprocessing
import numpy as np
from time import time
from keras.models import Model
# from numba import jit, autojit
# Global variables that are shared across processes
_model = None
_testRatings = None
_testNegatives = None
_K = None
def evaluate_model(model, testRatings, testNegatives, K, num_thread):
"""
Evaluate the performance (Hit_Ratio, NDCG) of top-K recommendation
Return: score of each test rating.
"""
global _model
global _testRatings
global _testNegatives
global _K
_model = model
_testRatings = testRatings
_testNegatives = testNegatives
_K = K
hits, ndcgs = [], []
if (num_thread > 1): # Multi-thread
pool = multiprocessing.Pool(processes=num_thread)
res = pool.map(eval_one_rating, range(len(_testRatings)))
pool.close()
pool.join()
hits = [r[0] for r in res]
ndcgs = [r[1] for r in res]
return (hits, ndcgs)
# Single thread
for idx in range(len(_testRatings)):
(hr, ndcg) = eval_one_rating(idx)
hits.append(hr)
ndcgs.append(ndcg)
return (hits, ndcgs)
def eval_one_rating(idx):
rating = _testRatings[idx]
items = _testNegatives[idx]
u = rating[0]
gtItem = rating[1]
items.append(gtItem)
# Get prediction scores
map_item_score = {}
users = np.full(len(items), u, dtype='int32')
predictions = _model.predict([users, np.array(items)],
batch_size=100, verbose=0)
# medium_model = Model(inputs=_model.input, outputs=_model.get_layer('user_input').output)
# embeddings_output = medium_model.predict([users, np.array(items)])
# print(embeddings_output)
for i in range(len(items)):
item = items[i]
map_item_score[item] = predictions[i]
items.pop()
# Evaluate top rank list
ranklist = heapq.nlargest(_K, map_item_score, key=map_item_score.get)
hr = getHitRatio(ranklist, gtItem)
ndcg = getNDCG(ranklist, gtItem)
return (hr, ndcg)
def getHitRatio(ranklist, gtItem):
for item in ranklist:
if item == gtItem:
return 1
return 0
def getNDCG(ranklist, gtItem):
for i in range(len(ranklist)):
item = ranklist[i]
if item == gtItem:
return math.log(2) / math.log(i + 2)
return 0
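# --- Hedged usage sketch (not part of the original module) ---
# Typical call pattern from an NCF-style training loop; `model` is any Keras model taking
# [user_ids, item_ids] as inputs, and testRatings/testNegatives follow the
# (user, gt_item) / negative-item-list format assumed by eval_one_rating above.
#
#     hits, ndcgs = evaluate_model(model, testRatings, testNegatives, K=10, num_thread=1)
#     print("HR@10 = %.4f, NDCG@10 = %.4f" % (np.mean(hits), np.mean(ndcgs)))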
|