the-stack_106_17860
|
from __future__ import print_function
import sys
sys.path.append("/home/ec2-user/anaconda3/lib/python3.5/site-packages")  # add the site-packages directory (appending the __init__.py file itself has no effect)
import mysql.connector
import requests
import urllib.request
import json
from pprint import pprint
import ssl
import time
ssl._create_default_https_context = ssl._create_unverified_context
cnx = mysql.connector.connect(user='Manjunathsk92', password='Manjunathsk92',
host='se-project-db.cuph6akhej5q.us-east-2.rds.amazonaws.com',
database='projectdb')
cursor = cnx.cursor()
print("connect")
delete_query=("DELETE from dublin_bikes_current_data")
insert_data = ("INSERT INTO dublin_bikes_current_data "
"(station_number, station_name, station_address, insert_timestamp, position_latitude, position_longitude, banking, bonus, status, bike_stands, available_bike_stands, available_bikes, last_update, date_time )"
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s)")
update_date=("UPDATE Dublin_bikes_week_data"
" set ref_id = from_unixtime(%s)")
with urllib.request.urlopen("https://api.jcdecaux.com/vls/v1/stations?contract=dublin&apiKey=afbf7d96c89b1b752484c4f3a1aa7056e323587e") as url:
print("api")
data = json.loads(url.read().decode('utf-8'))
print("after api call")
epoch_time=time.time()
current_time=time.strftime("%Y-%m-%d %H:%M:%S")
cursor.execute(delete_query)
print("after delete query")
for i in range(len(data)):
data_parsed = (data[i]['number'], data[i]['name'], data[i]['address'], epoch_time, data[i]['position']['lat'], data[i]['position']['lng'], data[i]['banking'], data[i]['bonus'], data[i]['status'], data[i]['bike_stands'], data[i]['available_bike_stands'], data[i]['available_bikes'], data[i]['last_update'], current_time)
# print("after data_parsed")
cursor.execute(insert_data, data_parsed)
print("after insert date")
#cursor.execute(update_date, current_time/1000)
#print("after update date")
cnx.commit()
cursor.close()
cnx.close()
print("end")
|
the-stack_106_17861
|
from model.group_address import Address_data
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
print(err)
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " "*7
return prefix + "".join([random.choice(symbols) for x in range(random.randrange(maxlen))])
def random_numbers(maxlen):
numbers = string.digits + " "*2 + "(" + ")" + "-"
return "".join([random.choice(numbers) for x in range(maxlen)])
def random_mail(domen, maxlen):
value = string.ascii_letters + string.digits
return "".join([random.choice(value) for x in range(random.randrange(maxlen))]) + domen
testdata = [
Address_data(firstname=random_string("firstname", 20), middlename=random_string("", 1),
lastname=random_string("lastname", 20), nickname=random_string("nickname", 20),
company=random_string("company", 20), address=random_string("address", 20),
home_phone=random_numbers(10), mobile_phone=random_numbers(10), work_phone=random_numbers(10),
fax_phone=random_numbers(10), email_1=random_mail("@mail.ru", 10), email_2=random_mail("@mail.ru", 10),
home_page=random_string("page", 15))
for x in range(n)
]
constant = [
Address_data(firstname="firstname", middlename="middlename", lastname="lastname", nickname="nickname",
company="company", address="address", home_phone="7874177", mobile_phone="784541212",
work_phone="8776464321", fax_phone="874845421", email_1="[email protected]", email_2="[email protected]",
home_page="www.page.com")
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
#with open(file, "w") as out:
# out.write(json.dumps(testdata, default=lambda x: x.__dict__, indent=2))
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
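# A hedged sketch (not part of the original generator) of how the dumped file
# can be read back: jsonpickle.decode restores the Address_data objects that
# jsonpickle.encode serialized above. The helper name is made up.
def load_generated_contacts(path=file):
    with open(path) as src:
        return jsonpickle.decode(src.read())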
|
the-stack_106_17862
|
"""
This is a new module that we intend to GROW from test_events.py.
It will contain schemas (aka validators) for Zulip events.
Right now it's only intended to be used by test code.
"""
from typing import Any, Dict, Sequence, Set, Tuple, Union
from zerver.lib.topic import ORIG_TOPIC, TOPIC_LINKS, TOPIC_NAME
from zerver.lib.validator import (
Validator,
check_bool,
check_dict,
check_dict_only,
check_int,
check_int_in,
check_list,
check_none_or,
check_string,
check_union,
check_url,
equals,
)
from zerver.models import Realm, Stream, UserProfile
# These fields are used for "stream" events, and are included in the
# larger "subscription" events that also contain personal settings.
basic_stream_fields = [
("description", check_string),
("first_message_id", check_none_or(check_int)),
("history_public_to_subscribers", check_bool),
("invite_only", check_bool),
("is_announcement_only", check_bool),
("is_web_public", check_bool),
("message_retention_days", equals(None)),
("name", check_string),
("rendered_description", check_string),
("stream_id", check_int),
("stream_post_policy", check_int),
]
subscription_fields: Sequence[Tuple[str, Validator[object]]] = [
*basic_stream_fields,
("audible_notifications", check_none_or(check_bool)),
("color", check_string),
("desktop_notifications", check_none_or(check_bool)),
("email_address", check_string),
("email_notifications", check_none_or(check_bool)),
("in_home_view", check_bool),
("is_muted", check_bool),
("pin_to_top", check_bool),
("push_notifications", check_none_or(check_bool)),
("stream_weekly_traffic", check_none_or(check_int)),
("wildcard_mentions_notify", check_none_or(check_bool)),
]
def check_events_dict(
required_keys: Sequence[Tuple[str, Validator[object]]],
optional_keys: Sequence[Tuple[str, Validator[object]]] = [],
) -> Validator[Dict[str, object]]:
"""
This is just a tiny wrapper on check_dict, but it provides
some minor benefits:
- mark clearly that the schema is for a Zulip event
- make sure there's a type field
- add id field automatically
- sanity check that we have no duplicate keys (we
should just make check_dict do that, eventually)
"""
rkeys = [key[0] for key in required_keys]
okeys = [key[0] for key in optional_keys]
keys = rkeys + okeys
assert len(keys) == len(set(keys))
assert "type" in rkeys
assert "id" not in keys
return check_dict_only(
required_keys=list(required_keys) + [("id", check_int)],
optional_keys=optional_keys,
)
check_add_or_remove = check_union(
[
# force vertical
equals("add"),
equals("remove"),
]
)
check_value = check_union(
[
# force vertical formatting
check_bool,
check_int,
check_string,
]
)
check_optional_value = check_union(
[
# force vertical formatting
check_bool,
check_int,
check_string,
equals(None),
]
)
check_alert_words = check_events_dict(
required_keys=[
# force vertical formatting
("type", equals("alert_words")),
("alert_words", check_list(check_string)),
]
)
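# A hedged illustration (not part of the original module): checkers built by
# check_events_dict are callables taking a variable name and the event dict,
# and they raise if the event does not match the schema (the same pattern the
# wrappers below, e.g. check_reaction, rely on). The event values are made up.
def _example_alert_words_usage() -> None:
    event = {
        "id": 1,
        "type": "alert_words",
        "alert_words": ["devops", "lunch"],
    }
    check_alert_words("events[0]", event)  # passes silently when valid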
_check_custom_profile_field = check_dict_only(
required_keys=[
("id", check_int),
("type", check_int),
("name", check_string),
("hint", check_string),
("field_data", check_string),
("order", check_int),
]
)
check_custom_profile_fields = check_events_dict(
required_keys=[
("type", equals("custom_profile_fields")),
("op", equals("add")),
("fields", check_list(_check_custom_profile_field)),
]
)
check_invites_changed = check_events_dict(
required_keys=[
# the most boring event...no metadata
("type", equals("invites_changed")),
]
)
message_fields = [
("avatar_url", check_none_or(check_string)),
("client", check_string),
("content", check_string),
("content_type", equals("text/html")),
("display_recipient", check_string),
("id", check_int),
("is_me_message", check_bool),
("reactions", check_list(check_dict([]))),
("recipient_id", check_int),
("sender_realm_str", check_string),
("sender_email", check_string),
("sender_full_name", check_string),
("sender_id", check_int),
("stream_id", check_int),
(TOPIC_NAME, check_string),
(TOPIC_LINKS, check_list(check_string)),
("submessages", check_list(check_dict([]))),
("timestamp", check_int),
("type", check_string),
]
check_message = check_events_dict(
required_keys=[
("type", equals("message")),
("flags", check_list(check_string)),
("message", check_dict_only(message_fields)),
]
)
# We will eventually just send user_ids.
_check_reaction_user = check_dict_only(
required_keys=[
# force vertical
("email", check_string),
("full_name", check_string),
("user_id", check_int),
]
)
_check_reaction = check_events_dict(
required_keys=[
("type", equals("reaction")),
("op", check_add_or_remove),
("message_id", check_int),
("emoji_name", check_string),
("emoji_code", check_string),
("reaction_type", check_string),
("user_id", check_int),
("user", _check_reaction_user),
]
)
def check_reaction(var_name: str, event: Dict[str, Any], op: str) -> None:
_check_reaction(var_name, event)
assert event["op"] == op
_check_bot_services_outgoing = check_dict_only(
required_keys=[
# force vertical
("base_url", check_url),
("interface", check_int),
("token", check_string),
]
)
# We use a strict check here, because our tests
# don't specifically focus on seeing how
# flexible we can make the types be for config_data.
_ad_hoc_config_data_schema = equals(dict(foo="bar"))
_check_bot_services_embedded = check_dict_only(
required_keys=[
# force vertical
("service_name", check_string),
("config_data", _ad_hoc_config_data_schema),
]
)
# Note that regular bots just get an empty list of services,
# so the sub_validator for check_list won't matter for them.
_check_bot_services = check_list(
check_union(
[
# force vertical
_check_bot_services_outgoing,
_check_bot_services_embedded,
]
),
)
_check_bot = check_dict_only(
required_keys=[
("user_id", check_int),
("api_key", check_string),
("avatar_url", check_string),
("bot_type", check_int),
("default_all_public_streams", check_bool),
("default_events_register_stream", check_none_or(check_string)),
("default_sending_stream", check_none_or(check_string)),
("email", check_string),
("full_name", check_string),
("is_active", check_bool),
("owner_id", check_int),
("services", _check_bot_services),
]
)
_check_realm_bot_add = check_events_dict(
required_keys=[
# force vertical
("type", equals("realm_bot")),
("op", equals("add")),
("bot", _check_bot),
]
)
def check_realm_bot_add(var_name: str, event: Dict[str, Any],) -> None:
_check_realm_bot_add(var_name, event)
bot_type = event["bot"]["bot_type"]
services_field = f"{var_name}['bot']['services']"
services = event["bot"]["services"]
if bot_type == UserProfile.DEFAULT_BOT:
equals([])(services_field, services)
elif bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
check_list(_check_bot_services_outgoing, length=1)(services_field, services)
elif bot_type == UserProfile.EMBEDDED_BOT:
check_list(_check_bot_services_embedded, length=1)(services_field, services)
else:
raise AssertionError(f"Unknown bot_type: {bot_type}")
_check_bot_for_delete = check_dict_only(
required_keys=[
# for legacy reasons we have a dict here
# with only one key
("user_id", check_int),
]
)
check_realm_bot_delete = check_events_dict(
required_keys=[
("type", equals("realm_bot")),
("op", equals("delete")),
("bot", _check_bot_for_delete),
]
)
_check_bot_for_remove = check_dict_only(
required_keys=[
# Why does remove have full_name but delete doesn't?
# Why do we have both a remove and a delete event
# for bots? I don't know the answer as I write this.
("full_name", check_string),
("user_id", check_int),
]
)
check_realm_bot_remove = check_events_dict(
required_keys=[
("type", equals("realm_bot")),
("op", equals("remove")),
("bot", _check_bot_for_remove),
]
)
_check_bot_for_update = check_dict_only(
required_keys=[
# force vertical
("user_id", check_int),
],
optional_keys=[
("api_key", check_string),
("avatar_url", check_string),
("default_all_public_streams", check_bool),
("default_events_register_stream", check_none_or(check_string)),
("default_sending_stream", check_none_or(check_string)),
("full_name", check_string),
("owner_id", check_int),
("services", _check_bot_services),
],
)
_check_realm_bot_update = check_events_dict(
required_keys=[
("type", equals("realm_bot")),
("op", equals("update")),
("bot", _check_bot_for_update),
]
)
def check_realm_bot_update(var_name: str, event: Dict[str, Any], field: str,) -> None:
# Check the overall schema first.
_check_realm_bot_update(var_name, event)
assert {"user_id", field} == set(event["bot"].keys())
_check_plan_type_extra_data = check_dict_only(
required_keys=[
# force vertical
("upload_quota", check_int),
]
)
"""
realm/update events are flexible for values;
we will use a more strict checker to check
types in a context-specific manner
"""
_check_realm_update = check_events_dict(
required_keys=[
("type", equals("realm")),
("op", equals("update")),
("property", check_string),
("value", check_value),
],
optional_keys=[
# force vertical
("extra_data", _check_plan_type_extra_data),
],
)
def check_realm_update(var_name: str, event: Dict[str, Any], prop: str,) -> None:
"""
Realm updates have these two fields:
property
value
We check not only the basic schema, but also that
the value actually matches the type from
Realm.property_types that we have configured
for the property.
"""
_check_realm_update(var_name, event)
assert prop == event["property"]
value = event["value"]
if prop == "plan_type":
assert isinstance(value, int)
assert "extra_data" in event.keys()
return
assert "extra_data" not in event.keys()
if prop in ["notifications_stream_id", "signup_notifications_stream_id"]:
assert isinstance(value, int)
return
property_type = Realm.property_types[prop]
if property_type in (bool, int, str):
assert isinstance(value, property_type)
elif property_type == (int, type(None)):
assert isinstance(value, int)
elif property_type == (str, type(None)):
assert isinstance(value, str)
else:
raise AssertionError(f"Unexpected property type {property_type}")
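# A hedged illustration (not from the original module): for the "plan_type"
# property the checker only needs the basic schema plus extra_data, so it can
# be exercised without consulting Realm.property_types. The literal values
# below are made up, and the helper is never called.
def _example_realm_update_plan_type() -> None:
    event = {
        "id": 1,
        "type": "realm",
        "op": "update",
        "property": "plan_type",
        "value": 2,
        "extra_data": {"upload_quota": 1024},
    }
    check_realm_update("events[0]", event, "plan_type")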
avatar_fields = {
"avatar_source",
"avatar_url",
"avatar_url_medium",
"avatar_version",
}
_check_custom_profile_field = check_dict_only(
required_keys=[
# vertical formatting
("id", check_int),
("value", check_string),
],
optional_keys=[
# vertical formatting
("rendered_value", check_string),
],
)
_check_realm_user_person = check_dict_only(
required_keys=[
# vertical formatting
("user_id", check_int),
],
optional_keys=[
("avatar_source", check_string),
("avatar_url", check_none_or(check_string)),
("avatar_url_medium", check_none_or(check_string)),
("avatar_version", check_int),
("bot_owner_id", check_int),
("custom_profile_field", _check_custom_profile_field),
("delivery_email", check_string),
("full_name", check_string),
("role", check_int_in(UserProfile.ROLE_TYPES)),
("email", check_string),
("user_id", check_int),
("timezone", check_string),
],
)
_check_realm_user_update = check_events_dict(
required_keys=[
("type", equals("realm_user")),
("op", equals("update")),
("person", _check_realm_user_person),
]
)
def check_realm_user_update(
var_name: str, event: Dict[str, Any], optional_fields: Set[str],
) -> None:
_check_realm_user_update(var_name, event)
keys = set(event["person"].keys()) - {"user_id"}
assert optional_fields == keys
check_stream_create = check_events_dict(
required_keys=[
("type", equals("stream")),
("op", equals("create")),
("streams", check_list(check_dict_only(basic_stream_fields))),
]
)
_check_stream_update = check_events_dict(
required_keys=[
("type", equals("stream")),
("op", equals("update")),
("property", check_string),
("value", check_optional_value),
("name", check_string),
("stream_id", check_int),
],
optional_keys=[
("rendered_description", check_string),
("history_public_to_subscribers", check_bool),
],
)
def check_stream_update(var_name: str, event: Dict[str, Any],) -> None:
_check_stream_update(var_name, event)
prop = event["property"]
value = event["value"]
extra_keys = set(event.keys()) - {
"id",
"type",
"op",
"property",
"value",
"name",
"stream_id",
}
if prop == "description":
assert extra_keys == {"rendered_description"}
assert isinstance(value, str)
elif prop == "email_address":
assert extra_keys == set()
assert isinstance(value, str)
elif prop == "invite_only":
assert extra_keys == {"history_public_to_subscribers"}
assert isinstance(value, bool)
elif prop == "message_retention_days":
assert extra_keys == set()
if value is not None:
assert isinstance(value, int)
elif prop == "name":
assert extra_keys == set()
assert isinstance(value, str)
elif prop == "stream_post_policy":
assert extra_keys == set()
assert value in Stream.STREAM_POST_POLICY_TYPES
else:
raise AssertionError(f"Unknown property: {prop}")
check_submessage = check_events_dict(
required_keys=[
("type", equals("submessage")),
("message_id", check_int),
("submessage_id", check_int),
("sender_id", check_int),
("msg_type", check_string),
("content", check_string),
]
)
_check_single_subscription = check_dict_only(
required_keys=subscription_fields,
optional_keys=[
# force vertical
("subscribers", check_list(check_int)),
],
)
_check_subscription_add = check_events_dict(
required_keys=[
("type", equals("subscription")),
("op", equals("add")),
("subscriptions", check_list(_check_single_subscription)),
],
)
def check_subscription_add(
var_name: str, event: Dict[str, Any], include_subscribers: bool,
) -> None:
_check_subscription_add(var_name, event)
for sub in event["subscriptions"]:
if include_subscribers:
assert "subscribers" in sub.keys()
else:
assert "subscribers" not in sub.keys()
check_subscription_peer_add = check_events_dict(
required_keys=[
("type", equals("subscription")),
("op", equals("peer_add")),
("user_id", check_int),
("stream_id", check_int),
]
)
check_subscription_peer_remove = check_events_dict(
required_keys=[
("type", equals("subscription")),
("op", equals("peer_remove")),
("user_id", check_int),
("stream_id", check_int),
]
)
_check_remove_sub = check_dict_only(
required_keys=[
# We should eventually just return stream_id here.
("name", check_string),
("stream_id", check_int),
]
)
check_subscription_remove = check_events_dict(
required_keys=[
("type", equals("subscription")),
("op", equals("remove")),
("subscriptions", check_list(_check_remove_sub)),
]
)
_check_typing_person = check_dict_only(
required_keys=[
# we should eventually just send user_id
("email", check_string),
("user_id", check_int),
]
)
check_typing_start = check_events_dict(
required_keys=[
("type", equals("typing")),
("op", equals("start")),
("sender", _check_typing_person),
("recipients", check_list(_check_typing_person)),
]
)
_check_update_display_settings = check_events_dict(
required_keys=[
("type", equals("update_display_settings")),
("setting_name", check_string),
("setting", check_value),
("user", check_string),
],
optional_keys=[
# force vertical
("language_name", check_string),
],
)
def check_update_display_settings(var_name: str, event: Dict[str, Any],) -> None:
"""
Display setting events have a "setting" field that
is more specifically typed according to the
UserProfile.property_types dictionary.
"""
_check_update_display_settings(var_name, event)
setting_name = event["setting_name"]
setting = event["setting"]
setting_type = UserProfile.property_types[setting_name]
assert isinstance(setting, setting_type)
if setting_name == "default_language":
assert "language_name" in event.keys()
else:
assert "language_name" not in event.keys()
_check_update_global_notifications = check_events_dict(
required_keys=[
("type", equals("update_global_notifications")),
("notification_name", check_string),
("setting", check_value),
("user", check_string),
]
)
def check_update_global_notifications(
var_name: str, event: Dict[str, Any], desired_val: Union[bool, int, str],
) -> None:
"""
See UserProfile.notification_setting_types for
more details.
"""
_check_update_global_notifications(var_name, event)
setting_name = event["notification_name"]
setting = event["setting"]
assert setting == desired_val
setting_type = UserProfile.notification_setting_types[setting_name]
assert isinstance(setting, setting_type)
update_message_required_fields = [
("type", equals("update_message")),
("user_id", check_int),
("edit_timestamp", check_int),
("message_id", check_int),
]
update_message_content_fields = [
("content", check_string),
("is_me_message", check_bool),
("mention_user_ids", check_list(check_int)),
("orig_content", check_string),
("orig_rendered_content", check_string),
("presence_idle_user_ids", check_list(check_int)),
("prev_rendered_content_version", check_int),
("prior_mention_user_ids", check_list(check_int)),
("push_notify_user_ids", check_list(check_int)),
("rendered_content", check_string),
("stream_email_user_ids", check_list(check_int)),
("stream_push_user_ids", check_list(check_int)),
("wildcard_mention_user_ids", check_list(check_int)),
]
update_message_topic_fields = [
("flags", check_list(check_string)),
("message_ids", check_list(check_int)),
("new_stream_id", check_int),
(ORIG_TOPIC, check_string),
("propagate_mode", check_string),
("stream_id", check_int),
("stream_name", check_string),
(TOPIC_LINKS, check_list(check_string)),
(TOPIC_NAME, check_string),
]
update_message_optional_fields = (
update_message_content_fields + update_message_topic_fields
)
# The schema here does not include the "embedded"
# variant of update_message; it is for message
# and topic editing.
_check_update_message = check_events_dict(
required_keys=update_message_required_fields,
optional_keys=update_message_optional_fields,
)
def check_update_message(
var_name: str,
event: Dict[str, Any],
has_content: bool,
has_topic: bool,
has_new_stream_id: bool,
) -> None:
# Always check the basic schema first.
_check_update_message(var_name, event)
actual_keys = set(event.keys())
expected_keys = {"id"}
expected_keys.update(tup[0] for tup in update_message_required_fields)
if has_content:
expected_keys.update(tup[0] for tup in update_message_content_fields)
if has_topic:
expected_keys.update(tup[0] for tup in update_message_topic_fields)
if not has_new_stream_id:
expected_keys.discard("new_stream_id")
assert expected_keys == actual_keys
check_update_message_embedded = check_events_dict(
required_keys=[
("type", equals("update_message")),
("flags", check_list(check_string)),
("content", check_string),
("message_id", check_int),
("message_ids", check_list(check_int)),
("rendered_content", check_string),
("sender", check_string),
]
)
_check_update_message_flags = check_events_dict(
required_keys=[
("type", equals("update_message_flags")),
("operation", check_add_or_remove),
("flag", check_string),
("messages", check_list(check_int)),
("all", check_bool),
]
)
def check_update_message_flags(
var_name: str, event: Dict[str, Any], operation: str
) -> None:
_check_update_message_flags(var_name, event)
assert event["operation"] == operation
_check_group = check_dict_only(
required_keys=[
("id", check_int),
("name", check_string),
("members", check_list(check_int)),
("description", check_string),
]
)
check_user_group_add = check_events_dict(
required_keys=[
("type", equals("user_group")),
("op", equals("add")),
("group", _check_group),
]
)
check_user_status = check_events_dict(
required_keys=[
("type", equals("user_status")),
("user_id", check_int),
("away", check_bool),
("status_text", check_string),
]
)
|
the-stack_106_17863
|
from distutils.core import setup
classes = """
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Topic :: System :: Logging
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.7
Operating System :: POSIX :: Linux
"""
classifiers = [s.strip() for s in classes.split("\n") if s]
setup(
name="zutil",
version="1.0.3",
description="Utilities used for generating zCFD control dictionaries",
author="Zenotech",
author_email="[email protected]",
license="MIT",
url="https://zcfd.zenotech.com/",
classifiers=classifiers,
project_urls={"Source Code": "https://github.com/zCFD/zutil/"},
packages=["zutil", "zutil.post", "zutil.analysis", "zutil.plot"],
install_requires=[
"future",
"ipython",
"fabric>=2.5",
"ipywidgets",
"matplotlib",
"numpy",
"pandas",
"PyYAML",
"dill",
],
extras_require={"mpi": ["mpi4py"]},
)
|
the-stack_106_17865
|
"""Initialize the NCBI subpackage, which can contain multiple python modules (files)."""
from pkg_resources import get_distribution, DistributionNotFound
__project__ = 'PyDkBio'
__version__ = None # required for initial installation
try:
__version__ = get_distribution('PyDkBio').version
except DistributionNotFound:
VERSION = __project__ + '-' + '(local)'
else:
VERSION = __project__ + '-' + __version__
|
the-stack_106_17867
|
from queue import Queue
from termcolor import colored
from com.shbak.effective_python._01_example._55_queue_for_thread.main import ClosableQueue, StoppableWorker
from com.shbak.effective_python._01_example._56_when_need_concurrent.main import game_logic, count_neighbors, Grid, \
ALIVE
def game_logic_thread(item):
y, x, state, neighbors = item
try:
next_state = game_logic(state, neighbors)
except Exception as e:
next_state = e
return (y, x, next_state)
class ColumnPrinter():
delimiter = '|\n'
def __init__(self):
self.list = list()
def __call__(self):
return self.list
def __str__(self):
return '---'
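# The imported ClosableQueue/StoppableWorker come from the Effective Python
# Item 55 example. Below is a hedged sketch of what they presumably provide
# (renamed so it does not shadow the real imports); it is only meant to make
# the fan-out/fan-in flow in simulate_pipeline easier to follow.
from threading import Thread

class _SketchClosableQueue(Queue):
    SENTINEL = object()
    def close(self):
        # One sentinel tells one consuming worker to stop iterating.
        self.put(self.SENTINEL)
    def __iter__(self):
        while True:
            item = self.get()
            try:
                if item is self.SENTINEL:
                    return  # stop the worker that sees the sentinel
                yield item
            finally:
                self.task_done()  # lets in_queue.join() return

class _SketchStoppableWorker(Thread):
    def __init__(self, func, in_queue, out_queue):
        super().__init__()
        self.func = func
        self.in_queue = in_queue
        self.out_queue = out_queue
    def run(self):
        for item in self.in_queue:
            self.out_queue.put(self.func(item))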
def thread_and_queue_test():
in_queue = ClosableQueue()
out_queue = ClosableQueue()
threads = []
for _ in range(5):
thread = StoppableWorker(game_logic_thread, in_queue, out_queue)
thread.start()
threads.append(thread)
grid = Grid(5, 9)
grid.set(0, 3, ALIVE)
grid.set(1, 4, ALIVE)
grid.set(2, 2, ALIVE)
grid.set(2, 3, ALIVE)
grid.set(2, 4, ALIVE)
columns = []
for i in range(5):
columns.append(str(grid))
grid = simulate_pipeline(grid, in_queue, out_queue)
print(colored(f'{columns}', 'green'))
for thread in threads:
in_queue.close()
for thread in threads:
thread.join()
class SimulationError(Exception):
pass
def simulate_pipeline(grid, in_queue, out_queue):
for y in range(grid.height):
for x in range(grid.width):
state = grid.get(y, x)
neighbors = count_neighbors(y, x, grid.get)
in_queue.put((y, x, state, neighbors)) # fan out
in_queue.join()
out_queue.close()
next_grid = Grid(grid.height, grid.width)
for item in out_queue:
y, x, next_state = item
if isinstance(next_state, Exception):
raise SimulationError(y, x) from next_state
next_grid.set(y, x, next_state)
return next_grid
if __name__ == '__main__':
thread_and_queue_test()
pass
|
the-stack_106_17868
|
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from transformer import Embedding
import re
def AMREmbedding(vocab, embedding_dim, pretrained_file=None, amr=False, dump_file=None):
# char_vocab-vocabs['concept_char']
# char_dim-concept_char_dim-32
if pretrained_file is None:
# Embedding[vocab.size, embedding_dim, vocab.padding_idx(id)]
return Embedding(vocab.size, embedding_dim, vocab.padding_idx)
tokens_to_keep = set()
for idx in range(vocab.size):
token = vocab.idx2token(idx)
# TODO: Is there a better way to do this? Currently we have a very specific 'amr' param.
if amr:
token = re.sub(r'-\d\d$', '', token)
tokens_to_keep.add(token)
embeddings = {}
if dump_file is not None:
fo = open(dump_file, 'w', encoding='utf8')
with open(pretrained_file, encoding='utf8') as embeddings_file:
for line in embeddings_file.readlines():
fields = line.rstrip().split(' ')
if len(fields) - 1 != embedding_dim:
continue
token = fields[0]
if token in tokens_to_keep:
if dump_file is not None:
fo.write(line)
vector = np.asarray(fields[1:], dtype='float32')
embeddings[token] = vector
if dump_file is not None:
fo.close()
all_embeddings = np.asarray(list(embeddings.values()))
print ('pretrained', all_embeddings.shape)
embeddings_mean = float(np.mean(all_embeddings))
embeddings_std = float(np.std(all_embeddings))
# Now we initialize the weight matrix for an embedding layer, starting with random vectors,
# then filling in the word vectors we just read.
embedding_matrix = torch.FloatTensor(vocab.size, embedding_dim).normal_(embeddings_mean,
embeddings_std)
for i in range(vocab.size):
token = vocab.idx2token(i)
# If we don't have a pre-trained vector for this word, we'll just leave this row alone,
# so the word has a random initialization.
if token in embeddings:
embedding_matrix[i] = torch.FloatTensor(embeddings[token])
else:
if amr:
normalized_token = re.sub(r'-\d\d$', '', token)
if normalized_token in embeddings:
embedding_matrix[i] = torch.FloatTensor(embeddings[normalized_token])
embedding_matrix[vocab.padding_idx].fill_(0.)
return nn.Embedding.from_pretrained(embedding_matrix, freeze=False)
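# A hedged usage sketch (not in the original code): AMREmbedding only needs a
# vocab object exposing .size, .padding_idx and .idx2token(idx). The toy vocab
# and helper below are made up for illustration and are never called.
class _ToyVocab:
    def __init__(self, tokens, padding_token='<pad>'):
        self._tokens = [padding_token] + list(tokens)
        self.size = len(self._tokens)
        self.padding_idx = 0
    def idx2token(self, idx):
        return self._tokens[idx]

def _example_concept_embedding(pretrained_file=None):
    vocab = _ToyVocab(['want-01', 'boy', 'go-02'])
    # With pretrained_file=None this falls back to a randomly initialized
    # embedding; with a GloVe-style text file it loads the matching rows.
    return AMREmbedding(vocab, 300, pretrained_file=pretrained_file, amr=True)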
class RelationEncoder(nn.Module):
def __init__(self, vocab, rel_dim, embed_dim, hidden_size, num_layers, dropout, bidirectional=True):
# relation_encoder-RelationEncoder
# vocabs['relation']. rel_dim-100, embed_dim-512, hidden_size-256,
# num_layers-2, dropout-0.2
super(RelationEncoder, self).__init__()
self.vocab = vocab
self.embed_dim = embed_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
# rel_embed-Embedding[n, 100]
self.rel_embed = AMREmbedding(vocab, rel_dim)
# GRU[100, 256], 2 layers, bidirectional, with dropout
self.rnn = nn.GRU(
input_size=rel_dim,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=self.dropout if num_layers > 1 else 0.,
bidirectional=bidirectional
)
# tot_dim-512
tot_dim = 2 * hidden_size if bidirectional else hidden_size
self.out_proj = nn.Linear(tot_dim, embed_dim)
# out_proj-Linear [512, 512]
def reset_parameters(self):
nn.init.normal_(self.out_proj.weight, std=0.02)
nn.init.constant_(self.out_proj.bias, 0.)
def forward(self, src_tokens, src_lengths):
# inp['relation_bank'], inp['relation_length']
# src_tokens-[seq_len, bsz]
seq_len, bsz = src_tokens.size()
# sorted_src_lengths: relation lengths sorted in descending order
# indices: indices of that descending sort
sorted_src_lengths, indices = torch.sort(src_lengths, descending=True)
# sequences reordered by length (longest first)
# sorted_src_tokens-[seq_len, bsz]
sorted_src_tokens = src_tokens.index_select(1, indices)
# rel_embed-Embedding[n, 100]
# x-[seq_len, bsz, 100]
x = self.rel_embed(sorted_src_tokens)
# x-[seq_len, bsz, 100], dropout applied
x = F.dropout(x, p=self.dropout, training=self.training)
# pack the padded sequence before the variable-length GRU
packed_x = nn.utils.rnn.pack_padded_sequence(x, sorted_src_lengths.data.tolist())
if self.bidirectional:
# state_size - 4, batch_size, 256 [4, batch_size, 256]
state_size = 2 * self.num_layers, bsz, self.hidden_size
else:
state_size = self.num_layers, bsz, self.hidden_size
# create a zero-valued [4, batch_size, 256] initial hidden state
h0 = x.data.new(*state_size).zero_()
# self.rnn = nn.GRU(
# input_size=rel_dim,
# hidden_size=hidden_size,
# num_layers=num_layers,
# dropout=self.dropout if num_layers > 1 else 0.,
# bidirectional=bidirectional
# )
# final_h-[4, batch_size, 256]
_, final_h = self.rnn(packed_x, h0)
if self.bidirectional:
def combine_bidir(outs):
# final_h [4, batch_size, 256] -> split directions [2, 2, batch_size, 256] -> group both directions per layer [2, batch_size, 2, 256]
# final_h-[2, batch_size, 512]
return outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous().view(self.num_layers, bsz, -1)
# merge the two directions of each layer
final_h = combine_bidir(final_h)
# restore the original batch order [2, batch_size, 512]
_, positions = torch.sort(indices)
final_h = final_h.index_select(1, positions) # num_layers x bsz x hidden_size
# out_proj-Linear [512, 512]
# take the last layer [batch_size, 512] through the fully connected projection
# output-[batch_size, 512]
output = self.out_proj(final_h[-1])
return output
class TokenEncoder(nn.Module):
def __init__(self, token_vocab, char_vocab, char_dim, token_dim, embed_dim, filters, char2token_dim, dropout, pretrained_file=None):
# concept_encoder-TokenEncoder
# token_vocab-vocabs['concept']
# char_vocab-vocabs['concept_char']
# char_dim-concept_char_dim-32
# token_dim-concept_dim-300
# embed_dim-512
# filters-cnn_filters-[(3, 256)]
# char2token_dim-128
# dropout-0.2
# pretrained_file: path to pretrained embeddings
super(TokenEncoder, self).__init__()
# char_vocab-vocabs['concept_char']
# char_dim-concept_char_dim-32
# nn.Embedding(n, 32)
self.char_embed = AMREmbedding(char_vocab, char_dim)
# token_vocab-vocabs['concept']
# token_dim-concept_dim-300
# pretrained_file: path to pretrained embeddings (None here)
# nn.Embedding(n, 300)
self.token_embed = AMREmbedding(token_vocab, token_dim, pretrained_file)
# filters-cnn_filters-[(3, 256)]
# char_dim-concept_char_dim-32
# char2token_dim-128
self.char2token = CNNEncoder(filters, char_dim, char2token_dim)
# tot_dim-128+300
tot_dim = char2token_dim + token_dim
# out_proj-Linear[428, 512]
self.out_proj = nn.Linear(tot_dim, embed_dim)
# char_dim-32
self.char_dim = char_dim
# token_dim-concept_dim-300
self.token_dim = token_dim
# dropout-0.2
self.dropout = dropout
# initialize the out_proj weight parameters
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.out_proj.weight, std=0.02)
nn.init.constant_(self.out_proj.bias, 0.)
def forward(self, token_input, char_input):
# data['concept'], data['concept_char']
# seq_len: sequence length
# bsz: batch size
seq_len, bsz, _ = char_input.size()
# char_embed-nn.Embedding(n, 32)
# char_repr-[seq_len * bsz, -1, 32]
char_repr = self.char_embed(char_input.view(seq_len * bsz, -1))
# convolutional (Conv1d) encoding expands the char embedding dim from 32 to 256
# char2token returns [seq_len * bsz, 128]
# char_repr-[seq_len, bsz, 128]
char_repr = self.char2token(char_repr).view(seq_len, bsz, -1)
# token_input-data['concept']
# token_embed-nn.Embedding(n, 300)
# token_repr-[seq_len, bsz, 300]
token_repr = self.token_embed(token_input)
# token-[seq_len, bsz, 428]
token = F.dropout(torch.cat([char_repr,token_repr], -1), p=self.dropout, training=self.training)
# out_proj-Linear[428, 512]
token = self.out_proj(token)
# token-[seq_len, bsz, 512]
return token
class CNNEncoder(nn.Module):
def __init__(self, filters, input_dim, output_dim, highway_layers=1):
# filters-cnn_filters-[(3, 256)]
# input_dim-concept_char_dim-32
# output_dim-128
super(CNNEncoder, self).__init__()
self.convolutions = nn.ModuleList()
# 3, 256
for width, out_c in filters:
# Conv1d-[32, 256, kernel_size=3]
self.convolutions.append(nn.Conv1d(input_dim, out_c, kernel_size=width))
# [(3, 256)]
# final_dim-256
final_dim = sum(f[1] for f in filters)
# final_dim-256
# highway_layers-1
self.highway = Highway(final_dim, highway_layers)
# out_proj-Linear [256, 128]
self.out_proj = nn.Linear(final_dim, output_dim)
# initialize the out_proj weights
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.out_proj.weight, std=0.02)
nn.init.constant_(self.out_proj.bias, 0.)
def forward(self, input):
# input: batch_size x seq_len x input_dim
# char_repr-[seq_len * bsz, -1, 32]
# x-[seq_len * bsz, 32, -1]
# transpose the input for the conv1d calls below
x = input.transpose(1, 2)
conv_result = []
for i, conv in enumerate(self.convolutions):
# Conv1d-[32, 256, kernel_size=3] expands the char vectors to 256 dims
# y-[seq_len * bsz, 256, -1]
y = conv(x)
# y-[seq_len * bsz, 256]
y, _ = torch.max(y, -1)
# ReLU activation
y = F.relu(y)
conv_result.append(y)
# concatenate the convolution results: conv_result-[seq_len * bsz, 256]
conv_result = torch.cat(conv_result, dim=-1)
# fully connected (highway) layer merges the features
# conv_result-[seq_len * bsz, 256]
conv_result = self.highway(conv_result)
# out_proj-Linear [256, 128]
# [seq_len * bsz, 128]
return self.out_proj(conv_result) # batch_size x output_dim
class Highway(nn.Module):
def __init__(self, input_dim, layers):
super(Highway, self).__init__()
# input_dim-256
# layers-1
self.input_dim = input_dim
# layers-Linear[256, 512]
self.layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2)
for _ in range(layers)])
# initialize the highway layer weights
self.reset_parameters()
def reset_parameters(self):
for layer in self.layers:
nn.init.normal_(layer.weight, std=0.02)
nn.init.constant_(layer.bias[self.input_dim:], 1)
nn.init.constant_(layer.bias[:self.input_dim], 0)
def forward(self, x):
# x is the concatenated convolution result: [seq_len * bsz, 256]
for layer in self.layers:
# new_x-[seq_len * bsz, 512]
new_x = layer(x)
# new_x-[seq_len * bsz, 256]
new_x, gate = new_x.chunk(2, dim=-1)
new_x = F.relu(new_x)
gate = torch.sigmoid(gate)
x = gate * x + (1 - gate) * new_x
return x
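# A hedged shape check (not in the original code): CNNEncoder maps
# [batch, char_len, char_dim] character embeddings to [batch, output_dim].
# The sizes below are arbitrary and the helper is never called.
def _example_cnn_encoder_shapes():
    encoder = CNNEncoder(filters=[(3, 256)], input_dim=32, output_dim=128)
    chars = torch.randn(7, 11, 32)   # 7 tokens, 11 chars each, char_dim 32
    out = encoder(chars)
    assert out.shape == (7, 128)
    return out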
|
the-stack_106_17869
|
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc.
import copy
import re
import netaddr
from oslo.config import cfg
from sqlalchemy.orm import exc
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes
from neutron.common import constants as os_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import lockutils
from neutron.plugins.nuage.common import config
from neutron.plugins.nuage.common import constants
from neutron.plugins.nuage.common import exceptions as nuage_exc
from neutron.plugins.nuage import extensions
from neutron.plugins.nuage.extensions import netpartition
from neutron.plugins.nuage import nuagedb
from neutron import policy
class NuagePlugin(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_db.L3_NAT_db_mixin,
netpartition.NetPartitionPluginBase,
sg_db.SecurityGroupDbMixin):
"""Class that implements Nuage Networks' plugin functionality."""
supported_extension_aliases = ["router", "binding", "external-net",
"net-partition", "nuage-router",
"nuage-subnet", "quotas", "provider",
"extraroute", "security-group"]
binding_view = "extension:port_binding:view"
def __init__(self):
super(NuagePlugin, self).__init__()
neutron_extensions.append_api_extensions_path(extensions.__path__)
config.nuage_register_cfg_opts()
self.nuageclient_init()
net_partition = cfg.CONF.RESTPROXY.default_net_partition_name
self._create_default_net_partition(net_partition)
def nuageclient_init(self):
server = cfg.CONF.RESTPROXY.server
serverauth = cfg.CONF.RESTPROXY.serverauth
serverssl = cfg.CONF.RESTPROXY.serverssl
base_uri = cfg.CONF.RESTPROXY.base_uri
auth_resource = cfg.CONF.RESTPROXY.auth_resource
organization = cfg.CONF.RESTPROXY.organization
nuageclient = importutils.import_module('nuagenetlib.nuageclient')
self.nuageclient = nuageclient.NuageClient(server, base_uri,
serverssl, serverauth,
auth_resource,
organization)
def _resource_finder(self, context, for_resource, resource, user_req):
match = re.match(attributes.UUID_PATTERN, user_req[resource])
if match:
obj_lister = getattr(self, "get_%s" % resource)
found_resource = obj_lister(context, user_req[resource])
if not found_resource:
msg = (_("%(resource)s with id %(resource_id)s does not "
"exist") % {'resource': resource,
'resource_id': user_req[resource]})
raise n_exc.BadRequest(resource=for_resource, msg=msg)
else:
filter = {'name': [user_req[resource]]}
obj_lister = getattr(self, "get_%ss" % resource)
found_resource = obj_lister(context, filters=filter)
if not found_resource:
msg = (_("Either %(resource)s %(req_resource)s not found "
"or you dont have credential to access it")
% {'resource': resource,
'req_resource': user_req[resource]})
raise n_exc.BadRequest(resource=for_resource, msg=msg)
if len(found_resource) > 1:
msg = (_("More than one entry found for %(resource)s "
"%(req_resource)s. Use id instead")
% {'resource': resource,
'req_resource': user_req[resource]})
raise n_exc.BadRequest(resource=for_resource, msg=msg)
found_resource = found_resource[0]
return found_resource
def _create_update_port(self, context, port,
netpart_id, parent_id):
filters = {'device_id': [port['device_id']]}
ports = self.get_ports(context, filters)
net_partition = nuagedb.get_net_partition_by_id(context.session,
netpart_id)
params = {
'port_id': port['id'],
'id': port['device_id'],
'mac': port['mac_address'],
'parent_id': parent_id,
'net_partition': net_partition,
'ip': port['fixed_ips'][0]['ip_address'],
'no_of_ports': len(ports),
'tenant': port['tenant_id'],
}
nuage_vm = self.nuageclient.create_vms(params)
if nuage_vm:
if port['fixed_ips'][0]['ip_address'] != str(nuage_vm['ip']):
self._update_port_ip(context, port, nuage_vm['ip'])
def _get_router_by_subnet(self, context, subnet_id):
filters = {
'fixed_ips': {'subnet_id': [subnet_id]},
'device_owner': [os_constants.DEVICE_OWNER_ROUTER_INTF]
}
router_port = self.get_ports(context, filters=filters)
if not router_port:
msg = (_("Router for subnet %s not found ") % subnet_id)
raise n_exc.BadRequest(resource='port', msg=msg)
return router_port[0]['device_id']
def _process_port_create_security_group(self, context, port,
sec_group):
if not attributes.is_attr_set(sec_group):
port[ext_sg.SECURITYGROUPS] = []
return
port_id = port['id']
with context.session.begin(subtransactions=True):
for sg_id in sec_group:
super(NuagePlugin,
self)._create_port_security_group_binding(context,
port_id,
sg_id)
try:
vptag_vport_list = []
for sg_id in sec_group:
params = {
'neutron_port_id': port_id
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if nuage_port and nuage_port.get('nuage_vport_id'):
nuage_vport_id = nuage_port['nuage_vport_id']
sg = self._get_security_group(context, sg_id)
sg_rules = self.get_security_group_rules(
context,
{'security_group_id': [sg_id]})
sg_params = {
'nuage_port': nuage_port,
'sg': sg,
'sg_rules': sg_rules
}
nuage_vptag_id = (
self.nuageclient.process_port_create_security_group(
sg_params))
vptag_vport = {
'nuage_vporttag_id': nuage_vptag_id
}
vptag_vport_list.append(vptag_vport)
if vptag_vport_list:
params = {
'vptag_vport_list': vptag_vport_list,
'nuage_vport_id': nuage_vport_id
}
self.nuageclient.update_nuage_vport(params)
except Exception:
with excutils.save_and_reraise_exception():
for sg_id in sec_group:
super(NuagePlugin,
self)._delete_port_security_group_bindings(context,
port_id)
# Convert to list as a set might be passed here and
# this has to be serialized
port[ext_sg.SECURITYGROUPS] = (list(sec_group) if sec_group else [])
def _delete_port_security_group_bindings(self, context, port_id):
super(NuagePlugin,
self)._delete_port_security_group_bindings(context, port_id)
self.nuageclient.delete_port_security_group_bindings(port_id)
@lockutils.synchronized('create_port', 'nuage-port', external=True)
def create_port(self, context, port):
session = context.session
with session.begin(subtransactions=True):
p = port['port']
self._ensure_default_security_group_on_port(context, port)
port = super(NuagePlugin, self).create_port(context, port)
device_owner = port.get('device_owner', None)
if device_owner not in constants.AUTO_CREATE_PORT_OWNERS:
if 'fixed_ips' not in port or len(port['fixed_ips']) == 0:
return self._extend_port_dict_binding(context, port)
subnet_id = port['fixed_ips'][0]['subnet_id']
subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session,
subnet_id)
if subnet_mapping:
port_prefix = constants.NOVA_PORT_OWNER_PREF
if port['device_owner'].startswith(port_prefix):
#This request is coming from nova
try:
self._create_update_port(
context,
port,
subnet_mapping['net_partition_id'],
subnet_mapping['nuage_subnet_id'])
except Exception:
with excutils.save_and_reraise_exception():
super(NuagePlugin, self).delete_port(
context,
port['id'])
if ext_sg.SECURITYGROUPS in p:
self._process_port_create_security_group(
context,
port,
p[ext_sg.SECURITYGROUPS])
return self._extend_port_dict_binding(context, port)
def update_port(self, context, id, port):
p = port['port']
sg_groups = None
if p.get('device_owner', '').startswith(
constants.NOVA_PORT_OWNER_PREF):
session = context.session
with session.begin(subtransactions=True):
port = self._get_port(context, id)
port.update(p)
if not port.get('fixed_ips'):
return self._make_port_dict(port)
subnet_id = port['fixed_ips'][0]['subnet_id']
subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session,
subnet_id)
if not subnet_mapping:
msg = (_("Subnet %s not found on VSD") % subnet_id)
raise n_exc.BadRequest(resource='port', msg=msg)
params = {
'neutron_port_id': id,
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if not nuage_port or not nuage_port.get('nuage_vport_id'):
self._create_update_port(context, port,
subnet_mapping[
'net_partition_id'],
subnet_mapping['nuage_subnet_id'])
updated_port = self._make_port_dict(port)
sg_port = self._extend_port_dict_security_group(
updated_port,
port
)
sg_groups = sg_port[ext_sg.SECURITYGROUPS]
else:
updated_port = super(NuagePlugin, self).update_port(context, id,
port)
if not updated_port.get('fixed_ips'):
return updated_port
subnet_id = updated_port['fixed_ips'][0]['subnet_id']
subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session,
subnet_id)
if subnet_mapping:
if sg_groups:
self._delete_port_security_group_bindings(context,
updated_port['id'])
self._process_port_create_security_group(context,
updated_port,
sg_groups)
elif ext_sg.SECURITYGROUPS in p:
self._delete_port_security_group_bindings(context,
updated_port['id'])
self._process_port_create_security_group(
context,
updated_port,
p[ext_sg.SECURITYGROUPS]
)
return updated_port
@lockutils.synchronized('delete-port', 'nuage-del', external=True)
def delete_port(self, context, id, l3_port_check=True):
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
port = self._get_port(context, id)
nuage_vif_id = None
params = {
'neutron_port_id': id,
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
# This is required to pass the unit test test_floatingip_port_delete
self.disassociate_floatingips(context, id)
if not port['fixed_ips']:
return super(NuagePlugin, self).delete_port(context, id)
sub_id = port['fixed_ips'][0]['subnet_id']
subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session,
sub_id)
if not subnet_mapping:
return super(NuagePlugin, self).delete_port(context, id)
# Need to call this explicitly to delete vport to vporttag binding
if ext_sg.SECURITYGROUPS in port:
self._delete_port_security_group_bindings(context, id)
netpart_id = subnet_mapping['net_partition_id']
net_partition = nuagedb.get_net_partition_by_id(context.session,
netpart_id)
# Need to call this explicitly to delete vport
if constants.NOVA_PORT_OWNER_PREF in port['device_owner']:
if nuage_port:
nuage_vif_id = nuage_port['nuage_vif_id']
# This was a VM Port
filters = {'device_id': [port['device_id']]}
ports = self.get_ports(context, filters)
params = {
'no_of_ports': len(ports),
'net_partition': net_partition,
'tenant': port['tenant_id'],
'mac': port['mac_address'],
'nuage_vif_id': nuage_vif_id,
'id': port['device_id']
}
self.nuageclient.delete_vms(params)
super(NuagePlugin, self).delete_port(context, id)
def _check_view_auth(self, context, resource, action):
return policy.check(context, action, resource)
def _extend_port_dict_binding(self, context, port):
if self._check_view_auth(context, port, self.binding_view):
port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS
port[portbindings.VIF_DETAILS] = {
portbindings.CAP_PORT_FILTER: False
}
return port
def get_port(self, context, id, fields=None):
port = super(NuagePlugin, self).get_port(context, id, fields)
return self._fields(self._extend_port_dict_binding(context, port),
fields)
def get_ports(self, context, filters=None, fields=None):
ports = super(NuagePlugin, self).get_ports(context, filters, fields)
return [self._fields(self._extend_port_dict_binding(context, port),
fields) for port in ports]
def _check_router_subnet_for_tenant(self, context, tenant_id):
# Search router and subnet tables.
# If no entry left delete user and group from VSD
filters = {'tenant_id': [tenant_id]}
routers = self.get_routers(context, filters=filters)
subnets = self.get_subnets(context, filters=filters)
return bool(routers or subnets)
def _extend_network_dict_provider(self, context, network):
binding = nuagedb.get_network_binding(context.session, network['id'])
if binding:
network[pnet.NETWORK_TYPE] = binding.network_type
network[pnet.PHYSICAL_NETWORK] = binding.physical_network
network[pnet.SEGMENTATION_ID] = binding.vlan_id
def _process_provider_create(self, context, attrs):
network_type = attrs.get(pnet.NETWORK_TYPE)
physical_network = attrs.get(pnet.PHYSICAL_NETWORK)
segmentation_id = attrs.get(pnet.SEGMENTATION_ID)
network_type_set = attributes.is_attr_set(network_type)
physical_network_set = attributes.is_attr_set(physical_network)
segmentation_id_set = attributes.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return None, None, None
if not network_type_set:
msg = _("provider:network_type required")
raise n_exc.InvalidInput(error_message=msg)
elif network_type != 'vlan':
msg = (_("provider:network_type %s not supported in VSP")
% network_type)
raise nuage_exc.NuageBadRequest(msg=msg)
if not physical_network_set:
msg = _("provider:physical_network required")
raise nuage_exc.NuageBadRequest(msg=msg)
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise nuage_exc.NuageBadRequest(msg=msg)
self.nuageclient.validate_provider_network(network_type,
physical_network,
segmentation_id)
return network_type, physical_network, segmentation_id
def create_network(self, context, network):
(network_type, physical_network,
vlan_id) = self._process_provider_create(context,
network['network'])
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(
context,
network['network']['tenant_id']
)
net = super(NuagePlugin, self).create_network(context,
network)
self._process_l3_create(context, net, network['network'])
if network_type == 'vlan':
nuagedb.add_network_binding(context.session, net['id'],
network_type,
physical_network, vlan_id)
self._extend_network_dict_provider(context, net)
return net
def _validate_update_network(self, context, id, network):
req_data = network['network']
is_external_set = req_data.get(external_net.EXTERNAL)
if not attributes.is_attr_set(is_external_set):
return (None, None)
neutron_net = self.get_network(context, id)
if neutron_net.get(external_net.EXTERNAL) == is_external_set:
return (None, None)
subnet = self._validate_nuage_sharedresource(context, 'network', id)
if subnet and not is_external_set:
msg = _('External network with subnets can not be '
'changed to non-external network')
raise nuage_exc.OperationNotSupported(msg=msg)
if is_external_set:
# Check if there are vm ports attached to this network
# If there are, then updating the network is not allowed
ports = self.get_ports(context, filters={'network_id': [id]})
for p in ports:
if p['device_owner'].startswith(
constants.NOVA_PORT_OWNER_PREF):
raise n_exc.NetworkInUse(net_id=id)
return (is_external_set, subnet)
def get_network(self, context, net_id, fields=None):
net = super(NuagePlugin, self).get_network(context,
net_id,
None)
self._extend_network_dict_provider(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
nets = super(NuagePlugin,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self._extend_network_dict_provider(context, net)
return [self._fields(net, fields) for net in nets]
def update_network(self, context, id, network):
pnet._raise_if_updates_provider_attributes(network['network'])
with context.session.begin(subtransactions=True):
is_external_set, subnet = self._validate_update_network(context,
id,
network)
net = super(NuagePlugin, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
if subnet and is_external_set:
subn = subnet[0]
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session,
subn['id'])
if subnet_l2dom:
nuage_subnet_id = subnet_l2dom['nuage_subnet_id']
nuage_l2dom_tid = subnet_l2dom['nuage_l2dom_tmplt_id']
user_id = subnet_l2dom['nuage_user_id']
group_id = subnet_l2dom['nuage_group_id']
self.nuageclient.delete_subnet(nuage_subnet_id,
nuage_l2dom_tid)
nuagedb.delete_subnetl2dom_mapping(context.session,
subnet_l2dom)
if not self._check_router_subnet_for_tenant(
context, subn['tenant_id']):
self.nuageclient.delete_user(user_id)
self.nuageclient.delete_group(group_id)
self._add_nuage_sharedresource(subnet[0],
id,
constants.SR_TYPE_FLOATING)
return net
def delete_network(self, context, id):
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, id)
filter = {'network_id': [id]}
subnets = self.get_subnets(context, filters=filter)
for subnet in subnets:
self.delete_subnet(context, subnet['id'])
super(NuagePlugin, self).delete_network(context, id)
def _get_net_partition_for_subnet(self, context, subnet):
ent = subnet.get('net_partition', None)
if not ent:
def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name
net_partition = nuagedb.get_net_partition_by_name(context.session,
def_net_part)
else:
net_partition = self._resource_finder(context, 'subnet',
'net_partition', subnet)
if not net_partition:
msg = _('Either net_partition is not provided with subnet OR '
'default net_partition is not created at the start')
raise n_exc.BadRequest(resource='subnet', msg=msg)
return net_partition
@staticmethod
def _validate_create_subnet(subnet):
if (attributes.is_attr_set(subnet['gateway_ip'])
and netaddr.IPAddress(subnet['gateway_ip'])
not in netaddr.IPNetwork(subnet['cidr'])):
msg = "Gateway IP outside of the subnet CIDR "
raise nuage_exc.NuageBadRequest(msg=msg)
def _validate_create_provider_subnet(self, context, net_id):
net_filter = {'network_id': [net_id]}
existing_subn = self.get_subnets(context, filters=net_filter)
if len(existing_subn) > 0:
msg = _('Only one subnet is allowed per '
'Provider network %s') % net_id
raise nuage_exc.OperationNotSupported(msg=msg)
def _delete_nuage_sharedresource(self, net_id):
self.nuageclient.delete_nuage_sharedresource(net_id)
def _validate_nuage_sharedresource(self, context, resource, net_id):
filter = {'network_id': [net_id]}
existing_subn = self.get_subnets(context, filters=filter)
if len(existing_subn) > 1:
msg = _('Only one subnet is allowed per '
'external network %s') % net_id
raise nuage_exc.OperationNotSupported(msg=msg)
return existing_subn
def _add_nuage_sharedresource(self, subnet, net_id, type):
net = netaddr.IPNetwork(subnet['cidr'])
params = {
'neutron_subnet': subnet,
'net': net,
'type': type,
'net_id': net_id
}
self.nuageclient.create_nuage_sharedresource(params)
def _create_nuage_sharedresource(self, context, subnet, type):
subn = subnet['subnet']
net_id = subn['network_id']
self._validate_nuage_sharedresource(context, 'subnet', net_id)
with context.session.begin(subtransactions=True):
subn = super(NuagePlugin, self).create_subnet(context, subnet)
self._add_nuage_sharedresource(subn, net_id, type)
return subn
def _create_port_gateway(self, context, subnet, gw_ip=None):
if gw_ip is not None:
fixed_ip = [{'ip_address': gw_ip, 'subnet_id': subnet['id']}]
else:
fixed_ip = [{'subnet_id': subnet['id']}]
port_dict = dict(port=dict(
name='',
device_id='',
admin_state_up=True,
network_id=subnet['network_id'],
tenant_id=subnet['tenant_id'],
fixed_ips=fixed_ip,
mac_address=attributes.ATTR_NOT_SPECIFIED,
device_owner=os_constants.DEVICE_OWNER_DHCP))
port = super(NuagePlugin, self).create_port(context, port_dict)
return port
def _delete_port_gateway(self, context, ports):
for port in ports:
super(NuagePlugin, self).delete_port(context, port['id'])
def _create_nuage_subnet(self, context, neutron_subnet, netpart_id,
l2dom_template_id, pnet_binding):
net = netaddr.IPNetwork(neutron_subnet['cidr'])
# list(net)[-1] is the broadcast
last_address = neutron_subnet['allocation_pools'][-1]['end']
gw_port = self._create_port_gateway(context, neutron_subnet,
last_address)
params = {
'netpart_id': netpart_id,
'tenant_id': neutron_subnet['tenant_id'],
'net': net,
'l2dom_tmplt_id': l2dom_template_id,
'pnet_binding': pnet_binding,
'dhcp_ip': gw_port['fixed_ips'][0]['ip_address']
}
try:
nuage_subnet = self.nuageclient.create_subnet(neutron_subnet,
params)
except Exception:
with excutils.save_and_reraise_exception():
self._delete_port_gateway(context, [gw_port])
super(NuagePlugin, self).delete_subnet(context,
neutron_subnet['id'])
if nuage_subnet:
l2dom_id = str(nuage_subnet['nuage_l2template_id'])
user_id = nuage_subnet['nuage_userid']
group_id = nuage_subnet['nuage_groupid']
id = nuage_subnet['nuage_l2domain_id']
with context.session.begin(subtransactions=True):
nuagedb.add_subnetl2dom_mapping(context.session,
neutron_subnet['id'],
id,
netpart_id,
l2dom_id=l2dom_id,
nuage_user_id=user_id,
nuage_group_id=group_id)
def create_subnet(self, context, subnet):
subn = subnet['subnet']
net_id = subn['network_id']
if self._network_is_external(context, net_id):
return self._create_nuage_sharedresource(
context, subnet, constants.SR_TYPE_FLOATING)
pnet_binding = nuagedb.get_network_binding(context.session, net_id)
if pnet_binding:
self._validate_create_provider_subnet(context, net_id)
self._validate_create_subnet(subn)
net_partition = self._get_net_partition_for_subnet(context, subn)
neutron_subnet = super(NuagePlugin, self).create_subnet(context,
subnet)
self._create_nuage_subnet(context, neutron_subnet, net_partition['id'],
subn['nuage_subnet_template'],
pnet_binding)
return neutron_subnet
def update_subnet(self, context, id, subnet):
subn = copy.deepcopy(subnet['subnet'])
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session,
id)
params = {
'parent_id': subnet_l2dom['nuage_subnet_id'],
'type': subnet_l2dom['nuage_l2dom_tmplt_id']
}
with context.session.begin(subtransactions=True):
neutron_subnet = super(NuagePlugin, self).update_subnet(context,
id, subnet)
self.nuageclient.update_subnet(subn, params)
return neutron_subnet
def delete_subnet(self, context, id):
subnet = self.get_subnet(context, id)
if self._network_is_external(context, subnet['network_id']):
super(NuagePlugin, self).delete_subnet(context, id)
return self._delete_nuage_sharedresource(id)
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session, id)
if subnet_l2dom:
try:
self.nuageclient.delete_subnet(
subnet_l2dom['nuage_subnet_id'],
subnet_l2dom['nuage_l2dom_tmplt_id'])
except Exception:
                msg = (_('Unable to complete operation on subnet %s. '
                         'One or more ports have an IP allocation '
                         'from this subnet.') % id)
raise n_exc.BadRequest(resource='subnet', msg=msg)
super(NuagePlugin, self).delete_subnet(context, id)
if subnet_l2dom and not self._check_router_subnet_for_tenant(
context, subnet['tenant_id']):
self.nuageclient.delete_user(subnet_l2dom['nuage_user_id'])
self.nuageclient.delete_group(subnet_l2dom['nuage_group_id'])
def add_router_interface(self, context, router_id, interface_info):
session = context.session
with session.begin(subtransactions=True):
rtr_if_info = super(NuagePlugin,
self).add_router_interface(context,
router_id,
interface_info)
subnet_id = rtr_if_info['subnet_id']
subn = self.get_subnet(context, subnet_id)
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session,
router_id)
nuage_zone = self.nuageclient.get_zone_by_routerid(router_id)
if not nuage_zone or not ent_rtr_mapping:
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
msg = (_("Router %s does not hold default zone OR "
"domain in VSD. Router-IF add failed")
% router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session,
subnet_id)
if not subnet_l2dom:
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
msg = (_("Subnet %s does not hold Nuage VSD reference. "
"Router-IF add failed") % subnet_id)
raise n_exc.BadRequest(resource='subnet', msg=msg)
if (subnet_l2dom['net_partition_id'] !=
ent_rtr_mapping['net_partition_id']):
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
                msg = (_("Subnet %(subnet)s and Router %(router)s belong to "
                         "different net_partition. Router-IF add "
                         "not permitted") % {'subnet': subnet_id,
                                             'router': router_id})
raise n_exc.BadRequest(resource='subnet', msg=msg)
nuage_subnet_id = subnet_l2dom['nuage_subnet_id']
nuage_l2dom_tmplt_id = subnet_l2dom['nuage_l2dom_tmplt_id']
if self.nuageclient.vms_on_l2domain(nuage_subnet_id):
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
                msg = (_("Subnet %s has one or more active VMs. "
"Router-IF add not permitted") % subnet_id)
raise n_exc.BadRequest(resource='subnet', msg=msg)
self.nuageclient.delete_subnet(nuage_subnet_id,
nuage_l2dom_tmplt_id)
net = netaddr.IPNetwork(subn['cidr'])
pnet_binding = nuagedb.get_network_binding(context.session,
subn['network_id'])
params = {
'net': net,
'zone_id': nuage_zone['nuage_zone_id'],
'neutron_subnet_id': subnet_id,
'pnet_binding': pnet_binding
}
if not attributes.is_attr_set(subn['gateway_ip']):
subn['gateway_ip'] = str(netaddr.IPAddress(net.first + 1))
try:
nuage_subnet = self.nuageclient.create_domain_subnet(subn,
params)
except Exception:
with excutils.save_and_reraise_exception():
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
if nuage_subnet:
ns_dict = {}
ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_subnetid']
ns_dict['nuage_l2dom_tmplt_id'] = None
nuagedb.update_subnetl2dom_mapping(subnet_l2dom,
ns_dict)
return rtr_if_info
def remove_router_interface(self, context, router_id, interface_info):
if 'subnet_id' in interface_info:
subnet_id = interface_info['subnet_id']
subnet = self.get_subnet(context, subnet_id)
found = False
try:
filters = {'device_id': [router_id],
'device_owner':
[os_constants.DEVICE_OWNER_ROUTER_INTF],
'network_id': [subnet['network_id']]}
ports = self.get_ports(context, filters)
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
found = True
break
except exc.NoResultFound:
msg = (_("No router interface found for Router %s. "
"Router-IF delete failed") % router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
if not found:
msg = (_("No router interface found for Router %s. "
"Router-IF delete failed") % router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
elif 'port_id' in interface_info:
port_db = self._get_port(context, interface_info['port_id'])
if not port_db:
msg = (_("No router interface found for Router %s. "
"Router-IF delete failed") % router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
subnet_id = port_db['fixed_ips'][0]['subnet_id']
session = context.session
with session.begin(subtransactions=True):
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session,
subnet_id)
if not subnet_l2dom:
return super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
nuage_subn_id = subnet_l2dom['nuage_subnet_id']
if self.nuageclient.vms_on_subnet(nuage_subn_id):
                msg = (_("Subnet %s has one or more active VMs. "
"Router-IF delete not permitted") % subnet_id)
raise n_exc.BadRequest(resource='subnet', msg=msg)
neutron_subnet = self.get_subnet(context, subnet_id)
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(
context.session,
router_id)
if not ent_rtr_mapping:
msg = (_("Router %s does not hold net_partition "
"assoc on Nuage VSD. Router-IF delete failed")
% router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
net = netaddr.IPNetwork(neutron_subnet['cidr'])
netpart_id = ent_rtr_mapping['net_partition_id']
pnet_binding = nuagedb.get_network_binding(
context.session, neutron_subnet['network_id'])
params = {
'tenant_id': neutron_subnet['tenant_id'],
'net': net,
'netpart_id': netpart_id,
'nuage_subn_id': nuage_subn_id,
'neutron_subnet': neutron_subnet,
'pnet_binding': pnet_binding
}
nuage_subnet = self.nuageclient.remove_router_interface(params)
info = super(NuagePlugin,
self).remove_router_interface(context, router_id,
interface_info)
if nuage_subnet:
tmplt_id = str(nuage_subnet['nuage_l2template_id'])
ns_dict = {}
ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_l2domain_id']
ns_dict['nuage_l2dom_tmplt_id'] = tmplt_id
nuagedb.update_subnetl2dom_mapping(subnet_l2dom,
ns_dict)
return info
def _get_net_partition_for_router(self, context, rtr):
ent = rtr.get('net_partition', None)
if not ent:
def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name
net_partition = nuagedb.get_net_partition_by_name(context.session,
def_net_part)
else:
net_partition = self._resource_finder(context, 'router',
'net_partition', rtr)
if not net_partition:
msg = _("Either net_partition is not provided with router OR "
"default net_partition is not created at the start")
raise n_exc.BadRequest(resource='router', msg=msg)
return net_partition
def create_router(self, context, router):
net_partition = self._get_net_partition_for_router(context, router)
neutron_router = super(NuagePlugin, self).create_router(context,
router)
params = {
'net_partition': net_partition,
'tenant_id': neutron_router['tenant_id']
}
try:
nuage_router = self.nuageclient.create_router(neutron_router,
router['router'],
params)
except Exception:
with excutils.save_and_reraise_exception():
super(NuagePlugin, self).delete_router(context,
neutron_router['id'])
if nuage_router:
with context.session.begin(subtransactions=True):
nuagedb.add_entrouter_mapping(context.session,
net_partition['id'],
neutron_router['id'],
nuage_router['nuage_domain_id'])
return neutron_router
def _validate_nuage_staticroutes(self, old_routes, added, removed):
cidrs = []
for old in old_routes:
if old not in removed:
ip = netaddr.IPNetwork(old['destination'])
cidrs.append(ip)
for route in added:
ip = netaddr.IPNetwork(route['destination'])
matching = netaddr.all_matching_cidrs(ip.ip, cidrs)
if matching:
                msg = _('Multiple static routes to the same subnet '
                        'are not allowed')
raise n_exc.BadRequest(resource='router', msg=msg)
cidrs.append(ip)
def update_router(self, context, id, router):
r = router['router']
with context.session.begin(subtransactions=True):
if 'routes' in r:
old_routes = self._get_extra_routes_by_router_id(context,
id)
added, removed = utils.diff_list_of_dict(old_routes,
r['routes'])
self._validate_nuage_staticroutes(old_routes, added, removed)
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(
context.session, id)
if not ent_rtr_mapping:
                    msg = (_("Router %s does not hold a net-partition "
                             "association on VSD. Extra-route failed") % id)
raise n_exc.BadRequest(resource='router', msg=msg)
# Let it do internal checks first and verify it.
router_updated = super(NuagePlugin,
self).update_router(context,
id,
router)
for route in removed:
destaddr = route['destination']
cidr = destaddr.split('/')
params = {
"address": cidr[0],
"nexthop": route['nexthop'],
"nuage_domain_id": ent_rtr_mapping['nuage_router_id']
}
self.nuageclient.delete_nuage_staticroute(params)
for route in added:
params = {
'parent_id': ent_rtr_mapping['nuage_router_id'],
'net': netaddr.IPNetwork(route['destination']),
'nexthop': route['nexthop']
}
self.nuageclient.create_nuage_staticroute(
params)
else:
router_updated = super(NuagePlugin, self).update_router(
context, id, router)
return router_updated
def delete_router(self, context, id):
neutron_router = self.get_router(context, id)
session = context.session
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session,
id)
if ent_rtr_mapping:
filters = {
'device_id': [id],
'device_owner': [os_constants.DEVICE_OWNER_ROUTER_INTF]
}
ports = self.get_ports(context, filters)
if ports:
raise l3.RouterInUse(router_id=id)
nuage_domain_id = ent_rtr_mapping['nuage_router_id']
self.nuageclient.delete_router(nuage_domain_id)
super(NuagePlugin, self).delete_router(context, id)
nuage_zone = self.nuageclient.get_zone_by_routerid(id)
if nuage_zone and not self._check_router_subnet_for_tenant(
context, neutron_router['tenant_id']):
user_id, group_id = self.nuageclient.get_usergroup(
neutron_router['tenant_id'],
ent_rtr_mapping['net_partition_id'])
self.nuageclient.delete_user(user_id)
self.nuageclient.delete_group(group_id)
def _make_net_partition_dict(self, net_partition, fields=None):
res = {
'id': net_partition['id'],
'name': net_partition['name'],
'l3dom_tmplt_id': net_partition['l3dom_tmplt_id'],
'l2dom_tmplt_id': net_partition['l2dom_tmplt_id'],
}
return self._fields(res, fields)
def _create_net_partition(self, session, net_part_name):
fip_quota = cfg.CONF.RESTPROXY.default_floatingip_quota
params = {
"name": net_part_name,
"fp_quota": str(fip_quota)
}
nuage_net_partition = self.nuageclient.create_net_partition(params)
net_partitioninst = None
if nuage_net_partition:
nuage_entid = nuage_net_partition['nuage_entid']
l3dom_id = nuage_net_partition['l3dom_id']
l2dom_id = nuage_net_partition['l2dom_id']
with session.begin():
net_partitioninst = nuagedb.add_net_partition(session,
nuage_entid,
l3dom_id,
l2dom_id,
net_part_name)
if not net_partitioninst:
return {}
return self._make_net_partition_dict(net_partitioninst)
def _create_default_net_partition(self, default_net_part):
def_netpart = self.nuageclient.get_def_netpartition_data(
default_net_part)
session = db.get_session()
if def_netpart:
net_partition = nuagedb.get_net_partition_by_name(
session, default_net_part)
with session.begin(subtransactions=True):
if net_partition:
nuagedb.delete_net_partition(session, net_partition)
net_part = nuagedb.add_net_partition(session,
def_netpart['np_id'],
def_netpart['l3dom_tid'],
def_netpart['l2dom_tid'],
default_net_part)
return self._make_net_partition_dict(net_part)
else:
return self._create_net_partition(session, default_net_part)
def create_net_partition(self, context, net_partition):
ent = net_partition['net_partition']
session = context.session
return self._create_net_partition(session, ent["name"])
def delete_net_partition(self, context, id):
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_entid(context.session,
id)
if ent_rtr_mapping:
            msg = (_("One or more routers are still attached to "
                     "net_partition %s.") % id)
raise n_exc.BadRequest(resource='net_partition', msg=msg)
net_partition = nuagedb.get_net_partition_by_id(context.session, id)
if not net_partition:
            msg = (_("NetPartition with id %s does not exist") % id)
raise n_exc.BadRequest(resource='net_partition', msg=msg)
l3dom_tmplt_id = net_partition['l3dom_tmplt_id']
l2dom_tmplt_id = net_partition['l2dom_tmplt_id']
self.nuageclient.delete_net_partition(net_partition['id'],
l3dom_id=l3dom_tmplt_id,
l2dom_id=l2dom_tmplt_id)
with context.session.begin(subtransactions=True):
nuagedb.delete_net_partition(context.session,
net_partition)
def get_net_partition(self, context, id, fields=None):
net_partition = nuagedb.get_net_partition_by_id(context.session,
id)
return self._make_net_partition_dict(net_partition)
def get_net_partitions(self, context, filters=None, fields=None):
net_partitions = nuagedb.get_net_partitions(context.session,
filters=filters,
fields=fields)
return [self._make_net_partition_dict(net_partition, fields)
for net_partition in net_partitions]
def _check_floatingip_update(self, context, port):
filter = {'fixed_port_id': [port['id']]}
local_fip = self.get_floatingips(context,
filters=filter)
if local_fip:
fip = local_fip[0]
self._create_update_floatingip(context,
fip, port['id'])
def _create_update_floatingip(self, context,
neutron_fip, port_id):
rtr_id = neutron_fip['router_id']
net_id = neutron_fip['floating_network_id']
fip_pool = self.nuageclient.get_nuage_fip_pool_by_id(net_id)
if not fip_pool:
msg = _('sharedresource %s not found on VSD') % net_id
raise n_exc.BadRequest(resource='floatingip',
msg=msg)
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(context.session,
rtr_id)
if not ent_rtr_mapping:
msg = _('router %s is not associated with '
'any net-partition') % rtr_id
raise n_exc.BadRequest(resource='floatingip',
msg=msg)
params = {
'router_id': ent_rtr_mapping['nuage_router_id'],
'fip_id': neutron_fip['id'],
'neutron_fip': neutron_fip
}
fip = self.nuageclient.get_nuage_fip_by_id(params)
if not fip:
params = {
'nuage_rtr_id': ent_rtr_mapping['nuage_router_id'],
'nuage_fippool_id': fip_pool['nuage_fip_pool_id'],
'neutron_fip_ip': neutron_fip['floating_ip_address'],
'neutron_fip_id': neutron_fip['id']
}
nuage_fip_id = self.nuageclient.create_nuage_floatingip(params)
else:
nuage_fip_id = fip['nuage_fip_id']
# Update VM if required
params = {
'neutron_port_id': port_id,
'nuage_fip_id': nuage_fip_id,
'nuage_rtr_id': ent_rtr_mapping['nuage_router_id']
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if nuage_port:
if (nuage_port['nuage_domain_id']) != (
ent_rtr_mapping['nuage_router_id']):
                msg = _('Floating IP cannot be associated to a VM in a '
                        'different router context')
raise nuage_exc.OperationNotSupported(msg=msg)
params = {
'nuage_vport_id': nuage_port['nuage_vport_id'],
'nuage_fip_id': nuage_fip_id
}
self.nuageclient.update_nuage_vm_vport(params)
def create_floatingip(self, context, floatingip):
fip = floatingip['floatingip']
with context.session.begin(subtransactions=True):
neutron_fip = super(NuagePlugin, self).create_floatingip(
context, floatingip)
if not neutron_fip['router_id']:
return neutron_fip
try:
self._create_update_floatingip(context, neutron_fip,
fip['port_id'])
except (nuage_exc.OperationNotSupported, n_exc.BadRequest):
with excutils.save_and_reraise_exception():
super(NuagePlugin, self).delete_floatingip(
context, neutron_fip['id'])
return neutron_fip
def disassociate_floatingips(self, context, port_id, do_notify=True):
router_ids = super(NuagePlugin, self).disassociate_floatingips(
context, port_id, do_notify=do_notify)
params = {
'neutron_port_id': port_id,
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if nuage_port:
params = {
'nuage_vport_id': nuage_port['nuage_vport_id'],
'nuage_fip_id': None
}
self.nuageclient.update_nuage_vm_vport(params)
return router_ids
def update_floatingip(self, context, id, floatingip):
fip = floatingip['floatingip']
orig_fip = self._get_floatingip(context, id)
port_id = orig_fip['fixed_port_id']
router_ids = []
with context.session.begin(subtransactions=True):
neutron_fip = super(NuagePlugin, self).update_floatingip(
context, id, floatingip)
if fip['port_id'] is not None:
if not neutron_fip['router_id']:
ret_msg = 'floating-ip is not associated yet'
raise n_exc.BadRequest(resource='floatingip',
msg=ret_msg)
try:
self._create_update_floatingip(context,
neutron_fip,
fip['port_id'])
except nuage_exc.OperationNotSupported:
with excutils.save_and_reraise_exception():
router_ids = super(
NuagePlugin, self).disassociate_floatingips(
context, fip['port_id'], do_notify=False)
except n_exc.BadRequest:
with excutils.save_and_reraise_exception():
super(NuagePlugin, self).delete_floatingip(context,
id)
else:
params = {
'neutron_port_id': port_id,
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if nuage_port:
params = {
'nuage_vport_id': nuage_port['nuage_vport_id'],
'nuage_fip_id': None
}
self.nuageclient.update_nuage_vm_vport(params)
# now that we've left db transaction, we are safe to notify
self.notify_routers_updated(context, router_ids)
return neutron_fip
def delete_floatingip(self, context, id):
fip = self._get_floatingip(context, id)
port_id = fip['fixed_port_id']
with context.session.begin(subtransactions=True):
if port_id:
params = {
                    # look up the Nuage vport via the fixed port, not the fip id
                    'neutron_port_id': port_id,
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if (nuage_port and
nuage_port['nuage_vport_id'] is not None):
params = {
'nuage_vport_id': nuage_port['nuage_vport_id'],
'nuage_fip_id': None
}
self.nuageclient.update_nuage_vm_vport(params)
rtr_id = fip['last_known_router_id']
if rtr_id:
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(
context.session,
rtr_id)
if not ent_rtr_mapping:
msg = _('router %s is not associated with '
'any net-partition') % rtr_id
raise n_exc.BadRequest(resource='floatingip',
msg=msg)
params = {
'router_id': ent_rtr_mapping['nuage_router_id'],
'fip_id': id
}
fip = self.nuageclient.get_nuage_fip_by_id(params)
if fip:
self.nuageclient.delete_nuage_floatingip(
fip['nuage_fip_id'])
super(NuagePlugin, self).delete_floatingip(context, id)
def delete_security_group(self, context, id):
filters = {'security_group_id': [id]}
ports = self._get_port_security_group_bindings(context,
filters)
if ports:
raise ext_sg.SecurityGroupInUse(id=id)
sg_rules = self.get_security_group_rules(context,
{'security_group_id': [id]})
if sg_rules:
self.nuageclient.delete_nuage_sgrule(sg_rules)
self.nuageclient.delete_nuage_secgroup(id)
super(NuagePlugin, self).delete_security_group(context, id)
def create_security_group_rule(self, context, security_group_rule):
sg_rule = security_group_rule['security_group_rule']
self.nuageclient.validate_nuage_sg_rule_definition(sg_rule)
sg_id = sg_rule['security_group_id']
local_sg_rule = super(NuagePlugin,
self).create_security_group_rule(
context, security_group_rule)
try:
nuage_vptag = self.nuageclient.get_sg_vptag_mapping(sg_id)
if nuage_vptag:
sg_params = {
'sg_id': sg_id,
'neutron_sg_rule': local_sg_rule,
'vptag': nuage_vptag
}
self.nuageclient.create_nuage_sgrule(sg_params)
except Exception:
with excutils.save_and_reraise_exception():
super(NuagePlugin,
self).delete_security_group_rule(context,
local_sg_rule['id'])
return local_sg_rule
def delete_security_group_rule(self, context, id):
local_sg_rule = self.get_security_group_rule(context, id)
super(NuagePlugin, self).delete_security_group_rule(context, id)
self.nuageclient.delete_nuage_sgrule([local_sg_rule])
|
the-stack_106_17870
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
import jaeger_client.thrift_gen.jaeger.ttypes as ttypes
import jaeger_client.thrift_gen.sampling.SamplingManager as sampling_manager
from opentracing import child_of, follows_from
from jaeger_client import ProbabilisticSampler, RateLimitingSampler
from jaeger_client import thrift, Span, SpanContext
from jaeger_client.thrift_gen.agent import Agent as Agent
from thrift.protocol.TCompactProtocol import TCompactProtocol
from thrift.transport.TTransport import TMemoryBuffer
def test_submit_batch(tracer):
span = tracer.start_span('test-span')
span.set_tag('bender', 'is great')
span.set_tag('peer.ipv4', 123123)
span.set_tag('unicode_val', u'non-ascii: \xe9')
span.set_tag(u'unicode_key_\xe9', 'ascii val')
span.log_event('kiss-my-shiny-metal-...')
span.finish() # to get the duration defined
# verify that we can serialize the span
_marshall_span(span)
def _marshall_span(span):
class TestTrans(TMemoryBuffer):
def now_reading(self):
"""
            Thrift TMemoryBuffer is not readable and writable at the
            same time; it is one or the other. This converts the
            buffer from writable to readable.
"""
self._buffer = BytesIO(self.getvalue())
batch = thrift.make_jaeger_batch(
spans=[span], process=ttypes.Process(serviceName='x', tags={}))
# write and read them to test encoding
args = Agent.emitBatch_args(batch)
t = TestTrans()
prot = TCompactProtocol(t)
args.write(prot)
t.now_reading()
args.read(prot)
def test_large_ids(tracer):
def serialize(trace_id, span_id):
"""Checks that there are no exceptions during marshalling."""
parent_ctx = SpanContext(trace_id=trace_id, span_id=span_id,
parent_id=0, flags=1)
parent = Span(context=parent_ctx, operation_name='x', tracer=tracer)
span = tracer.start_span(operation_name='x',
references=child_of(parent.context))
span.finish()
_marshall_span(span)
trace_id = 0x77fd53dc6b437681
serialize(trace_id, trace_id)
assert thrift.id_to_int(trace_id) == 0x77fd53dc6b437681
trace_id = 0x7fffffffffffffff
serialize(trace_id, trace_id)
assert thrift.id_to_int(trace_id) == 0x7fffffffffffffff
trace_id = 0x8000000000000000
serialize(trace_id, trace_id)
assert thrift.id_to_int(trace_id) == -0x8000000000000000
trace_id = 0x97fd53dc6b437681
serialize(trace_id, trace_id)
trace_id = (1 << 64) - 1
assert trace_id == 0xffffffffffffffff
serialize(trace_id, trace_id)
assert thrift.id_to_int(trace_id) == -1
trace_id = (1 << 128) - 1
span_id = 0xffffffffffffffff
assert trace_id == 0xffffffffffffffffffffffffffffffff
serialize(trace_id, span_id)
assert thrift._id_to_low(trace_id) == 0xffffffffffffffff
assert thrift._id_to_high(trace_id) == 0xffffffffffffffff
trace_id = 0xfb34678b8864f051e5c8c603484e57
span_id = 0x77fd53dc6b437681
serialize(trace_id, span_id)
assert thrift._id_to_low(trace_id) == 0x51e5c8c603484e57
assert thrift._id_to_high(trace_id) == 0xfb34678b8864f0
def test_none_ids():
assert thrift.id_to_int(None) is None
assert thrift._id_to_low(None) is None
assert thrift._id_to_high(None) is None
def test_large_tags():
tag = thrift.make_tag('x', 'y' * 300, max_length=256)
assert len(tag.vStr) <= 256
def test_bool_tags():
tag = thrift.make_tag('booltag', True, max_length=256)
assert tag.vBool is True
def test_bool_tags_false():
tag = thrift.make_tag('booltag', False, max_length=256)
assert tag.vBool is False
def test_long_tags():
tag = thrift.make_tag('longtag', 404, max_length=256)
assert tag.vLong == 404
def test_double_tags():
tag = thrift.make_tag('doubletag', 12.1, max_length=256)
assert tag.vDouble == 12.1
def test_parse_sampling_strategy():
# probabilistic
resp = sampling_manager.SamplingStrategyResponse(
strategyType=sampling_manager.SamplingStrategyType.PROBABILISTIC)
s, e = thrift.parse_sampling_strategy(response=resp)
assert s is None and e is not None
resp.probabilisticSampling = \
sampling_manager.ProbabilisticSamplingStrategy(samplingRate=2)
s, e = thrift.parse_sampling_strategy(response=resp)
assert s is None and e is not None
resp.probabilisticSampling = \
sampling_manager.ProbabilisticSamplingStrategy(samplingRate=0.5)
s, e = thrift.parse_sampling_strategy(response=resp)
assert isinstance(s, ProbabilisticSampler) and e is None
# rate limiting
resp = sampling_manager.SamplingStrategyResponse(
strategyType=sampling_manager.SamplingStrategyType.RATE_LIMITING)
s, e = thrift.parse_sampling_strategy(response=resp)
assert s is None and e is not None
resp.rateLimitingSampling = \
sampling_manager.RateLimitingSamplingStrategy(maxTracesPerSecond=-1)
s, e = thrift.parse_sampling_strategy(response=resp)
assert s is None and e is not None
resp.rateLimitingSampling = \
sampling_manager.RateLimitingSamplingStrategy(maxTracesPerSecond=1)
s, e = thrift.parse_sampling_strategy(response=resp)
assert isinstance(s, RateLimitingSampler) and e is None
# wrong strategy type
resp.strategyType = 'x'
s, e = thrift.parse_sampling_strategy(response=resp)
assert s is None and e is not None
def test_parse_span_references(tracer):
span = tracer.start_span('test')
span2 = tracer.start_span('test2')
follow_span = tracer.start_span('test-follow', references=[follows_from(span.context),
child_of(span2.context)])
span.finish()
span2.finish()
follow_span.finish()
_marshall_span(follow_span)
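def test_short_str_tags():
    # A small companion check to the tag tests above (a sketch; it assumes
    # make_tag only truncates string values longer than max_length):
    tag = thrift.make_tag('strtag', 'short-value', max_length=256)
    assert tag.vStr == 'short-value'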
|
the-stack_106_17871
|
"""
https://leetcode.com/problems/reverse-linked-list/
Difficulty: Easy
Given the head of a singly linked list, reverse the list, and return the reversed list.
Example 1:
Input: head = [1,2,3,4,5]
Output: [5,4,3,2,1]
Example 2:
Input: head = [1,2]
Output: [2,1]
Example 3:
Input: head = []
Output: []
Constraints:
The number of nodes in the list is the range [0, 5000].
-5000 <= Node.val <= 5000
Follow up: A linked list can be reversed either iteratively or recursively. Could you implement both?
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseList(self, head):
prev = None
cur = head
while cur:
n = cur.next
cur.next = prev
prev = cur
cur = n
return prev
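# A recursive variant for the follow-up question (a sketch; it assumes the
# same ListNode definition as above and is not part of the original solution):
class SolutionRecursive:
    def reverseList(self, head):
        # Base case: an empty list or a single node is already reversed.
        if head is None or head.next is None:
            return head
        # Reverse the rest of the list, then hook the current node onto its end.
        new_head = self.reverseList(head.next)
        head.next.next = head
        head.next = None
        return new_head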
|
the-stack_106_17872
|
#!/usr/bin/env python2
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import functools
import os
import re
macro_root = read_config('fbcode', 'macro_lib', '//macro_lib')
include_defs("{}/convert/base.py".format(macro_root), "base")
include_defs("{}/rule.py".format(macro_root))
class CppJvmLibrary(base.Converter):
def get_fbconfig_rule_type(self):
return 'cpp_jvm_library'
def get_allowed_args(self):
return set([
'name',
'major_version',
])
def convert(self, base_path, name, major_version, visibility=None):
attrs = collections.OrderedDict()
attrs['name'] = name
if visibility is not None:
attrs['visibility'] = visibility
def formatter(flags, platform, _):
arch = self.get_platform_architecture(platform)
# Remap arch to JVM-specific names.
arch = {'x86_64': 'amd64'}.get(arch, arch)
return [flag.format(arch=arch, platform=platform) for flag in flags]
platform_jvm_path = '/usr/local/fb-jdk-{}-{{platform}}'.format(major_version)
jvm_path = '/usr/local/fb-jdk-{}'.format(major_version)
# We use include/library paths to wrap the custom FB JDK installed at
# system locations. As such, we don't properly hash various components
# (e.g. headers, libraries) pulled into the build. Longer-term, we
# should move the FB JDK into tp2 to do this properly.
attrs['exported_platform_preprocessor_flags'] = (
self.format_platform_param(
functools.partial(
formatter,
['-isystem',
os.path.join(platform_jvm_path, 'include'),
'-isystem',
os.path.join(platform_jvm_path, 'include', 'linux'),
'-isystem',
os.path.join(jvm_path, 'include'),
'-isystem',
os.path.join(jvm_path, 'include', 'linux')])))
attrs['exported_platform_linker_flags'] = (
self.format_platform_param(
functools.partial(
formatter,
['-L{}/jre/lib/{{arch}}/server'.format(platform_jvm_path),
'-Wl,-rpath={}/jre/lib/{{arch}}/server'.format(platform_jvm_path),
'-L{}/jre/lib/{{arch}}/server'.format(jvm_path),
'-Wl,-rpath={}/jre/lib/{{arch}}/server'.format(jvm_path),
'-ljvm'])))
return [Rule('cxx_library', attrs)]
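# Hypothetical TARGETS usage of the fbconfig rule handled by this converter
# (the target name and version are illustrative, not taken from a real repo):
#
#   cpp_jvm_library(
#       name = 'fb-jdk',
#       major_version = '8',
#   )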
|
the-stack_106_17873
|
# coding: utf-8
"""
Seldon Deploy API
API to interact and manage the lifecycle of your machine learning models deployed through Seldon Deploy. # noqa: E501
OpenAPI spec version: v1alpha1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BatchJobDescription(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'finished_at': 'Time',
'started_at': 'Time',
'workflow_name': 'str',
'workflow_status': 'NodePhase',
'workflow_uid': 'UID'
}
attribute_map = {
'finished_at': 'FinishedAt',
'started_at': 'StartedAt',
'workflow_name': 'workflowName',
'workflow_status': 'workflowStatus',
'workflow_uid': 'workflowUID'
}
def __init__(self, finished_at=None, started_at=None, workflow_name=None, workflow_status=None, workflow_uid=None): # noqa: E501
"""BatchJobDescription - a model defined in Swagger""" # noqa: E501
self._finished_at = None
self._started_at = None
self._workflow_name = None
self._workflow_status = None
self._workflow_uid = None
self.discriminator = None
if finished_at is not None:
self.finished_at = finished_at
if started_at is not None:
self.started_at = started_at
if workflow_name is not None:
self.workflow_name = workflow_name
if workflow_status is not None:
self.workflow_status = workflow_status
if workflow_uid is not None:
self.workflow_uid = workflow_uid
@property
def finished_at(self):
"""Gets the finished_at of this BatchJobDescription. # noqa: E501
:return: The finished_at of this BatchJobDescription. # noqa: E501
:rtype: Time
"""
return self._finished_at
@finished_at.setter
def finished_at(self, finished_at):
"""Sets the finished_at of this BatchJobDescription.
:param finished_at: The finished_at of this BatchJobDescription. # noqa: E501
:type: Time
"""
self._finished_at = finished_at
@property
def started_at(self):
"""Gets the started_at of this BatchJobDescription. # noqa: E501
:return: The started_at of this BatchJobDescription. # noqa: E501
:rtype: Time
"""
return self._started_at
@started_at.setter
def started_at(self, started_at):
"""Sets the started_at of this BatchJobDescription.
:param started_at: The started_at of this BatchJobDescription. # noqa: E501
:type: Time
"""
self._started_at = started_at
@property
def workflow_name(self):
"""Gets the workflow_name of this BatchJobDescription. # noqa: E501
Name of related Argo Workflow # noqa: E501
:return: The workflow_name of this BatchJobDescription. # noqa: E501
:rtype: str
"""
return self._workflow_name
@workflow_name.setter
def workflow_name(self, workflow_name):
"""Sets the workflow_name of this BatchJobDescription.
Name of related Argo Workflow # noqa: E501
:param workflow_name: The workflow_name of this BatchJobDescription. # noqa: E501
:type: str
"""
self._workflow_name = workflow_name
@property
def workflow_status(self):
"""Gets the workflow_status of this BatchJobDescription. # noqa: E501
:return: The workflow_status of this BatchJobDescription. # noqa: E501
:rtype: NodePhase
"""
return self._workflow_status
@workflow_status.setter
def workflow_status(self, workflow_status):
"""Sets the workflow_status of this BatchJobDescription.
:param workflow_status: The workflow_status of this BatchJobDescription. # noqa: E501
:type: NodePhase
"""
self._workflow_status = workflow_status
@property
def workflow_uid(self):
"""Gets the workflow_uid of this BatchJobDescription. # noqa: E501
:return: The workflow_uid of this BatchJobDescription. # noqa: E501
:rtype: UID
"""
return self._workflow_uid
@workflow_uid.setter
def workflow_uid(self, workflow_uid):
"""Sets the workflow_uid of this BatchJobDescription.
:param workflow_uid: The workflow_uid of this BatchJobDescription. # noqa: E501
:type: UID
"""
self._workflow_uid = workflow_uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BatchJobDescription, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchJobDescription):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
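# A minimal usage sketch (the field values below are illustrative only):
#
#   desc = BatchJobDescription(workflow_name='batch-job-abc',
#                              workflow_status='Succeeded')
#   print(desc.to_dict())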
|
the-stack_106_17874
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Lbann(CMakePackage, CudaPackage, ROCmPackage):
"""LBANN: Livermore Big Artificial Neural Network Toolkit. A distributed
memory, HPC-optimized, model and data parallel training toolkit for deep
neural networks."""
homepage = "http://software.llnl.gov/lbann/"
url = "https://github.com/LLNL/lbann/archive/v0.91.tar.gz"
git = "https://github.com/LLNL/lbann.git"
maintainers = ['bvanessen']
version('develop', branch='develop')
version('0.101', sha256='69d3fe000a88a448dc4f7e263bcb342c34a177bd9744153654528cd86335a1f7')
version('0.100', sha256='d1bab4fb6f1b80ae83a7286cc536a32830890f6e5b0c3107a17c2600d0796912')
version('0.99', sha256='3358d44f1bc894321ce07d733afdf6cb7de39c33e3852d73c9f31f530175b7cd')
version('0.98.1', sha256='9a2da8f41cd8bf17d1845edf9de6d60f781204ebd37bffba96d8872036c10c66')
version('0.98', sha256='8d64b9ac0f1d60db553efa4e657f5ea87e790afe65336117267e9c7ae6f68239')
version('0.97.1', sha256='2f2756126ac8bb993202cf532d72c4d4044e877f4d52de9fdf70d0babd500ce4')
version('0.97', sha256='9794a706fc7ac151926231efdf74564c39fbaa99edca4acb745ee7d20c32dae7')
version('0.96', sha256='97af78e9d3c405e963361d0db96ee5425ee0766fa52b43c75b8a5670d48e4b4a')
version('0.95', sha256='d310b986948b5ee2bedec36383a7fe79403721c8dc2663a280676b4e431f83c2')
version('0.94', sha256='567e99b488ebe6294933c98a212281bffd5220fc13a0a5cd8441f9a3761ceccf')
version('0.93', sha256='77bfd7fe52ee7495050f49bcdd0e353ba1730e3ad15042c678faa5eeed55fb8c')
version('0.92', sha256='9187c5bcbc562c2828fe619d53884ab80afb1bcd627a817edb935b80affe7b84')
version('0.91', sha256='b69f470829f434f266119a33695592f74802cff4b76b37022db00ab32de322f5')
variant('al', default=True, description='Builds with support for Aluminum Library')
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release'))
variant('conduit', default=True,
description='Builds with support for Conduit Library '
'(note that for v0.99 conduit is required)')
variant('deterministic', default=False,
description='Builds with support for deterministic execution')
variant('dihydrogen', default=True,
description='Builds with support for DiHydrogen Tensor Library')
variant('distconv', default=False,
description='Builds with support for spatial, filter, or channel '
'distributed convolutions')
variant('docs', default=False, description='Builds with support for building documentation')
variant('dtype', default='float',
description='Type for floating point representation of weights',
values=('float', 'double'))
variant('extras', default=False, description='Add python modules for LBANN related tools')
variant('fft', default=False, description='Support for FFT operations')
variant('half', default=False,
description='Builds with support for FP16 precision data types')
variant('hwloc', default=True, description='Add support for topology aware algorithms')
variant('nvprof', default=False, description='Build with region annotations for NVPROF')
variant('numpy', default=False,
description='Builds with support for processing NumPy data files')
variant('vision', default=False,
description='Builds with support for image processing data with OpenCV')
variant('vtune', default=False, description='Builds with support for Intel VTune')
variant('onednn', default=False, description='Support for OneDNN')
variant('nvshmem', default=False, description='Support for NVSHMEM')
variant('python', default=True, description='Support for Python extensions (e.g. Data Reader)')
variant('pfe', default=True, description='Python Frontend for generating and launching models')
variant('boost', default=False, description='Enable callbacks that use Boost libraries')
# Variant Conflicts
conflicts('@:0.90,0.99:', when='~conduit')
conflicts('@0.90:0.101.99', when='+fft')
conflicts('@:0.90,0.101.99:', when='~dihydrogen')
conflicts('~cuda', when='+nvprof')
conflicts('~hwloc', when='+al')
conflicts('~cuda', when='+nvshmem')
conflicts('+cuda', when='+rocm', msg='CUDA and ROCm support are mutually exclusive')
conflicts('+extras', when='~pfe', msg='Python extras require the Python front end support')
depends_on('[email protected]:', type='build')
# Specify the correct versions of Hydrogen
depends_on('hydrogen@:1.3.4', when='@0.95:0.100')
depends_on('[email protected]:1.4.99', when='@0.101:0.101.99')
depends_on('[email protected]:', when='@:0.90,0.102:')
# Add Hydrogen variants
depends_on('hydrogen +openmp +openmp_blas +shared +int64')
depends_on('hydrogen ~al', when='~al')
depends_on('hydrogen +al', when='+al')
depends_on('hydrogen ~cuda', when='~cuda')
depends_on('hydrogen +cuda', when='+cuda')
depends_on('hydrogen ~half', when='~half')
depends_on('hydrogen +half', when='+half')
depends_on('hydrogen ~rocm', when='~rocm')
depends_on('hydrogen +rocm', when='+rocm')
depends_on('hydrogen build_type=Debug', when='build_type=Debug')
# Older versions depended on Elemental not Hydrogen
depends_on('elemental +openmp_blas +shared +int64', when='@0.91:0.94')
depends_on('elemental +openmp_blas +shared +int64 build_type=Debug',
when='build_type=Debug @0.91:0.94')
# Specify the correct version of Aluminum
depends_on('aluminum@:0.3.99', when='@0.95:0.100 +al')
depends_on('[email protected]:0.4.99', when='@0.101:0.101.99 +al')
depends_on('[email protected]:', when='@:0.90,0.102: +al')
# Add Aluminum variants
depends_on('aluminum +cuda +nccl +ht +cuda_rma', when='+al +cuda')
depends_on('aluminum +rocm +rccl +ht', when='+al +rocm')
depends_on('[email protected]:', when='@:0.90,0.102:')
depends_on('dihydrogen +openmp', when='+dihydrogen')
depends_on('dihydrogen ~cuda', when='+dihydrogen ~cuda')
depends_on('dihydrogen +cuda', when='+dihydrogen +cuda')
depends_on('dihydrogen ~al', when='+dihydrogen ~al')
depends_on('dihydrogen +al', when='+dihydrogen +al')
depends_on('dihydrogen +distconv +cuda', when='+distconv')
depends_on('dihydrogen ~half', when='+dihydrogen ~half')
depends_on('dihydrogen +half', when='+dihydrogen +half')
depends_on('dihydrogen ~nvshmem', when='+dihydrogen ~nvshmem')
depends_on('dihydrogen +nvshmem', when='+dihydrogen +nvshmem')
depends_on('dihydrogen ~rocm', when='+dihydrogen ~rocm')
depends_on('dihydrogen +rocm', when='+dihydrogen +rocm')
depends_on('[email protected]', when='@0.101:0.101.99 +dihydrogen')
depends_on('dihydrogen@:0.0,0.2:', when='@:0.90,0.102: +dihydrogen')
conflicts('~dihydrogen', when='+distconv')
for arch in CudaPackage.cuda_arch_values:
depends_on('hydrogen cuda_arch=%s' % arch, when='+cuda cuda_arch=%s' % arch)
depends_on('aluminum cuda_arch=%s' % arch, when='+al +cuda cuda_arch=%s' % arch)
depends_on('dihydrogen cuda_arch=%s' % arch, when='+dihydrogen +cuda cuda_arch=%s' % arch)
depends_on('nccl cuda_arch=%s' % arch, when='+cuda cuda_arch=%s' % arch)
# variants +rocm and amdgpu_targets are not automatically passed to
# dependencies, so do it manually.
for val in ROCmPackage.amdgpu_targets:
depends_on('hydrogen amdgpu_target=%s' % val, when='amdgpu_target=%s' % val)
depends_on('aluminum amdgpu_target=%s' % val, when='+al amdgpu_target=%s' % val)
depends_on('dihydrogen amdgpu_target=%s' % val, when='+dihydrogen amdgpu_target=%s' % val)
depends_on('cudnn', when='@0.90:0.100.99 +cuda')
depends_on('[email protected]:', when='@:0.90,0.101: +cuda')
depends_on('cub', when='@0.94:0.98.2 +cuda ^cuda@:10.99')
depends_on('hipcub', when='+rocm')
depends_on('mpi')
depends_on('[email protected]:', when='@:0.90,0.102: +hwloc')
depends_on('[email protected]:1.11.99', when='@0.95:0.101.99 +hwloc')
depends_on('hwloc +cuda +nvml', when='+cuda')
depends_on('[email protected]:', when='+rocm')
depends_on('half', when='+half')
depends_on('[email protected]: +openmp', when='+fft')
# LBANN wraps OpenCV calls in OpenMP parallel loops, build without OpenMP
# Additionally disable video related options, they incorrectly link in a
# bad OpenMP library when building with clang or Intel compilers
depends_on('[email protected]: build_type=RelWithDebInfo +core +highgui '
'+imgcodecs +imgproc +jpeg +png +tiff +zlib +fast-math ~cuda',
when='+vision')
# Note that for Power systems we want the environment to add +powerpc +vsx
depends_on('[email protected]: +powerpc +vsx', when='+vision arch=ppc64le:')
depends_on('cnpy', when='+numpy')
depends_on('nccl', when='@0.94:0.98.2 +cuda')
depends_on('[email protected]: +hdf5~hdf5_compat', when='@0.94:0.99 +conduit')
depends_on('[email protected]: +hdf5~hdf5_compat', when='@:0.90,0.99:')
# LBANN can use Python in two modes 1) as part of an extensible framework
# and 2) to drive the front end model creation and launch
# Core library support for Python Data Reader and extensible interface
depends_on('python@3: +shared', type=('run'), when='@:0.90,0.99: +python')
extends("python", when='+python')
# Python front end and possible extra packages
depends_on('python@3: +shared', type=('build', 'run'), when='@:0.90,0.99: +pfe')
extends("python", when='+pfe')
depends_on('py-setuptools', type='build', when='+pfe')
depends_on('py-argparse', type='run', when='@:0.90,0.99: ^python@:2.6 +pfe')
depends_on('py-configparser', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type=('build', 'run'), when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('[email protected]:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-pytest', type='test', when='@:0.90,0.99: +pfe')
depends_on('[email protected]', type=('build', 'run'), when='@:0.90,0.99: +pfe')
depends_on('[email protected]', when='@:0.90,0.99:')
depends_on('py-breathe', type='build', when='+docs')
depends_on('doxygen', type='build', when='+docs')
depends_on('py-m2r', type='build', when='+docs')
depends_on('cereal')
depends_on('catch2', type=('build', 'test'))
depends_on('clara')
depends_on('llvm-openmp', when='%apple-clang')
depends_on('onednn cpu_runtime=omp gpu_runtime=none', when='+onednn')
depends_on('nvshmem', when='+nvshmem')
generator = 'Ninja'
depends_on('ninja', type='build')
@property
def common_config_args(self):
spec = self.spec
# Environment variables
cppflags = []
cppflags.append('-DLBANN_SET_EL_RNG')
args = []
args.extend([
'-DCMAKE_CXX_FLAGS=%s' % ' '.join(cppflags),
'-DLBANN_VERSION=spack',
])
if '+numpy' in spec:
args.append(
'-DCNPY_DIR={0}'.format(spec['cnpy'].prefix),
)
# Use a high performance linker
if self.spec.satisfies('%clang'):
args.extend([
'-DCMAKE_EXE_LINKER_FLAGS=-fuse-ld=lld',
'-DCMAKE_SHARED_LINKER_FLAGS=-fuse-ld=lld'])
elif self.spec.satisfies('%gcc'):
args.extend([
'-DCMAKE_EXE_LINKER_FLAGS=-fuse-ld=gold',
'-DCMAKE_SHARED_LINKER_FLAGS=-fuse-ld=gold'])
return args
def setup_build_environment(self, env):
if self.spec.satisfies('%apple-clang'):
env.append_flags(
'CPPFLAGS', self.compiler.openmp_flag)
env.append_flags(
'CFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'CXXFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'LDFLAGS', self.spec['llvm-openmp'].libs.ld_flags)
# Get any recent versions or non-numeric version
# Note that develop > numeric and non-develop < numeric
@when('@:0.90,0.94:')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DCMAKE_CXX_STANDARD=17',
'-DLBANN_WITH_CNPY=%s' % ('+numpy' in spec),
'-DLBANN_DETERMINISTIC:BOOL=%s' % ('+deterministic' in spec),
'-DLBANN_WITH_HWLOC=%s' % ('+hwloc' in spec),
'-DLBANN_WITH_ALUMINUM:BOOL=%s' % ('+al' in spec),
'-DLBANN_WITH_BOOST:BOOL=%s' % ('+boost' in spec),
'-DLBANN_WITH_CONDUIT:BOOL=%s' % ('+conduit' in spec),
'-DLBANN_WITH_NVSHMEM:BOOL=%s' % ('+nvshmem' in spec),
'-DLBANN_WITH_FFT:BOOL=%s' % ('+fft' in spec),
'-DLBANN_WITH_ONEDNN:BOOL=%s' % ('+onednn' in spec),
'-DLBANN_WITH_EMBEDDED_PYTHON:BOOL=%s' % ('+python' in spec),
'-DLBANN_WITH_PYTHON_FRONTEND:BOOL=%s' % ('+pfe' in spec),
'-DLBANN_WITH_TBINF=OFF',
'-DLBANN_WITH_UNIT_TESTING:BOOL=%s' % (self.run_tests),
'-DLBANN_WITH_VISION:BOOL=%s' % ('+vision' in spec),
'-DLBANN_WITH_VTUNE:BOOL=%s' % ('+vtune' in spec),
'-DLBANN_DATATYPE={0}'.format(spec.variants['dtype'].value),
'-DCEREAL_DIR={0}'.format(spec['cereal'].prefix),
# protobuf is included by py-protobuf+cpp
'-DProtobuf_DIR={0}'.format(spec['protobuf'].prefix),
'-Dprotobuf_MODULE_COMPATIBLE=ON'])
if '+cuda' in spec:
if spec.satisfies('^[email protected]:'):
args.append('-DCMAKE_CUDA_STANDARD=17')
else:
args.append('-DCMAKE_CUDA_STANDARD=14')
if spec.satisfies('@:0.90') or spec.satisfies('@0.95:'):
args.append(
'-DHydrogen_DIR={0}/CMake/hydrogen'.format(
spec['hydrogen'].prefix))
elif spec.satisfies('@0.94'):
args.append(
'-DElemental_DIR={0}/CMake/elemental'.format(
spec['elemental'].prefix))
if spec.satisfies('@0.94:0.98.2'):
args.append('-DLBANN_WITH_NCCL:BOOL=%s' %
('+cuda +nccl' in spec))
if '+vtune' in spec:
args.append('-DVTUNE_DIR={0}'.format(spec['vtune'].prefix))
if '+al' in spec:
args.append('-DAluminum_DIR={0}'.format(spec['aluminum'].prefix))
if '+conduit' in spec:
args.append('-DConduit_DIR={0}'.format(spec['conduit'].prefix))
# Add support for OpenMP with external (Brew) clang
if spec.satisfies('%clang platform=darwin'):
clang = self.compiler.cc
clang_bin = os.path.dirname(clang)
clang_root = os.path.dirname(clang_bin)
args.extend([
'-DOpenMP_CXX_FLAGS=-fopenmp=libomp',
'-DOpenMP_CXX_LIB_NAMES=libomp',
'-DOpenMP_libomp_LIBRARY={0}/lib/libomp.dylib'.format(
clang_root)])
if '+vision' in spec:
args.append('-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix))
if '+cuda' in spec:
args.append(
'-DCUDA_TOOLKIT_ROOT_DIR={0}'.format(
spec['cuda'].prefix))
args.append(
'-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix))
if spec.satisfies('@0.94:0.98.2'):
if spec.satisfies('^cuda@:10.99'):
args.append('-DCUB_DIR={0}'.format(
spec['cub'].prefix))
if '+nccl' in spec:
args.append(
'-DNCCL_DIR={0}'.format(
spec['nccl'].prefix))
args.append(
'-DLBANN_WITH_NVPROF:BOOL=%s' % ('+nvprof' in spec))
if spec.satisfies('@:0.90') or spec.satisfies('@0.100:'):
args.append(
'-DLBANN_WITH_DIHYDROGEN:BOOL=%s' % ('+dihydrogen' in spec))
if spec.satisfies('@:0.90') or spec.satisfies('@0.101:'):
args.append(
'-DLBANN_WITH_DISTCONV:BOOL=%s' % ('+distconv' in spec))
if '+rocm' in spec:
args.extend([
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix),
'-DHIP_CXX_COMPILER={0}'.format(self.spec['hip'].hipcc)])
archs = self.spec.variants['amdgpu_target'].value
if archs != 'none':
arch_str = ",".join(archs)
args.append(
'-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'
' -g -fsized-deallocation -fPIC -std=c++17'.format(arch_str)
)
return args
@when('@0.91:0.93')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DWITH_CUDA:BOOL=%s' % ('+cuda' in spec),
'-DWITH_CUDNN:BOOL=%s' % ('+cuda' in spec),
'-DELEMENTAL_USE_CUBLAS:BOOL=%s' % (
'+cublas' in spec['elemental']),
'-DWITH_TBINF=OFF',
'-DWITH_VTUNE=OFF',
'-DElemental_DIR={0}'.format(spec['elemental'].prefix),
'-DELEMENTAL_MATH_LIBS={0}'.format(
spec['elemental'].libs),
'-DVERBOSE=0',
'-DLBANN_HOME=.'])
if spec.variants['dtype'].value == 'float':
args.append('-DDATATYPE=4')
elif spec.variants['dtype'].value == 'double':
args.append('-DDATATYPE=8')
if '+vision' in spec:
args.append('-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix))
if '+cudnn' in spec:
args.append('-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix))
if '+cub' in spec and spec.satisfies('^cuda@:10.99'):
args.append('-DCUB_DIR={0}'.format(
spec['cub'].prefix))
return args
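    # Example spack specs for this package (a sketch; exact variant and
    # version availability depends on the local spack installation):
    #
    #   spack install lbann@develop +cuda +al cuda_arch=70
    #   spack install lbann@develop +rocm amdgpu_target=gfx906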
|
the-stack_106_17875
|
#!/usr/bin/env python
# Part of the psychopy_ext library
# Copyright 2010-2016 Jonas Kubilius
# The program is distributed under the terms of the GNU General Public License,
# either version 3 of the License, or (at your option) any later version.
"""
A wrapper of matplotlib for producing pretty plots by default. As `pandas`
evolves, some of these improvements will hopefully be merged into it.
Usage::
import plot
plt = plot.Plot(nrows_ncols=(1,2))
plt.plot(data) # plots data on the first subplot
plt.plot(data2) # plots data on the second subplot
plt.show()
TO-DO:
- factorplot with:
- predefined CIs
- no points
- fill_between
- tsplot improvements
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.stats
import pandas
import pandas.tools.plotting # for rcParams
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
try:
import seaborn as sns # hope you have it
_has_seaborn = True
except: # ok, stick to your ugly matplotlib then
# but I'm still gonna improve it using the ggplot style
# from https://gist.github.com/huyng/816622
# inspiration from mpltools
rc_params = pandas.tools.plotting.mpl_stylesheet
rc_params['interactive'] = False # doesn't display otherwise
plt.rcParams.update(rc_params)
from psychopy_ext import stats, utils
def plot_ci(df, what=['Line2D'], hue=None, ax=None):
"""
Add confidence intervals to a plot.
    .. note:: Experimental
:Args:
df (pandas.DataFrame)
Input data
:Kwargs:
- what ({'Line2D', anything else})
Which plot elements should be enhanced with confidence intervals.
- hue (str or None, default: None)
Whether there is grouping by hue (seaborn's convention) in data or not.
"""
# lines = sns.plt.gca().get_lines()
children = sns.plt.gca().get_children() if ax is None else ax.get_children()
colors = []
x = []
for child in children:
spl = str(child).split('(')[0]
if spl in what:
if spl == 'Line2D':
if child.get_color() not in colors:
colors.append(child.get_color())
x.append(child.get_xdata())
else:
colors.append('.15')
x.append((child.get_x(), child.get_x()))
if hue is not None:
for kind, color in zip(df[hue].unique(), colors):
sel = df[df[hue]==kind]
for r, (rowno, row) in enumerate(sel.iterrows()):
sns.plt.plot([r,r], [row.ci_low, row.ci_high], color=color,
lw=sns.mpl.rcParams['lines.linewidth']*1.8)
else:
for r, (rowno, row) in enumerate(df.iterrows()):
sns.plt.plot([x[r][0],x[r][1]], [row.ci_low, row.ci_high], color=colors[0],
lw=sns.mpl.rcParams['lines.linewidth']*1.8)
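# A plot_ci usage sketch (the DataFrame and column names are hypothetical;
# the helper expects ci_low / ci_high columns, as used in the loops above):
#
#   ax = sns.barplot(x='condition', y='accuracy', data=means)
#   plot_ci(cis, what=['Rectangle'], ax=ax)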
# def draw_sig(agg, ax, popmean=0, errkind='sem', within=None):
# ax.text(ticks[rno], mn.max(), stats.get_star(p), ha='center')
def mdsplot(df, icons=None, zoom=None):
"""
Plot multidimensional scaling results.
:Args:
df
:Kwargs:
        - icons (list of image file paths or arrays)
            Images to draw at each (x, y) position.
        - zoom (float or None, default: None)
            Scaling factor for the icons; derived from the image size if None.
"""
sns.set_style('white')
g = sns.FacetGrid(df, col='layer', size=9, #col_wrap=4, size=2,
sharex=False, sharey=False, aspect=1)
g.map(_mdsplot, 'x', 'y', color='white', icons=icons, zoom=zoom)
def _mdsplot(x, y, icons=None, zoom=None, **kwargs):
ax = sns.plt.gca()
x_inches = sns.plt.gcf().get_size_inches()[0]
x_range = np.ptp(x)
for imname, xi, yi in zip(icons, x, y):
if isinstance(imname, (str, unicode)):
im = utils.load_image(imname, flatten=False, keep_alpha=True)
else:
im = imname
if zoom is None:
zoom = max(im.shape[:2]) / 2000.
# zoom = .1 #max(im.shape[:2]) / 3000. * x_inches/3.
imagebox = OffsetImage(im, zoom=zoom)
ab = AnnotationBbox(imagebox, (xi, yi),
xybox=(0,0), #(20., -20.),
xycoords='data',
boxcoords="offset points",
frameon=False
)
ax.add_artist(ab)
ax.scatter(x, y, **kwargs)
ax.axis('off')
def tsplot(data, x=None, unit=None, hue=None, y=None, palette=None,
err_style='ci_band', ci=95., interpolate=True, color=None,
estimator=np.mean, n_boot=1000, err_palette=None,
err_kws=None, legend=True, ax=None, **kwargs):
"""
    A poor man's reimplementation of Seaborn's tsplot that is more reliable
but does not have all options working yet.
    .. warning:: Not fully working.
"""
def bootstrap_resample(r):
if n_boot == 0 or n_boot is None:
return (np.nan, np.nan)
else:
return stats.bootstrap_resample(r, ci=ci, niter=n_boot)
if isinstance(hue, (str, unicode)): hue = [hue]
if unit is None:
agg = data
else:
agg = data.groupby([x]+hue+[unit])[y].mean().reset_index()
agg = agg.pivot_table(index=x, columns=hue, values=y,
aggfunc=[estimator, bootstrap_resample])
if ax is None: ax = sns.plt.subplot(111)
if 'lw' not in kwargs:
kwargs['lw'] = sns.mpl.rcParams['lines.linewidth']*1.8
if hue is None:
ci_low = map(lambda x: x[0], agg['bootstrap_resample'])
ci_high = map(lambda x: x[1], agg['bootstrap_resample'])
ax.fill_between(agg.index, ci_low, ci_high, alpha=.5)
ax.plot(agg.index, agg['mean'], **kwargs)
else:
if color is None: color = sns.color_palette(palette, n_colors=len(data.groupby(hue).groups))
for n, col in enumerate(agg['mean']):
c = color[n % len(color)]
ci_low = map(lambda x: x[0], agg[('bootstrap_resample', col)])
ci_high = map(lambda x: x[1], agg[('bootstrap_resample', col)])
ax.fill_between(agg.index, ci_low, ci_high, alpha=.5, color=c)
ax.plot(agg.index, agg[('mean', col)], c=c, label=col, **kwargs)
if legend:
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1.1, .5))
ax.set_xlabel(x)
ax.set_ylabel(y)
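# A tsplot usage sketch (DataFrame and column names are hypothetical):
#
#   ax = sns.plt.subplot(111)
#   tsplot(df, x='time', unit='subject', hue='condition', y='accuracy', ax=ax)
#   sns.plt.show()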
|
the-stack_106_17877
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyCreateParameters(Model):
"""The key create parameters.
All required parameters must be populated in order to send to Azure.
:param kty: Required. The type of key to create. For valid values, see
JsonWebKeyType. Possible values include: 'EC', 'EC-HSM', 'RSA', 'RSA-HSM',
'oct'
:type kty: str or ~azure.keyvault.models.JsonWebKeyType
:param key_size: The key size in bits. For example: 2048, 3072, or 4096
for RSA.
:type key_size: int
:param key_ops:
:type key_ops: list[str or ~azure.keyvault.models.JsonWebKeyOperation]
:param key_attributes:
:type key_attributes: ~azure.keyvault.models.KeyAttributes
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param curve: Elliptic curve name. For valid values, see
JsonWebKeyCurveName. Possible values include: 'P-256', 'P-384', 'P-521',
'P-256K'
:type curve: str or ~azure.keyvault.models.JsonWebKeyCurveName
"""
_validation = {
'kty': {'required': True, 'min_length': 1},
}
_attribute_map = {
'kty': {'key': 'kty', 'type': 'str'},
'key_size': {'key': 'key_size', 'type': 'int'},
'key_ops': {'key': 'key_ops', 'type': '[str]'},
'key_attributes': {'key': 'attributes', 'type': 'KeyAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'curve': {'key': 'crv', 'type': 'str'},
}
def __init__(self, *, kty, key_size: int=None, key_ops=None, key_attributes=None, tags=None, curve=None, **kwargs) -> None:
super(KeyCreateParameters, self).__init__(**kwargs)
self.kty = kty
self.key_size = key_size
self.key_ops = key_ops
self.key_attributes = key_attributes
self.tags = tags
self.curve = curve
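# Hedged usage sketch (editor's addition, not part of the generated code):
# building the request body for an RSA key; the values are illustrative only.
#
#     params = KeyCreateParameters(kty='RSA', key_size=2048,
#                                  key_ops=['encrypt', 'decrypt'],
#                                  tags={'purpose': 'demo'})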
|
the-stack_106_17879
|
import uuid
# from cms.utils.copy_plugins import copy_plugins_to
from django.urls import reverse
from django.db import models, transaction
from django_extensions.db.fields import RandomCharField
from polymorphic.models import PolymorphicModel
# from cms.models.fields import PlaceholderField
from src.apps.core.managers.IterativeDeletionManagers import (
IterativeDeletion_Manager,
PolyIterativeDeletion_Manager,
)
from src.apps.core.models.PublicationModels import (
PolyPublicationChild,
PublicationChild,
)
'''***********************************************************
QuizQuestion model
***********************************************************'''
class QuizQuestion(models.Model):
class Meta:
app_label = 'core'
ordering = ('position',)
verbose_name = 'Quiz-Question'
verbose_name_plural = 'Quiz-Questions'
# define the question types
MULTI_CHOICE = "multi-choice"
MULTI_SELECT = "multi-select"
QUESTION_TYPES = (
# question types
(MULTI_CHOICE, "Multiple Choice"),
(MULTI_SELECT, "Multiple Selection"),
)
question_type = models.CharField(max_length=25, choices=QUESTION_TYPES, default=MULTI_CHOICE)
ref_id = models.UUIDField(default=uuid.uuid4, editable=False)
position = models.PositiveIntegerField(default=0, blank=False, null=False)
    question_text = models.TextField(default="", blank=False, null=False)
quiz = models.ForeignKey(
'core.QuizSection',
related_name="questions",
blank=False,
default=None,
help_text=u'Please specify a Quiz Section to map this question to.',
null=False,
on_delete=models.CASCADE,
)
def absolute_url(self):
return reverse('core:quiz_question_detail', kwargs={
'module_slug': self.quiz.topic.module.slug,
'topic_slug': self.quiz.topic.slug,
'quiz_slug': self.quiz.slug,
'ref_id': self.ref_id,
})
def __unicode__(self):
# return self.name
return "%s:%s" %(self.quiz.name, self.ref_id)
# needed to show the name in the admin interface (otherwise will show 'Module Object' for all entries)
def __str__(self):
return "%s:%s" % (self.quiz.name, self.ref_id)
'''***********************************************************
QuizAnswer model
***********************************************************'''
class QuizAnswer(models.Model):
class Meta:
verbose_name = 'Quiz-Answer'
verbose_name_plural = 'Quiz-Answers'
ref_id = models.UUIDField(default=uuid.uuid4, editable=False)
position = models.PositiveIntegerField(default=0, blank=False, null=False)
answer_text = models.TextField(default="", blank=False, null=False)
is_correct = models.BooleanField()
question = models.ForeignKey(
'core.QuizQuestion',
related_name="answers",
blank=False,
default=None,
help_text=u'Please specify a Quiz Question to map this answer to.',
null=False,
on_delete=models.CASCADE,
)
def __unicode__(self):
# return self.name
return "%s:%s:answer" %(self.question.quiz.name, self.ref_id)
# needed to show the name in the admin interface (otherwise will show 'Module Object' for all entries)
def __str__(self):
return "%s:%s:answer" % (self.question.quiz.name, self.ref_id)
|
the-stack_106_17880
|
from datetime import datetime
import sys
from rich.console import Console
from rich.progress import Progress
from unsilence.Unsilence import Unsilence
from unsilence.command_line.ChoiceDialog import choice_dialog
from unsilence.command_line.ParseArguments import parse_arguments
from unsilence.command_line.PrettyTimeEstimate import format_timedelta, pretty_time_estimate
from unsilence.command_line.TerminalSupport import repair_console
def main():
"""
Entry Point if this script is run as a script instead of a library
:return: None
"""
try:
repair_console()
run()
except KeyboardInterrupt:
print("\nInterrupted")
repair_console()
sys.exit(0)
def run():
"""
Run the Console Interface for Unsilence
:return: None
"""
sys.tracebacklimit = 0
args = parse_arguments()
console = Console()
if args.debug:
sys.tracebacklimit = 1000
if args.output_file.exists() and not args.non_interactive_mode:
if not choice_dialog(console, "File already exists. Overwrite?", default=False):
return
args_dict = vars(args)
argument_list_for_silence_detect = [
"silence_level", "silence_time_threshold", "short_interval_threshold", "stretch_time"
]
argument_dict_for_silence_detect = {
key: args_dict[key] for key in argument_list_for_silence_detect if key in args_dict.keys()
}
argument_list_for_renderer = [
"audio_only", "audible_speed", "silent_speed", "audible_volume", "silent_volume",
"drop_corrupted_intervals", "threads"
]
argument_dict_for_renderer = {
key: args_dict[key] for key in argument_list_for_renderer if key in args_dict.keys()
}
progress = Progress()
continual = Unsilence(args.input_file)
with progress:
def update_task(current_task):
def handler(current_val, total):
progress.update(current_task, total=total, completed=current_val)
return handler
silence_detect_task = progress.add_task("Calculating Intervals...", total=1)
start_time = datetime.today()
continual.detect_silence(
on_silence_detect_progress_update=update_task(silence_detect_task),
**argument_dict_for_silence_detect
)
progress.stop()
progress.remove_task(silence_detect_task)
print()
estimated_time = continual.estimate_time(args.audible_speed, args.silent_speed)
console.print(pretty_time_estimate(estimated_time))
print()
if not args.non_interactive_mode:
if not choice_dialog(console, "Continue with these options?", default=True):
return
progress.start()
rendering_task = progress.add_task("Rendering Intervals...", total=1)
concat_task = progress.add_task("Combining Intervals...", total=1)
continual.render_media(
args.output_file,
on_render_progress_update=update_task(rendering_task),
on_concat_progress_update=update_task(concat_task),
**argument_dict_for_renderer
)
progress.stop()
time_passed = datetime.today() - start_time
time_passed_str = format_timedelta(time_passed.seconds)
console.print(f"\n[green]Finished in {time_passed_str}![\green] :tada:")
print()
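# Hedged sketch (editor's addition, not part of the original script): the same
# pipeline driven programmatically instead of through the CLI; the file names
# are placeholders.
#
#     from unsilence.Unsilence import Unsilence
#
#     u = Unsilence("lecture.mp4")
#     u.detect_silence()
#     print(u.estimate_time(1, 6))  # audible_speed, silent_speed
#     u.render_media("lecture_cut.mp4", audible_speed=1, silent_speed=6)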
|
the-stack_106_17882
|
"""
.. module:: runner
:platform: Unix, Windows
:synopsis: Runner for commands in virtualenv
"""
from __future__ import print_function
import tempfile
import shutil
import os
import subprocess
from contextlib import contextmanager
from virtualenvrunner.activateenv import ActivateEnv
from virtualenvrunner.utils import is_windows, get_exe_suffix, get_unicode
__copyright__ = 'Copyright (C) 2019, Nokia'
class RunnerInstallationFailed(Exception):
pass
class Runner(object):
""" The Runner class is a runner for commands in the virtualenv.
    By default a temporary *virtualenv* is created in $(pwd)/.venv.
    A user-defined new or existing *virtualenv* can be used by
    setting *virtualenv_dir*, which points to the *virtualenv*
    directory.
No packages are installed by default to the environment. The
requirements file can be given via *virtualenv_reqs*. It defines the
path to requirements file which are installed to the *virtualenv*
during the setup.
The Python interpreter can be defined by setting the argument
*virtualenv_pythonexe*. By default 'python' is used.
URL to PyPI can be altered via *pip_index_url*. The argument
*pip_index_url* is an URL to PyPI to be used by both pip and
:mod:`distutils`.
The command line *run* call can be changed via callable *run* argument.
The *run* must be a function similar to :func:`subprocess.check_call`
with *shell=True*. The *run* function has to be able to take at least
*env* keyword argument.
An example usage is shown below:
>>> from virtualenvrunner.runner import Runner
>>> pip_index_url='https://example.pypi.com/index/+simple'
>>> with open('requirements.txt', 'w') as f:
... f.write('crl.devutils')
...
>>> with Runner(virtualenv_reqs='requirements.txt',
... pip_index_url=pip_index_url) as runner:
... runner.run('crl -h')
...
Running virtualenv with interpreter ...
"""
virtualenv_bin = 'Scripts' if is_windows() else 'bin'
virtualenv_exe = 'virtualenv'
def __init__(self,
virtualenv_dir=None,
virtualenv_reqs=None,
virtualenv_reqs_upd=None,
virtualenv_pythonexe=None,
pip_index_url=None,
run=None):
""" Runner class for virtualenv.
Kwargs:
"""
self._virtualenv_dir = virtualenv_dir
self.virtualenv_reqs = virtualenv_reqs
if virtualenv_reqs_upd and virtualenv_reqs_upd.lower() == "true":
self.virtualenv_reqs_upd = (" --upgrade"
" --upgrade-strategy only-if-needed ")
else:
self.virtualenv_reqs_upd = ""
self._virtualenv_pythonexe = virtualenv_pythonexe
self.pip_index_url = pip_index_url
self._run = run or self.__run
self._activateenv = None
self._files = set()
self._new_virtualenv = False
self._save_freeze_path = None
print('virtualenv_pythonexe:{}'.format(virtualenv_pythonexe))
def __enter__(self):
self._setup_virtualenv()
return self
def __exit__(self, *args):
pass
def _setup_virtualenv(self):
""" The extended Runner classes may alter method *_setup_virtualenv*
for setting the virtualenv in the specific ways. Please note that this
is not a hook so the original *_setup_virtualenv* must be called in
order to guarantee the functionality.
"""
self._create_virtualenv_if_needed()
self._set_pydistutilscfg_if_needed()
self._activateenv = ActivateEnv(self.activate_this)
self._install_requirements_and_freeze_if_needed()
def set_save_freeze_path(self, save_freeze_path):
self._save_freeze_path = save_freeze_path
@property
def virtualenv_dir(self):
return self._virtualenv_dir or os.path.join(os.getcwd(), '.venv')
@property
def activate_this(self):
return os.path.join(self.virtualenv_dir,
self.virtualenv_bin,
'activate_this.py')
@property
def pydistutilscfg(self):
return os.path.join(
self.virtualenv_dir,
'{}pydistutils.cfg'.format('' if is_windows() else '.'))
@property
def requirements_log_file(self):
return os.path.join(
self.virtualenv_dir,
'{}virtualenvrunner_requirements.log'.format(
'' if is_windows() else '.'))
@property
def env(self):
""" Property *env* is :data:`os.environ` of
*virtualenv*.
"""
return self._activateenv.env
@property
def virtualenv_pythonexe(self):
return self._virtualenv_pythonexe or 'python' + get_exe_suffix()
def _create_virtualenv_if_needed(self):
if not os.path.isfile(self.activate_this):
self._create_virtualenv()
def _create_virtualenv(self):
self._run_in_install(
'{virtualenv_exe} --no-download -p {virtualenv_pythonexe} '
'{virtualenv_dir}'.format(
virtualenv_exe=self.virtualenv_exe,
virtualenv_pythonexe=self.virtualenv_pythonexe,
virtualenv_dir=self.virtualenv_dir))
self._new_virtualenv = True
def _set_pydistutilscfg_if_needed(self):
if self.pip_index_url and self.virtualenv_is_volatile:
self._set_pydistutilscfg()
@property
def virtualenv_is_volatile(self):
return self._new_virtualenv or self.virtualenv_reqs
def _set_pydistutilscfg(self):
with open(self.pydistutilscfg, 'w') as f:
f.write('[easy_install]\n'
'index_url={}\n'.format(self.pip_index_url))
@contextmanager
def _open_requirements_log_file(self):
with self._open_path_for_write_if_path(self.requirements_log_file):
yield None
@contextmanager
def _open_path_for_write_if_path(self, path, mode='a'):
if path is None:
yield None
else:
with self._open_path_for_write(path, mode):
yield None
@contextmanager
def _open_path_for_write(self, path, mode):
f = None
try:
with open(path, mode) as f:
self._files.add(f)
yield None
except IOError as file_err:
print("Error in {} file operation: Error #{} - {}".format(
path,
file_err.errno,
file_err.strerror))
if f is not None:
raise
yield None
finally:
if f in self._files:
self._files.remove(f)
def _install_requirements_and_freeze_if_needed(self):
if self.virtualenv_reqs and self.virtualenv_is_volatile:
self._pip_install()
self._pip_freeze_with_banner()
if self._save_freeze_path is not None:
self._save_pip_freeze_without_err()
def _pip_install(self):
with self._open_requirements_log_file():
self._run_in_install(
'pip install {req_update}-r {requirements}{index_arg}'.format(
requirements=self.virtualenv_reqs,
index_arg=(' -i {}'.format(self.pip_index_url)
if self.pip_index_url else ''),
req_update=self.virtualenv_reqs_upd),
env=self.env)
def _pip_freeze_with_banner(self):
with self._requirements_log_with_banner():
self._write_line('pip freeze:\n')
self._run_in_install('pip freeze', env=self.env)
def _save_pip_freeze_without_err(self):
with open(os.devnull, 'w') as devnull:
with self._open_path_for_write_if_path(self._save_freeze_path,
mode='w'):
self._run_in_install('pip freeze', stderr=devnull, env=self.env)
@contextmanager
def _requirements_log_with_banner(self):
with self._open_requirements_log_file():
with self._banner(20):
yield None
@contextmanager
def _banner(self, banner_length):
try:
self._write_line('\n{}\n'.format(banner_length * "#"))
yield None
finally:
self._write_line('{}\n'.format(banner_length * "#"))
def _run_in_install(self, cmd, stderr=subprocess.STDOUT, env=None):
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=stderr,
shell=True,
env=env)
for line in self._lines_in_handle(proc.stdout):
self._write_line(get_unicode(line))
self._verify_status(cmd, proc)
@staticmethod
def _lines_in_handle(handle):
while True:
line = handle.readline()
if line in [b'', '']:
break
yield line
def _write_line(self, line):
for f in self._files:
f.write(line)
@staticmethod
def _verify_status(cmd, proc):
proc.communicate()
if proc.returncode:
raise RunnerInstallationFailed(
"Command execution of '{cmd}'"
" failed with exit status {returncode}".format(
cmd=cmd, returncode=proc.returncode))
@staticmethod
def __run(cmd, env=None, stdout=None):
return subprocess.check_call(cmd, shell=True, env=env, stdout=stdout)
def run(self, *args, **kwargs):
print('in runner run')
kwargscopy = kwargs.copy()
kwargscopy['env'] = self.env
return self._run(*args, **kwargscopy)
def remove_virtualenv(self):
"""Removes the virtualenv if it exists."""
shutil.rmtree(self.virtualenv_dir, ignore_errors=True)
class TmpVenvRunner(Runner):
""" This virtualenv runner is otherwise the same in functionality than
:class:`.Runner` but it uses temporaray virtualenv directory. This
directory is removed in *__exit__*.
"""
def __enter__(self):
self._tmp_virtualenv_dir = None
return super(TmpVenvRunner, self).__enter__() # pylint: disable=super-with-arguments
def __exit__(self, *args):
if self._tmp_virtualenv_dir:
shutil.rmtree(self._tmp_virtualenv_dir)
super(TmpVenvRunner, self).__exit__(*args) # pylint: disable=super-with-arguments
@property
def virtualenv_dir(self):
return self._virtualenv_dir or self.tmp_virtualenv_dir
@property
def tmp_virtualenv_dir(self):
if not self._tmp_virtualenv_dir:
self._tmp_virtualenv_dir = tempfile.mkdtemp(prefix='venv_')
return self._tmp_virtualenv_dir
class VerboseRunner(Runner):
"""This virtualenv runner is otherwise the same in functionality than
:class:`.Runner` but it prints the requirements installation log and
*pip freeze*.
"""
def _write_line(self, line):
super(VerboseRunner, self)._write_line(line) # pylint: disable=super-with-arguments
print(line, end='')
class ReadonlyBase(object):
@property
def virtualenv_is_volatile(self):
return self._new_virtualenv
class ReadonlyRunner(ReadonlyBase, Runner):
pass
class VerboseReadonlyRunner(ReadonlyBase, VerboseRunner):
pass
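# Hedged usage sketch (editor's addition, not part of the original module):
# running a command inside a throw-away virtualenv that is deleted on exit;
# the requirements file name is a placeholder.
#
#     with TmpVenvRunner(virtualenv_reqs='requirements.txt') as runner:
#         runner.run('pip freeze')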
|
the-stack_106_17883
|
import platform
import stanza
from stanza.server import CoreNLPClient
print("OS = ", platform.system())
stanza.install_corenlp()
client = CoreNLPClient(port=8888)
client.start()
# Wait for server to start
client.ensure_alive()
# Get its PID
pid = client.server.pid
print(f"Process running on: {pid if pid else 'Cant find pid'}")
# client.stop()
client.stop()
# Make sure server has shut down
assert not client.server
|
the-stack_106_17885
|
'''
Main Script for FSI with Kratos Mutliphysics
This script is intended to be modified. Each solver can be imported and used as "BlackBox"
Chair of Structural Analysis, Technical University of Munich
All rights reserved
'''
'''
This example is based on the dissertation of Daniel Mok,
"Partitionierte Lösungsansätze in der Strukturdynamik und der Fluid-Struktur-Interaktion"
(Partitioned solution approaches in structural dynamics and fluid-structure interaction),
Chapter 7.3 "Flexible Klappe in Kanalströmung mit Einschnürung"
(flexible flap in a channel flow with a constriction)
'''
# ----- Importing the modules -----
import KratosMultiphysics
import KratosMultiphysics.MappingApplication as KratosMapping
import KratosMultiphysics.FluidDynamicsApplication as KratosFluidDynamics
import KratosMultiphysics.StructuralMechanicsApplication as KratosStructuralMechanics
# Import the "BlackBox" Solvers
from structural_mechanics_analysis import StructuralMechanicsAnalysis
from fluid_dynamics_analysis import FluidDynamicsAnalysis
import fsi_utilities # here auxiliary functions e.g. for relaxation are declared
fluid_model = KratosMultiphysics.Model()
structural_model = KratosMultiphysics.Model()
fluid_project_params_file_name = "ProjectParametersCFD.json"
with open(fluid_project_params_file_name,'r') as parameter_file:
parameters_fluid = KratosMultiphysics.Parameters(parameter_file.read())
structural_project_params_file_name = "ProjectParametersCSM.json"
with open(structural_project_params_file_name,'r') as parameter_file:
parameters_structure = KratosMultiphysics.Parameters(parameter_file.read())
'''
# --------------------------------------------------------
# ----- Setting up and initializing the Fluid Solver -----
# --------------------------------------------------------
'''
fluid_solver = FluidDynamicsAnalysis(fluid_model, parameters_fluid)
fluid_solver.Initialize()
fluid_model_part = fluid_model["MainModelPart"]
print("======================================================================")
print("||||||||||||||||||||||| SETTING UP FLUID DONE ||||||||||||||||||||||||")
print("======================================================================")
'''
# -------------------------------------------------------------
# ----- Setting up and initializing the Structural Solver -----
# -------------------------------------------------------------
'''
structural_solver = StructuralMechanicsAnalysis(structural_model, parameters_structure)
structural_solver.Initialize()
structural_model_part = structural_model["Structure"]
print("======================================================================")
print("||||||||||||||||| SETTING UP STRUCTURAL DYNAMICS DONE ||||||||||||||||")
print("======================================================================")
'''
# ------------------------------------------------------
# ----- Setting up the FSI-related functionalities -----
# ------------------------------------------------------
'''
# ----- Setting up the time parameters -----
start_time = 0.0
end_time = 15.0
delta_time = 0.001
num_steps = int((end_time - start_time) / delta_time)
round_val = fsi_utilities.TimeRoundValue(delta_time)
time = start_time
step = 0
# ----- Setting up the FSI Parameters -----
# FSI parameters
max_iter = 10 # number of inner iterations (set to 1 for explicit coupling)
interface_epsilon = 1e-5 # interface residual (only needed for implicit coupling)
relaxation_coefficient = 0.125 # initial value
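# Editor's note (added sketch, not from the original script): the dynamic
# relaxation applied below via fsi_utilities.ComputeAitkenRelaxation is
# presumably Aitken's Delta^2 update, whose classic form reads
#     omega_{k+1} = -omega_k * (r_k . (r_{k+1} - r_k)) / ||r_{k+1} - r_k||^2
# where r_k is the interface displacement residual of coupling iteration k
# and omega is the relaxation_coefficient initialised above.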
# ---------------
# ----- ALE -----
# ---------------
fluid_solver._GetSolver().GetMeshMotionSolver().SetEchoLevel(0) # Fix until the source of the prints is found
# -------------------
# ----- Mapping -----
# -------------------
# Here the time information of the fluid solver is used (can be changed if desired)
with open(fluid_project_params_file_name, 'r') as parameter_file:
    mapper_params = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters_mapper_1 = mapper_params["mapper_settings"][0]
project_parameters_mapper_2 = mapper_params["mapper_settings"][1]
project_parameters_mapper_3 = mapper_params["mapper_settings"][2]
mapper_1 = KratosMapping.MapperFactory.CreateMapper(structural_model_part,fluid_model_part, project_parameters_mapper_1)
mapper_2 = KratosMapping.MapperFactory.CreateMapper(structural_model_part,fluid_model_part, project_parameters_mapper_2)
mapper_3 = KratosMapping.MapperFactory.CreateMapper(structural_model_part,fluid_model_part, project_parameters_mapper_3)
def NeumannToStructure(mapper, flag):
mapper.InverseMap(KratosStructuralMechanics.POINT_LOAD, KratosMultiphysics.REACTION, flag)
def DisplacementToMesh(mapper):
mapper.Map(KratosMultiphysics.DISPLACEMENT, KratosMultiphysics.MESH_DISPLACEMENT)
print("======================================================================")
print("|||||||||||||||||||||||| SETTING UP FSI DONE |||||||||||||||||||||||||")
print("======================================================================")
file_writer = fsi_utilities.FileWriter("Mok_Results.dat", ["Time", "Disp_X", "Disp_Y", "Disp_Z", "Coupling_Iterations"])
tip_node = structural_model_part.GetNode(1)
# ----- Solving the problem (time integration) -----
while(time <= end_time):
new_time_fluid = fluid_solver._GetSolver().AdvanceInTime(time)
new_time_structure = structural_solver._GetSolver().AdvanceInTime(time)
fluid_solver._GetSolver().Predict()
structural_solver._GetSolver().Predict()
fluid_solver.InitializeSolutionStep()
structural_solver.InitializeSolutionStep()
time = time + delta_time
if abs(time-new_time_fluid) > 1e-12:
raise Exception("Fluid has wrong time!")
if abs(time-new_time_structure) > 1e-12:
raise Exception("Structure has wrong time!")
step += 1
print("\n--- Step =", step, "/", num_steps, "---")
print("--- Time =", round(time, round_val), "/", end_time, "---")
residual = 1
old_displacements = fsi_utilities.GetDisplacements(structural_model_part.GetSubModelPart("GENERIC_Beam").Nodes, 2)
num_inner_iter = 1
### Inner FSI Loop (executed once in case of explicit coupling)
for k in range(max_iter):
# Apply Dirichlet B.C.'s from structural solver to mesh solver
DisplacementToMesh(mapper_1)
DisplacementToMesh(mapper_2)
DisplacementToMesh(mapper_3)
# Mesh and Fluid are currently solved independently, since the ALE solver does not copy the mesh velocity
# Solve Mesh
fluid_solver._GetSolver().SolveSolutionStep()
# Apply Neumann B.C.'s from fluid solver to structural solver
NeumannToStructure(mapper_1, KratosMapping.Mapper.SWAP_SIGN | KratosMapping.Mapper.CONSERVATIVE)
NeumannToStructure(mapper_2, KratosMapping.Mapper.SWAP_SIGN | KratosMapping.Mapper.ADD_VALUES | KratosMapping.Mapper.CONSERVATIVE)
        # Solve Structure
structural_solver._GetSolver().SolveSolutionStep()
# Convergence Checking (only for implicit coupling)
if max_iter > 1:
displacements = fsi_utilities.GetDisplacements(structural_model_part.GetSubModelPart("GENERIC_Beam").Nodes, 2)
# Compute Residual
old_residual = residual
residual = fsi_utilities.CalculateResidual(displacements,old_displacements)
if (fsi_utilities.Norm(residual) <= interface_epsilon):
fsi_utilities.SetDisplacements(displacements,structural_model_part.GetSubModelPart("GENERIC_Beam").Nodes, 2)
print("******************************************************")
print("************ CONVERGENCE AT INTERFACE ACHIEVED *******")
print("******************************************************")
break # TODO check if this works bcs it is nested
else:
relaxation_coefficient = fsi_utilities.ComputeAitkenRelaxation(relaxation_coefficient, residual, old_residual, k)
relaxed_displacements = fsi_utilities.CalculateRelaxation(relaxation_coefficient, old_displacements, residual)
old_displacements = relaxed_displacements
fsi_utilities.SetDisplacements(relaxed_displacements, structural_model_part.GetSubModelPart("GENERIC_Beam").Nodes, 2)
num_inner_iter += 1
if (k+1 >= max_iter):
print("######################################################")
print("##### CONVERGENCE AT INTERFACE WAS NOT ACHIEVED ######")
print("######################################################")
print("==========================================================")
print("COUPLING RESIDUAL = ", fsi_utilities.Norm(residual))
print("COUPLING ITERATION = ", k+1, "/", max_iter)
print("RELAXATION COEFFICIENT = ",relaxation_coefficient)
print("==========================================================")
fluid_solver.FinalizeSolutionStep()
structural_solver.FinalizeSolutionStep()
fluid_solver.OutputSolutionStep()
structural_solver.OutputSolutionStep()
disp = tip_node.GetSolutionStepValue(KratosMultiphysics.DISPLACEMENT)
file_writer.WriteToFile([time, disp[0], disp[1], disp[2], num_inner_iter])
# TIME LOOP END
fluid_solver.Finalize()
structural_solver.Finalize()
file_writer.CloseFile()
|
the-stack_106_17887
|
#!/usr/bin/env python3
from core_symbol import CORE_SYMBOL
from Cluster import Cluster
from Cluster import NamedAccounts
from WalletMgr import WalletMgr
from Node import Node
from TestHelper import TestHelper
from testUtils import Utils
import testUtils
import time
import decimal
import math
import re
###############################################################
# nodeos_under_min_avail_ram
#
# Sets up 4 producing nodes using --chain-state-db-guard-size-mb and --chain-state-db-size-mb to verify that nodeos will
# shut down safely when --chain-state-db-guard-size-mb is reached, and restarts the shut-down nodes with a higher
# --chain-state-db-size-mb size to verify that the node can restart and continue until the guard is reached again. The
# test both verifies all nodes going down and 1 node at a time.
#
###############################################################
Print=Utils.Print
errorExit=Utils.errorExit
args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--wallet-port"})
Utils.Debug=args.v
totalNodes=4
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
killAll=args.clean_run
walletPort=args.wallet_port
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
WalletdName=Utils.EosWalletName
ClientName="cleos"
try:
TestHelper.printSystemInfo("BEGIN")
cluster.setWalletMgr(walletMgr)
cluster.killall(allInstances=killAll)
cluster.cleanup()
Print("Stand up cluster")
minRAMFlag="--chain-state-db-guard-size-mb"
minRAMValue=1002
maxRAMFlag="--chain-state-db-size-mb"
maxRAMValue=1010
extraNodeosArgs=" %s %d %s %d --http-max-response-time-ms 990000 --plugin eosio::trace_api_plugin --trace-no-abis " % (minRAMFlag, minRAMValue, maxRAMFlag, maxRAMValue)
if cluster.launch(onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs, useBiosBootFile=False) is False:
Utils.cmdError("launcher")
errorExit("Failed to stand up eos cluster.")
Print("Validating system accounts after bootstrap")
cluster.validateAccounts(None)
Print("creating accounts")
namedAccounts=NamedAccounts(cluster,10)
accounts=namedAccounts.accounts
testWalletName="test"
Print("Creating wallet \"%s\"." % (testWalletName))
testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount])
for _, account in cluster.defProducerAccounts.items():
walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True)
Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8")))
nodes=[]
nodes.append(cluster.getNode(0))
nodes.append(cluster.getNode(1))
nodes.append(cluster.getNode(2))
nodes.append(cluster.getNode(3))
numNodes=len(nodes)
for account in accounts:
walletMgr.importKey(account, testWallet)
# create accounts via eosio as otherwise a bid is needed
for account in accounts:
Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name))
trans=nodes[0].createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=False, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True)
transferAmount="70000000.0000 {0}".format(CORE_SYMBOL)
Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name))
nodes[0].transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer")
trans=nodes[0].delegatebw(account, 1000000.0000, 68000000.0000, waitForTransBlock=True, exitOnError=True)
contractAccount=cluster.createAccountKeys(1)[0]
contractAccount.name="contracttest"
walletMgr.importKey(contractAccount, testWallet)
Print("Create new account %s via %s" % (contractAccount.name, cluster.eosioAccount.name))
trans=nodes[0].createInitializeAccount(contractAccount, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=False, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True)
transferAmount="90000000.0000 {0}".format(CORE_SYMBOL)
Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, contractAccount.name))
nodes[0].transferFunds(cluster.eosioAccount, contractAccount, transferAmount, "test transfer")
trans=nodes[0].delegatebw(contractAccount, 1000000.0000, 88000000.0000, waitForTransBlock=True, exitOnError=True)
contractDir="unittests/test-contracts/integration_test"
wasmFile="integration_test.wasm"
abiFile="integration_test.abi"
Print("Publish contract")
trans=nodes[0].publishContract(contractAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True)
if trans is None:
Utils.cmdError("%s set contract %s" % (ClientName, contractAccount.name))
errorExit("Failed to publish contract.")
contract=contractAccount.name
Print("push create action to %s contract" % (contract))
action="store"
numAmount=5000
keepProcessing=True
count=0
while keepProcessing:
numAmount+=1
timeOutCount=0
for fromIndex in range(namedAccounts.numAccounts):
count+=1
toIndex=fromIndex+1
if toIndex==namedAccounts.numAccounts:
toIndex=0
fromAccount=accounts[fromIndex]
toAccount=accounts[toIndex]
data="{\"from\":\"%s\",\"to\":\"%s\",\"num\":%d}" % (fromAccount.name, toAccount.name, numAmount)
opts="--permission %s@active --permission %s@active --expiration 90" % (contract, fromAccount.name)
try:
trans=nodes[count % numNodes].pushMessage(contract, action, data, opts)
if trans is None or not trans[0]:
timeOutCount+=1
if timeOutCount>=3:
Print("Failed to push create action to eosio contract for %d consecutive times, looks like nodeos already exited." % (timeOutCount))
keepProcessing=False
break
Print("Failed to push create action to eosio contract. sleep for 5 seconds")
count-=1 # failed attempt shouldn't be counted
time.sleep(5)
else:
timeOutCount=0
time.sleep(1)
except TypeError as ex:
keepProcessing=False
break
    # spread the actions to all accounts, to use each account's tps bandwidth
fromIndexStart=fromIndex+1 if fromIndex+1<namedAccounts.numAccounts else 0
# min and max are subjective, just assigned to make sure that many small changes in nodeos don't
# result in the test not correctly validating behavior
if count < 5 or count > 20:
strMsg="little" if count < 20 else "much"
Utils.cmdError("Was able to send %d store actions which was too %s" % (count, strMsg))
errorExit("Incorrect number of store actions sent")
# Make sure all the nodes are shutdown (may take a little while for this to happen, so making multiple passes)
count=0
while True:
allDone=True
for node in nodes:
if node.verifyAlive():
allDone=False
if allDone:
break
count+=1
if count>12:
Utils.cmdError("All Nodes should have died")
errorExit("Failure - All Nodes should have died")
time.sleep(5)
for i in range(numNodes):
f = open(Utils.getNodeDataDir(i) + "/stderr.txt")
contents = f.read()
if contents.find("database chain::guard_exception") == -1:
errorExit("Node%d is expected to exit because of database guard_exception, but was not." % (i))
Print("all nodes exited with expected reason database_guard_exception")
Print("relaunch nodes with new capacity")
addSwapFlags={}
maxRAMValue+=2
currentMinimumMaxRAM=maxRAMValue
enabledStaleProduction=False
for i in range(numNodes):
addSwapFlags[maxRAMFlag]=str(maxRAMValue)
#addSwapFlags["--max-irreversible-block-age"]=str(-1)
nodeIndex=numNodes-i-1
if not enabledStaleProduction:
addSwapFlags["--enable-stale-production"]="" # just enable stale production for the first node
enabledStaleProduction=True
if not nodes[nodeIndex].relaunch("", newChain=False, addSwapFlags=addSwapFlags):
Utils.cmdError("Failed to restart node0 with new capacity %s" % (maxRAMValue))
errorExit("Failure - Node should have restarted")
addSwapFlags={}
maxRAMValue=currentMinimumMaxRAM+30
time.sleep(20)
for i in range(numNodes):
if not nodes[i].verifyAlive():
Utils.cmdError("Node %d should be alive" % (i))
errorExit("Failure - All Nodes should be alive")
# get all the nodes to get info, so reported status (on error) reflects their current state
Print("push more actions to %s contract" % (contract))
cluster.getInfos()
action="store"
keepProcessing=True
count=0
while keepProcessing and count < 40:
Print("Send %s" % (action))
numAmount+=1
for fromIndexOffset in range(namedAccounts.numAccounts):
count+=1
fromIndex=fromIndexStart+fromIndexOffset
if fromIndex>=namedAccounts.numAccounts:
fromIndex-=namedAccounts.numAccounts
toIndex=fromIndex+1
if toIndex==namedAccounts.numAccounts:
toIndex=0
fromAccount=accounts[fromIndex]
toAccount=accounts[toIndex]
data="{\"from\":\"%s\",\"to\":\"%s\",\"num\":%d}" % (fromAccount.name, toAccount.name, numAmount)
opts="--permission %s@active --permission %s@active --expiration 90" % (contract, fromAccount.name)
try:
trans=nodes[count % numNodes].pushMessage(contract, action, data, opts)
if trans is None or not trans[0]:
Print("Failed to push create action to eosio contract. sleep for 60 seconds")
time.sleep(60)
time.sleep(1)
except TypeError as ex:
Print("Failed to send %s" % (action))
if not nodes[len(nodes)-1].verifyAlive():
keepProcessing=False
break
if keepProcessing:
Utils.cmdError("node[%d] never shutdown" % (numNodes-1))
errorExit("Failure - Node should be shutdown")
for i in range(numNodes):
# only the last node should be dead
if not nodes[i].verifyAlive() and i<numNodes-1:
Utils.cmdError("Node %d should be alive" % (i))
errorExit("Failure - Node should be alive")
Print("relaunch node with even more capacity")
addSwapFlags={}
time.sleep(10)
maxRAMValue=currentMinimumMaxRAM+5
currentMinimumMaxRAM=maxRAMValue
addSwapFlags[maxRAMFlag]=str(maxRAMValue)
if not nodes[len(nodes)-1].relaunch("", newChain=False, addSwapFlags=addSwapFlags):
Utils.cmdError("Failed to restart node %d with new capacity %s" % (numNodes-1, maxRAMValue))
errorExit("Failure - Node should have restarted")
addSwapFlags={}
time.sleep(10)
for node in nodes:
if not node.verifyAlive():
Utils.cmdError("All Nodes should be alive")
errorExit("Failure - All Nodes should be alive")
time.sleep(20)
Print("Send 1 more action to every node")
numAmount+=1
for fromIndexOffset in range(namedAccounts.numAccounts):
# just sending one node to each
if fromIndexOffset>=len(nodes):
break
fromIndex=fromIndexStart+fromIndexOffset
if fromIndex>=namedAccounts.numAccounts:
fromIndex-=namedAccounts.numAccounts
toIndex=fromIndex+1
if toIndex==namedAccounts.numAccounts:
toIndex=0
fromAccount=accounts[fromIndex]
toAccount=accounts[toIndex]
node=nodes[fromIndexOffset]
data="{\"from\":\"%s\",\"to\":\"%s\",\"num\":%d}" % (fromAccount.name, toAccount.name, numAmount)
opts="--permission %s@active --permission %s@active --expiration 90" % (contract, fromAccount.name)
try:
trans=node.pushMessage(contract, action, data, opts)
if trans is None or not trans[0]:
Print("Failed to push create action to eosio contract. sleep for 60 seconds")
time.sleep(60)
continue
time.sleep(1)
except TypeError as ex:
Utils.cmdError("Failed to send %s action to node %d" % (fromAccount, fromIndexOffset, action))
errorExit("Failure - send %s action should have succeeded" % (action))
time.sleep(10)
Print("Check nodes are alive")
for node in nodes:
if not node.verifyAlive():
Utils.cmdError("All Nodes should be alive")
errorExit("Failure - All Nodes should be alive")
testSuccessful=True
finally:
TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)
exitCode = 0 if testSuccessful else 1
exit(exitCode)
|
the-stack_106_17888
|
import io
import json
import logging
from ssl import SSLContext
from typing import Any, AsyncGenerator, Dict, Optional, Tuple, Type, Union
import aiohttp
from aiohttp.client_exceptions import ClientResponseError
from aiohttp.client_reqrep import Fingerprint
from aiohttp.helpers import BasicAuth
from aiohttp.typedefs import LooseCookies, LooseHeaders
from graphql import DocumentNode, ExecutionResult, print_ast
from ..utils import extract_files
from .async_transport import AsyncTransport
from .exceptions import (
TransportAlreadyConnected,
TransportClosed,
TransportProtocolError,
TransportServerError,
)
log = logging.getLogger(__name__)
class AIOHTTPTransport(AsyncTransport):
""":ref:`Async Transport <async_transports>` to execute GraphQL queries
on remote servers with an HTTP connection.
This transport use the aiohttp library with asyncio.
"""
file_classes: Tuple[Type[Any], ...] = (
io.IOBase,
aiohttp.StreamReader,
AsyncGenerator,
)
def __init__(
self,
url: str,
headers: Optional[LooseHeaders] = None,
cookies: Optional[LooseCookies] = None,
auth: Optional[BasicAuth] = None,
ssl: Union[SSLContext, bool, Fingerprint] = False,
timeout: Optional[int] = None,
client_session_args: Optional[Dict[str, Any]] = None,
) -> None:
"""Initialize the transport with the given aiohttp parameters.
:param url: The GraphQL server URL. Example: 'https://server.com:PORT/path'.
:param headers: Dict of HTTP Headers.
:param cookies: Dict of HTTP cookies.
:param auth: BasicAuth object to enable Basic HTTP auth if needed
        :param ssl: ssl_context of the connection. Use ssl=False to disable encryption
        :param timeout: Total connection timeout in seconds, passed to
            aiohttp.ClientTimeout as its ``total`` argument
:param client_session_args: Dict of extra args passed to
`aiohttp.ClientSession`_
.. _aiohttp.ClientSession:
https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession
"""
self.url: str = url
self.headers: Optional[LooseHeaders] = headers
self.cookies: Optional[LooseCookies] = cookies
self.auth: Optional[BasicAuth] = auth
self.ssl: Union[SSLContext, bool, Fingerprint] = ssl
self.timeout: Optional[int] = timeout
self.client_session_args = client_session_args
self.session: Optional[aiohttp.ClientSession] = None
async def connect(self) -> None:
"""Coroutine which will create an aiohttp ClientSession() as self.session.
Don't call this coroutine directly on the transport, instead use
:code:`async with` on the client and this coroutine will be executed
to create the session.
Should be cleaned with a call to the close coroutine.
"""
if self.session is None:
client_session_args: Dict[str, Any] = {
"cookies": self.cookies,
"headers": self.headers,
"auth": self.auth,
}
if self.timeout is not None:
client_session_args["timeout"] = aiohttp.ClientTimeout(
total=self.timeout
)
# Adding custom parameters passed from init
if self.client_session_args:
client_session_args.update(self.client_session_args) # type: ignore
self.session = aiohttp.ClientSession(**client_session_args)
else:
raise TransportAlreadyConnected("Transport is already connected")
async def close(self) -> None:
"""Coroutine which will close the aiohttp session.
Don't call this coroutine directly on the transport, instead use
:code:`async with` on the client and this coroutine will be executed
when you exit the async context manager.
"""
if self.session is not None:
await self.session.close()
self.session = None
async def execute(
self,
document: DocumentNode,
variable_values: Optional[Dict[str, Any]] = None,
operation_name: Optional[str] = None,
        extra_args: Optional[Dict[str, Any]] = None,
upload_files: bool = False,
) -> ExecutionResult:
"""Execute the provided document AST against the configured remote server
using the current session.
This uses the aiohttp library to perform a HTTP POST request asynchronously
to the remote server.
Don't call this coroutine directly on the transport, instead use
:code:`execute` on a client or a session.
:param document: the parsed GraphQL request
:param variable_values: An optional Dict of variable values
:param operation_name: An optional Operation name for the request
:param extra_args: additional arguments to send to the aiohttp post method
:param upload_files: Set to True if you want to put files in the variable values
:returns: an ExecutionResult object.
"""
query_str = print_ast(document)
payload: Dict[str, Any] = {
"query": query_str,
}
if operation_name:
payload["operationName"] = operation_name
if upload_files:
# If the upload_files flag is set, then we need variable_values
assert variable_values is not None
# If we upload files, we will extract the files present in the
# variable_values dict and replace them by null values
nulled_variable_values, files = extract_files(
variables=variable_values, file_classes=self.file_classes,
)
# Save the nulled variable values in the payload
payload["variables"] = nulled_variable_values
# Prepare aiohttp to send multipart-encoded data
data = aiohttp.FormData()
# Generate the file map
# path is nested in a list because the spec allows multiple pointers
# to the same file. But we don't support that.
# Will generate something like {"0": ["variables.file"]}
file_map = {str(i): [path] for i, path in enumerate(files)}
# Enumerate the file streams
# Will generate something like {'0': <_io.BufferedReader ...>}
file_streams = {str(i): files[path] for i, path in enumerate(files)}
# Add the payload to the operations field
operations_str = json.dumps(payload)
log.debug("operations %s", operations_str)
data.add_field(
"operations", operations_str, content_type="application/json"
)
# Add the file map field
file_map_str = json.dumps(file_map)
log.debug("file_map %s", file_map_str)
data.add_field("map", file_map_str, content_type="application/json")
# Add the extracted files as remaining fields
for k, v in file_streams.items():
data.add_field(k, v, filename=getattr(v, "name", k))
post_args: Dict[str, Any] = {"data": data}
else:
if variable_values:
payload["variables"] = variable_values
if log.isEnabledFor(logging.INFO):
log.info(">>> %s", json.dumps(payload))
post_args = {"json": payload}
# Pass post_args to aiohttp post method
if extra_args:
post_args.update(extra_args)
if self.session is None:
raise TransportClosed("Transport is not connected")
async with self.session.post(self.url, ssl=self.ssl, **post_args) as resp:
async def raise_response_error(resp: aiohttp.ClientResponse, reason: str):
# We raise a TransportServerError if the status code is 400 or higher
# We raise a TransportProtocolError in the other cases
try:
# Raise a ClientResponseError if response status is 400 or higher
resp.raise_for_status()
except ClientResponseError as e:
raise TransportServerError(str(e), e.status) from e
result_text = await resp.text()
raise TransportProtocolError(
f"Server did not return a GraphQL result: "
f"{reason}: "
f"{result_text}"
)
try:
result = await resp.json()
if log.isEnabledFor(logging.INFO):
result_text = await resp.text()
log.info("<<< %s", result_text)
except Exception:
await raise_response_error(resp, "Not a JSON answer")
if "errors" not in result and "data" not in result:
await raise_response_error(resp, 'No "data" or "errors" keys in answer')
return ExecutionResult(
errors=result.get("errors"),
data=result.get("data"),
extensions=result.get("extensions"),
)
def subscribe(
self,
document: DocumentNode,
variable_values: Optional[Dict[str, Any]] = None,
operation_name: Optional[str] = None,
) -> AsyncGenerator[ExecutionResult, None]:
"""Subscribe is not supported on HTTP.
:meta private:
"""
raise NotImplementedError(" The HTTP transport does not support subscriptions")
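# Hedged usage sketch (editor's addition, not part of the original module):
# pairing this transport with gql's asyncio Client; the endpoint URL is a
# placeholder.
#
#     import asyncio
#     from gql import Client, gql
#
#     async def main():
#         transport = AIOHTTPTransport(url="https://example.com/graphql")
#         async with Client(transport=transport,
#                           fetch_schema_from_transport=True) as session:
#             result = await session.execute(gql("{ __typename }"))
#             print(result)
#
#     asyncio.run(main())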
|
the-stack_106_17890
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 10:56:25 2019
@author: Manuel Camargo
"""
import os
import subprocess
import types
import itertools
import platform as pl
import copy
import multiprocessing
from multiprocessing import Pool
from xml.dom import minidom
import time
import shutil
from lxml import etree
import xmltodict as xtd
import pandas as pd
import numpy as np
from operator import itemgetter
from tqdm import tqdm
import traceback
import utils.support as sup
from utils.support import timeit
import readers.log_reader as lr
import readers.log_splitter as ls
import analyzers.sim_evaluator as sim
from support_modules.writers import xes_writer as xes
from support_modules.writers import xml_writer as xml
from support_modules.writers.model_serialization import serialize_model
from extraction import parameter_extraction as par
from extraction import log_replayer as rpl
import opt_times.times_optimizer as to
import opt_structure.structure_optimizer as so
import opt_structure.structure_miner as sm
class Simod():
"""
Main class of the Simulation Models Discoverer
"""
class Decorators(object):
@classmethod
def safe_exec(cls, method):
"""
Decorator to safe execute methods and return the state
----------
method : Any method.
Returns
-------
dict : execution status
"""
def safety_check(*args, **kw):
is_safe = kw.get('is_safe', method.__name__.upper())
if is_safe:
try:
method(*args)
except Exception as e:
print(e)
traceback.print_exc()
is_safe = False
return is_safe
return safety_check
def __init__(self, settings):
"""constructor"""
self.settings = settings
self.log = types.SimpleNamespace()
self.log_train = types.SimpleNamespace()
self.log_test = types.SimpleNamespace()
self.sim_values = list()
self.response = dict()
# self.parameters = dict()
self.is_safe = True
self.output_file = sup.file_id(prefix='SE_')
def execute_pipeline(self, can=False) -> None:
exec_times = dict()
self.is_safe = self.read_inputs(
log_time=exec_times, is_safe=self.is_safe)
self.is_safe = self.temp_path_creation(
log_time=exec_times, is_safe=self.is_safe)
self.is_safe = self.mine_structure(
log_time=exec_times, is_safe=self.is_safe)
self.is_safe = self.replay_process(
log_time=exec_times, is_safe=self.is_safe)
self.is_safe = self.extract_parameters(
log_time=exec_times, is_safe=self.is_safe)
self.is_safe = self.simulate(
log_time=exec_times, is_safe=self.is_safe)
self.mannage_results()
self.save_times(exec_times, self.settings)
self.is_safe = self.export_canonical_model(is_safe=self.is_safe)
print("-- End of trial --")
@timeit(rec_name='READ_INPUTS')
@Decorators.safe_exec
def read_inputs(self, **kwargs) -> None:
# Event log reading
self.log = lr.LogReader(os.path.join(self.settings['input'],
self.settings['file']),
self.settings['read_options'])
# Time splitting 80-20
self.split_timeline(0.8,
self.settings['read_options']['one_timestamp'])
@timeit(rec_name='PATH_DEF')
@Decorators.safe_exec
def temp_path_creation(self, **kwargs) -> None:
# Output folder creation
if not os.path.exists(self.settings['output']):
os.makedirs(self.settings['output'])
os.makedirs(os.path.join(self.settings['output'], 'sim_data'))
# Create customized event-log for the external tools
xes.XesWriter(self.log_train, self.settings)
@timeit(rec_name='MINING_STRUCTURE')
@Decorators.safe_exec
def mine_structure(self, **kwargs) -> None:
print(self.settings)
structure_miner = sm.StructureMiner(self.settings, self.log_train)
structure_miner.execute_pipeline()
if structure_miner.is_safe:
self.bpmn = structure_miner.bpmn
self.process_graph = structure_miner.process_graph
else:
raise RuntimeError('Mining Structure error')
@timeit(rec_name='REPLAY_PROCESS')
@Decorators.safe_exec
def replay_process(self) -> None:
"""
Process replaying
"""
replayer = rpl.LogReplayer(self.process_graph,
self.log_train.get_traces(),
self.settings,
msg='reading conformant training traces')
self.process_stats = replayer.process_stats
self.conformant_traces = replayer.conformant_traces
@timeit(rec_name='EXTRACTION')
@Decorators.safe_exec
def extract_parameters(self, **kwargs) -> None:
print("-- Mining Simulation Parameters --")
p_extractor = par.ParameterMiner(self.log_train,
self.bpmn,
self.process_graph,
self.settings)
num_inst = len(pd.DataFrame(self.log_test).caseid.unique())
start_time = (pd.DataFrame(self.log_test)
.start_timestamp.min().strftime("%Y-%m-%dT%H:%M:%S.%f+00:00"))
p_extractor.extract_parameters(num_inst, start_time)
if p_extractor.is_safe:
self.process_stats = self.process_stats.merge(
p_extractor.resource_table[['resource', 'role']],
on='resource',
how='left')
# save parameters
self.parameters = copy.deepcopy(p_extractor.parameters)
# print parameters in xml bimp format
xml.print_parameters(os.path.join(
self.settings['output'],
self.settings['file'].split('.')[0]+'.bpmn'),
os.path.join(self.settings['output'],
self.settings['file'].split('.')[0]+'.bpmn'),
p_extractor.parameters)
else:
raise RuntimeError('Parameters extraction error')
@timeit(rec_name='SIMULATION_EVAL')
@Decorators.safe_exec
def simulate(self, **kwargs) -> None:
def pbar_async(p, msg):
pbar = tqdm(total=reps, desc=msg)
processed = 0
while not p.ready():
cprocesed = (reps - p._number_left)
if processed < cprocesed:
increment = cprocesed - processed
pbar.update(n=increment)
processed = cprocesed
time.sleep(1)
pbar.update(n=(reps - processed))
p.wait()
pbar.close()
reps = self.settings['repetitions']
cpu_count = multiprocessing.cpu_count()
w_count = reps if reps <= cpu_count else cpu_count
pool = Pool(processes=w_count)
# Simulate
args = [(self.settings, rep) for rep in range(reps)]
p = pool.map_async(self.execute_simulator, args)
pbar_async(p, 'simulating:')
# Read simulated logs
args = [(self.settings, rep) for rep in range(reps)]
p = pool.map_async(self.read_stats, args)
pbar_async(p, 'reading simulated logs:')
# Evaluate
args = [(self.settings, self.process_stats, log) for log in p.get()]
if len(self.log_test.caseid.unique()) > 1000:
pool.close()
results = [self.evaluate_logs(arg) for arg in tqdm(args, 'evaluating results:')]
# Save results
self.sim_values = list(itertools.chain(*results))
else:
p = pool.map_async(self.evaluate_logs, args)
pbar_async(p, 'evaluating results:')
pool.close()
# Save results
self.sim_values = list(itertools.chain(*p.get()))
@staticmethod
def read_stats(args):
def read(settings, rep):
"""Reads the simulation results stats
Args:
settings (dict): Path to jar and file names
rep (int): repetition number
"""
# message = 'Reading log repetition: ' + str(rep+1)
# print(message)
path = os.path.join(settings['output'], 'sim_data')
log_name = settings['file'].split('.')[0]+'_'+str(rep+1)+'.csv'
rep_results = pd.read_csv(os.path.join(path, log_name),
dtype={'caseid': object})
rep_results['caseid'] = 'Case' + rep_results['caseid']
rep_results['run_num'] = rep
rep_results['source'] = 'simulation'
rep_results.rename(columns={'resource': 'user'}, inplace=True)
rep_results['start_timestamp'] = pd.to_datetime(
rep_results['start_timestamp'], format='%Y-%m-%d %H:%M:%S.%f')
rep_results['end_timestamp'] = pd.to_datetime(
rep_results['end_timestamp'], format='%Y-%m-%d %H:%M:%S.%f')
return rep_results
return read(*args)
@staticmethod
def evaluate_logs(args):
def evaluate(settings, process_stats, sim_log):
"""Reads the simulation results stats
Args:
settings (dict): Path to jar and file names
rep (int): repetition number
"""
# print('Reading repetition:', (rep+1), sep=' ')
rep = (sim_log.iloc[0].run_num)
sim_values = list()
evaluator = sim.SimilarityEvaluator(
process_stats,
sim_log,
settings,
max_cases=1000)
metrics = [settings['sim_metric']]
if 'add_metrics' in settings.keys():
metrics = list(set(list(settings['add_metrics']) +
metrics))
for metric in metrics:
evaluator.measure_distance(metric)
sim_values.append({**{'run_num': rep}, **evaluator.similarity})
return sim_values
return evaluate(*args)
@staticmethod
def execute_simulator(args):
def sim_call(settings, rep):
"""Executes BIMP Simulations.
Args:
settings (dict): Path to jar and file names
rep (int): repetition number
"""
# message = 'Executing BIMP Simulations Repetition: ' + str(rep+1)
# print(message)
args = ['java', '-jar', settings['bimp_path'],
os.path.join(settings['output'],
settings['file'].split('.')[0]+'.bpmn'),
'-csv',
os.path.join(settings['output'], 'sim_data',
settings['file']
.split('.')[0]+'_'+str(rep+1)+'.csv')]
subprocess.run(args, check=True, stdout=subprocess.PIPE)
sim_call(*args)
@staticmethod
def get_traces(data, one_timestamp):
"""
returns the data splitted by caseid and ordered by start_timestamp
"""
cases = list(set([x['caseid'] for x in data]))
traces = list()
for case in cases:
order_key = 'end_timestamp' if one_timestamp else 'start_timestamp'
trace = sorted(
list(filter(lambda x: (x['caseid'] == case), data)),
key=itemgetter(order_key))
traces.append(trace)
return traces
def mannage_results(self) -> None:
self.sim_values = pd.DataFrame.from_records(self.sim_values)
self.sim_values['output'] = self.settings['output']
self.sim_values.to_csv(os.path.join(self.settings['output'],
self.output_file),
index=False)
@staticmethod
def save_times(times, settings):
times = [{**{'output': settings['output']}, **times}]
log_file = os.path.join('outputs', 'execution_times.csv')
if not os.path.exists(log_file):
open(log_file, 'w').close()
if os.path.getsize(log_file) > 0:
sup.create_csv_file(times, log_file, mode='a')
else:
sup.create_csv_file_header(times, log_file)
@Decorators.safe_exec
def export_canonical_model(self, **kwargs):
ns = {'qbp': "http://www.qbp-simulator.com/Schema201212"}
time_table = etree.tostring(self.parameters['time_table'],
pretty_print=True)
time_table = xtd.parse(time_table,
process_namespaces=True,
namespaces=ns)
self.parameters['time_table'] = time_table
self.parameters['discovery_parameters'] = self.filter_dic_params(
self.settings)
sup.create_json(self.parameters, os.path.join(
self.settings['output'],
self.settings['file'].split('.')[0]+'_canon.json'))
@staticmethod
def filter_dic_params(settings):
best_params = dict()
best_params['alg_manag'] = settings['alg_manag']
best_params['gate_management'] = settings['gate_management']
best_params['rp_similarity'] = str(settings['rp_similarity'])
# best structure mining parameters
if settings['mining_alg'] in ['sm1', 'sm3']:
best_params['epsilon'] = str(settings['epsilon'])
best_params['eta'] = str(settings['eta'])
elif settings['mining_alg'] == 'sm2':
best_params['concurrency'] = str(settings['concurrency'])
if settings['res_cal_met'] == 'default':
best_params['res_dtype'] = settings['res_dtype']
else:
best_params['res_support'] = str(settings['res_support'])
best_params['res_confidence'] = str(settings['res_confidence'])
if settings['arr_cal_met'] == 'default':
best_params['arr_dtype'] = settings['res_dtype']
else:
best_params['arr_support'] = str(settings['arr_support'])
best_params['arr_confidence'] = str(settings['arr_confidence'])
return best_params
# =============================================================================
# Support methods
# =============================================================================
def split_timeline(self, size: float, one_ts: bool) -> None:
"""
Split an event log dataframe by time to peform split-validation.
prefered method time splitting removing incomplete traces.
If the testing set is smaller than the 10% of the log size
the second method is sort by traces start and split taking the whole
traces no matter if they are contained in the timeframe or not
Parameters
----------
size : float, validation percentage.
one_ts : bool, Support only one timestamp.
"""
# Split log data
splitter = ls.LogSplitter(self.log.data)
train, test = splitter.split_log('timeline_contained', size, one_ts)
total_events = len(self.log.data)
        # Check size and change the time splitting method if necessary
if len(test) < int(total_events*0.1):
train, test = splitter.split_log('timeline_trace', size, one_ts)
# Set splits
key = 'end_timestamp' if one_ts else 'start_timestamp'
test = pd.DataFrame(test)
train = pd.DataFrame(train)
self.log_test = (test.sort_values(key, ascending=True)
.reset_index(drop=True))
self.log_train = copy.deepcopy(self.log)
self.log_train.set_data(train.sort_values(key, ascending=True)
.reset_index(drop=True).to_dict('records'))
# =============================================================================
# Hyperparameter-optimizer
# =============================================================================
class DiscoveryOptimizer():
"""
Hyperparameter-optimizer class
"""
def __init__(self, settings):
"""constructor"""
self.settings = settings
self.best_params = dict()
self.log = types.SimpleNamespace()
self.log_train = types.SimpleNamespace()
self.log_test = types.SimpleNamespace()
if not os.path.exists('outputs'):
os.makedirs('outputs')
def execute_pipeline(self) -> None:
exec_times = dict()
self.read_inputs(log_time=exec_times)
output_file = sup.file_id(prefix='SE_')
print('############ Structure optimization ############')
# Structure optimization
structure_optimizer = so.StructureOptimizer(
{**self.settings['gl'], **self.settings['strc']},
copy.deepcopy(self.log_train))
structure_optimizer.execute_trials()
struc_model = structure_optimizer.best_output
best_parms = structure_optimizer.best_parms
self.settings['gl']['alg_manag'] = (
self.settings['strc']['alg_manag'][best_parms['alg_manag']])
self.best_params['alg_manag'] = self.settings['gl']['alg_manag']
self.settings['gl']['gate_management'] = (
self.settings['strc']['gate_management'][best_parms['gate_management']])
self.best_params['gate_management'] = self.settings['gl']['gate_management']
# best structure mining parameters
if self.settings['gl']['mining_alg'] in ['sm1', 'sm3']:
self.settings['gl']['epsilon'] = best_parms['epsilon']
self.settings['gl']['eta'] = best_parms['eta']
self.best_params['epsilon'] = best_parms['epsilon']
self.best_params['eta'] = best_parms['eta']
elif self.settings['gl']['mining_alg'] == 'sm2':
self.settings['gl']['concurrency'] = best_parms['concurrency']
self.best_params['concurrency'] = best_parms['concurrency']
for key in ['rp_similarity', 'res_dtype', 'arr_dtype', 'res_sup_dis',
'res_con_dis', 'arr_support', 'arr_confidence',
'res_cal_met', 'arr_cal_met']:
self.settings.pop(key, None)
# self._test_model(struc_model, output_file)
print('############ Times optimization ############')
times_optimizer = to.TimesOptimizer(
self.settings['gl'],
self.settings['tm'],
copy.deepcopy(self.log_train),
struc_model)
times_optimizer.execute_trials()
# Discovery parameters
if times_optimizer.best_parms['res_cal_met'] == 1:
self.best_params['res_dtype'] = (
self.settings['tm']['res_dtype']
[times_optimizer.best_parms['res_dtype']])
else:
self.best_params['res_support'] = (
times_optimizer.best_parms['res_support'])
self.best_params['res_confidence'] = (
times_optimizer.best_parms['res_confidence'])
if times_optimizer.best_parms['arr_cal_met'] == 1:
self.best_params['arr_dtype'] = (
self.settings['tm']['res_dtype']
[times_optimizer.best_parms['arr_dtype']])
else:
self.best_params['arr_support'] = (
times_optimizer.best_parms['arr_support'])
self.best_params['arr_confidence'] = (
times_optimizer.best_parms['arr_confidence'])
print('############ Final comparison ############')
self._test_model(times_optimizer.best_output,
output_file,
structure_optimizer.file_name,
times_optimizer.file_name)
self._export_canonical_model(times_optimizer.best_output)
shutil.rmtree(structure_optimizer.temp_output)
shutil.rmtree(times_optimizer.temp_output)
print("-- End of trial --")
def _test_model(self, best_output, output_file, opt_strf, opt_timf):
output_path = os.path.join('outputs', sup.folder_id())
if not os.path.exists(output_path):
os.makedirs(output_path)
os.makedirs(os.path.join(output_path, 'sim_data'))
self.settings['gl'].pop('output', None)
self.settings['gl']['output'] = output_path
self._modify_simulation_model(
os.path.join(best_output,
self.settings['gl']['file'].split('.')[0]+'.bpmn'))
self._load_model_and_measures()
self._simulate()
self.sim_values = pd.DataFrame.from_records(self.sim_values)
self.sim_values['output'] = output_path
self.sim_values.to_csv(os.path.join(output_path, output_file),
index=False)
shutil.move(opt_strf, output_path)
shutil.move(opt_timf, output_path)
def _export_canonical_model(self, best_output):
print(os.path.join(
self.settings['gl']['output'],
self.settings['gl']['file'].split('.')[0]+'.bpmn'))
canonical_model = serialize_model(os.path.join(
self.settings['gl']['output'],
self.settings['gl']['file'].split('.')[0]+'.bpmn'))
# Users in rol data
resource_table = pd.read_pickle(
os.path.join(best_output, 'resource_table.pkl'))
user_rol = dict()
for key, group in resource_table.groupby('role'):
user_rol[key] = list(group.resource)
canonical_model['rol_user'] = user_rol
# Json creation
self.best_params = {k: str(v) for k, v in self.best_params.items()}
canonical_model['discovery_parameters'] = self.best_params
sup.create_json(canonical_model, os.path.join(
self.settings['gl']['output'],
self.settings['gl']['file'].split('.')[0]+'_canon.json'))
@timeit
def read_inputs(self, **kwargs) -> None:
# Event log reading
self.log = lr.LogReader(os.path.join(self.settings['gl']['input'],
self.settings['gl']['file']),
self.settings['gl']['read_options'])
# Time splitting 80-20
self.split_timeline(0.8,
self.settings['gl']['read_options']['one_timestamp'])
def split_timeline(self, size: float, one_ts: bool) -> None:
"""
        Split an event log dataframe by time to perform split-validation.
        The preferred method is timeline splitting with removal of incomplete
        traces. If the resulting testing set is smaller than 10% of the log
        size, the fallback method sorts traces by start time and splits whole
        traces, whether or not they are fully contained in the timeframe.
Parameters
----------
size : float, validation percentage.
one_ts : bool, Support only one timestamp.
"""
# Split log data
splitter = ls.LogSplitter(self.log.data)
train, test = splitter.split_log('timeline_contained', size, one_ts)
total_events = len(self.log.data)
        # Check size and change time splitting method if necessary
if len(test) < int(total_events*0.1):
train, test = splitter.split_log('timeline_trace', size, one_ts)
# Set splits
key = 'end_timestamp' if one_ts else 'start_timestamp'
test = pd.DataFrame(test)
train = pd.DataFrame(train)
self.log_test = (test.sort_values(key, ascending=True)
.reset_index(drop=True))
self.log_train = copy.deepcopy(self.log)
self.log_train.set_data(train.sort_values(key, ascending=True)
.reset_index(drop=True).to_dict('records'))
def _modify_simulation_model(self, model):
"""Modifies the number of instances of the BIMP simulation model
to be equal to the number of instances in the testing log"""
num_inst = len(self.log_test.caseid.unique())
# Get minimum date
start_time = (self.log_test
.start_timestamp
.min().strftime("%Y-%m-%dT%H:%M:%S.%f+00:00"))
mydoc = minidom.parse(model)
items = mydoc.getElementsByTagName('qbp:processSimulationInfo')
items[0].attributes['processInstances'].value = str(num_inst)
items[0].attributes['startDateTime'].value = start_time
new_model_path = os.path.join(self.settings['gl']['output'],
os.path.split(model)[1])
with open(new_model_path, 'wb') as f:
f.write(mydoc.toxml().encode('utf-8'))
f.close()
return new_model_path
def _load_model_and_measures(self):
self.process_stats = self.log_test
self.process_stats['source'] = 'log'
self.process_stats['run_num'] = 0
def _simulate(self, **kwargs) -> None:
def pbar_async(p, msg):
pbar = tqdm(total=reps, desc=msg)
processed = 0
while not p.ready():
                cprocessed = (reps - p._number_left)
                if processed < cprocessed:
                    increment = cprocessed - processed
                    pbar.update(n=increment)
                    processed = cprocessed
time.sleep(1)
pbar.update(n=(reps - processed))
p.wait()
pbar.close()
reps = self.settings['gl']['repetitions']
cpu_count = multiprocessing.cpu_count()
w_count = reps if reps <= cpu_count else cpu_count
pool = Pool(processes=w_count)
# Simulate
args = [(self.settings['gl'], rep) for rep in range(reps)]
p = pool.map_async(self.execute_simulator, args)
pbar_async(p, 'simulating:')
# Read simulated logs
args = [(self.settings['gl'], rep) for rep in range(reps)]
p = pool.map_async(self.read_stats, args)
pbar_async(p, 'reading simulated logs:')
# Evaluate
args = [(self.settings['gl'], self.process_stats, log) for log in p.get()]
if len(self.log_test.caseid.unique()) > 1000:
pool.close()
results = [self.evaluate_logs(arg) for arg in tqdm(args, 'evaluating results:')]
# Save results
self.sim_values = list(itertools.chain(*results))
else:
p = pool.map_async(self.evaluate_logs, args)
pbar_async(p, 'evaluating results:')
pool.close()
# Save results
self.sim_values = list(itertools.chain(*p.get()))
@staticmethod
def read_stats(args):
def read(settings, rep):
"""Reads the simulation results stats
Args:
settings (dict): Path to jar and file names
rep (int): repetition number
"""
# message = 'Reading log repetition: ' + str(rep+1)
# print(message)
path = os.path.join(settings['output'], 'sim_data')
log_name = settings['file'].split('.')[0]+'_'+str(rep+1)+'.csv'
rep_results = pd.read_csv(os.path.join(path, log_name),
dtype={'caseid': object})
rep_results['caseid'] = 'Case' + rep_results['caseid']
rep_results['run_num'] = rep
rep_results['source'] = 'simulation'
rep_results.rename(columns={'resource': 'user'}, inplace=True)
rep_results['start_timestamp'] = pd.to_datetime(
rep_results['start_timestamp'], format='%Y-%m-%d %H:%M:%S.%f')
rep_results['end_timestamp'] = pd.to_datetime(
rep_results['end_timestamp'], format='%Y-%m-%d %H:%M:%S.%f')
return rep_results
return read(*args)
@staticmethod
def evaluate_logs(args):
def evaluate(settings, process_stats, sim_log):
"""Reads the simulation results stats
Args:
settings (dict): Path to jar and file names
rep (int): repetition number
"""
# print('Reading repetition:', (rep+1), sep=' ')
rep = (sim_log.iloc[0].run_num)
sim_values = list()
evaluator = sim.SimilarityEvaluator(
process_stats,
sim_log,
settings,
max_cases=1000)
metrics = [settings['sim_metric']]
if 'add_metrics' in settings.keys():
metrics = list(set(list(settings['add_metrics']) +
metrics))
for metric in metrics:
evaluator.measure_distance(metric)
sim_values.append({**{'run_num': rep}, **evaluator.similarity})
return sim_values
return evaluate(*args)
@staticmethod
def execute_simulator(args):
def sim_call(settings, rep):
"""Executes BIMP Simulations.
Args:
settings (dict): Path to jar and file names
rep (int): repetition number
"""
# message = 'Executing BIMP Simulations Repetition: ' + str(rep+1)
# print(message)
args = ['java', '-jar', settings['bimp_path'],
os.path.join(settings['output'],
settings['file'].split('.')[0]+'.bpmn'),
'-csv',
os.path.join(settings['output'], 'sim_data',
settings['file']
.split('.')[0]+'_'+str(rep+1)+'.csv')]
subprocess.run(args, check=True, stdout=subprocess.PIPE)
sim_call(*args)
@staticmethod
def get_traces(data, one_timestamp):
"""
        Returns the data split by caseid and ordered by start_timestamp.
"""
cases = list(set([x['caseid'] for x in data]))
traces = list()
for case in cases:
order_key = 'end_timestamp' if one_timestamp else 'start_timestamp'
trace = sorted(
list(filter(lambda x: (x['caseid'] == case), data)),
key=itemgetter(order_key))
traces.append(trace)
return traces
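# Illustrative sketch (not part of the original module): `get_traces` groups raw
# event dictionaries by case and orders each trace; the event records below are
# made-up placeholders.
#
#   events = [
#       {'caseid': 'C1', 'task': 'A', 'end_timestamp': '2021-01-01T09:00:00'},
#       {'caseid': 'C1', 'task': 'B', 'end_timestamp': '2021-01-01T09:05:00'},
#       {'caseid': 'C2', 'task': 'A', 'end_timestamp': '2021-01-01T09:02:00'},
#   ]
#   traces = DiscoveryOptimizer.get_traces(events, one_timestamp=True)
#   # -> one ordered list of events per case (case order is not guaranteed)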
|
the-stack_106_17892
|
"""Dell PowerConnect Driver."""
from __future__ import unicode_literals
from paramiko import SSHClient
import time
from os import path
from netmiko.cisco_base_connection import CiscoBaseConnection
class SSHClient_noauth(SSHClient):
def _auth(self, username, *args):
self._transport.auth_none(username)
return
class DellPowerConnectBase(CiscoBaseConnection):
"""Dell PowerConnect Driver."""
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self.ansi_escape_codes = True
self._test_channel_read()
self.set_base_prompt()
self.enable()
self.disable_paging(command="terminal datadump")
# Clear the read buffer
time.sleep(.3 * self.global_delay_factor)
self.clear_buffer()
def set_base_prompt(self, pri_prompt_terminator='>', alt_prompt_terminator='#',
delay_factor=1):
"""Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output."""
prompt = super(DellPowerConnectBase, self).set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor)
prompt = prompt.strip()
self.base_prompt = prompt
return self.base_prompt
def check_config_mode(self, check_string='(config)#'):
"""Checks if the device is in configuration mode"""
return super(DellPowerConnectBase, self).check_config_mode(check_string=check_string)
def config_mode(self, config_command='config'):
"""Enter configuration mode."""
return super(DellPowerConnectBase, self).config_mode(config_command=config_command)
class DellPowerConnectSSH(DellPowerConnectBase):
"""Dell PowerConnect Driver.
To make it work, we have to override the SSHClient _auth method.
    If we use login/password, the SSH server uses the (none) auth mechanism.
"""
def _build_ssh_client(self):
"""Prepare for Paramiko SSH connection.
See base_connection.py file for any updates.
"""
# Create instance of SSHClient object
# If user does not provide SSH key, we use noauth
if not self.use_keys:
remote_conn_pre = SSHClient_noauth()
else:
remote_conn_pre = SSHClient()
# Load host_keys for better SSH security
if self.system_host_keys:
remote_conn_pre.load_system_host_keys()
if self.alt_host_keys and path.isfile(self.alt_key_file):
remote_conn_pre.load_host_keys(self.alt_key_file)
# Default is to automatically add untrusted hosts (make sure appropriate for your env)
remote_conn_pre.set_missing_host_key_policy(self.key_policy)
return remote_conn_pre
def special_login_handler(self, delay_factor=1):
"""
Powerconnect presents with the following on login
User Name:
Password: ****
"""
delay_factor = self.select_delay_factor(delay_factor)
i = 0
time.sleep(delay_factor * .5)
output = ""
while i <= 12:
output = self.read_channel()
if output:
if 'User Name:' in output:
self.write_channel(self.username + self.RETURN)
elif 'Password:' in output:
self.write_channel(self.password + self.RETURN)
break
time.sleep(delay_factor * 1)
else:
self.write_channel(self.RETURN)
time.sleep(delay_factor * 1.5)
i += 1
class DellPowerConnectTelnet(DellPowerConnectBase):
"""Dell PowerConnect Telnet Driver."""
pass
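# Illustrative usage sketch (not part of the original driver). These classes are
# normally reached through Netmiko's ConnectHandler; the device details below are
# placeholders and the 'dell_powerconnect' device_type is assumed to map to the
# SSH driver above.
#
#   from netmiko import ConnectHandler
#   conn = ConnectHandler(device_type='dell_powerconnect', host='10.0.0.1',
#                         username='admin', password='secret')
#   print(conn.send_command('show version'))
#   conn.disconnect()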
|
the-stack_106_17894
|
# noxfile.py
"""Configure nox sessions."""
# standard library
import shutil
import tempfile
from pathlib import Path
from textwrap import dedent
# third party packages
import nox
# local packages
# define default sessions:
nox.options.sessions = (
"pre-commit",
"lint",
"tests",
"xdoctest",
"docs_rebuild",
)
def install_with_constraints(session, *args, **kwargs):
"""Install packages constrained by Poetry's lock file."""
with tempfile.NamedTemporaryFile() as requirements:
session.run(
"poetry",
"export",
"--dev",
"--format=requirements.txt",
"--without-hashes", # requ for working with pip resolver
f"--output={requirements.name}",
external=True,
)
session.install(f"--constraint={requirements.name}", *args, **kwargs)
@nox.session(python="3.10")
def tests(session):
"""Run test suite."""
args = session.posargs or [
"--cov",
"-m",
"not e2e and not con and not slow",
# add markers as "and not ..."
]
session.run("poetry", "install", "--no-dev", external=True)
install_with_constraints(
session,
"coverage[toml]",
"pytest",
"pytest-cov",
"pytest-mock",
)
session.run("pytest", *args)
# locations to run linting and formatting on:
locations = "src", "tests", "noxfile.py", "docs/conf.py"
@nox.session(python="3.10")
def lint(session):
"""Lint using flake8."""
args = session.posargs or locations
install_with_constraints(
session,
"darglint",
"flake8",
"flake8-bandit",
"flake8-black",
"flake8-bugbear",
"flake8-docstrings",
"flake8-isort",
"flake8-rst-docstrings",
"pep8-naming",
"pre-commit",
"pre-commit-hooks",
"pyupgrade",
)
    # installs flake8 and its plugins when 'nox -rs lint' is called
session.run("flake8", *args)
@nox.session(python="3.10")
def black(session):
"""Reformat code using black."""
args = session.posargs or locations
install_with_constraints(session, "black")
session.run("black", *args)
@nox.session(python="3.10")
def xdoctest(session):
"""Run examples with xdoctest."""
args = session.posargs or ["all"]
session.run("poetry", "install", "--no-dev", external=True)
install_with_constraints(session, "xdoctest", "pygments")
session.run("python", "-m", "xdoctest", "strutils", *args)
@nox.session(python="3.10")
def docs(session):
"""Build the documentation."""
session.run("poetry", "install", "--no-dev", external=True)
install_with_constraints(
session,
"sphinx",
"sphinx-click",
"furo",
"sphinx-paramlinks",
"sphinx-rtd-theme",
"pytest",
)
session.run("sphinx-build", "docs", "docs/_build")
@nox.session(python="3.10")
def docs_live(session):
"""Build and serve the documentation with live reloading on changes."""
args = session.posargs or ["--open-browser", "docs", "docs/_build"]
session.run("poetry", "install", "--no-dev", external=True)
install_with_constraints(
session,
"sphinx",
"sphinx-autobuild",
"sphinx-click",
"furo",
"sphinx-paramlinks",
"sphinx-rtd-theme",
"pytest",
)
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-autobuild", *args)
@nox.session(python="3.10")
def docs_rebuild(session):
"""Rebuild the entire sphinx documentation."""
session.run("poetry", "install", "--no-dev", external=True)
install_with_constraints(
session,
"sphinx",
"sphinx-click",
"furo",
"sphinx-paramlinks",
"sphinx-rtd-theme",
"pytest",
)
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-build", "docs", "docs/_build")
@nox.session(python="3.10")
def coverage(session):
"""Produce coverage report."""
install_with_constraints(session, "coverage[toml]", "codecov")
session.run("coverage", "xml", "--fail-under=0")
@nox.session(python="3.10")
def codecov(session):
"""Produce coverage report and try uploading to codecov."""
install_with_constraints(session, "coverage[toml]", "codecov")
session.run("coverage", "xml", "--fail-under=0")
session.run("codecov", *session.posargs)
def activate_virtualenv_in_precommit_hooks(session):
"""Activate virtualenv in hooks installed by pre-commit.
This function patches git hooks installed by pre-commit to activate the
session's virtual environment. This allows pre-commit to locate hooks in
that environment when invoked from git.
Parameters
----------
session
The Session object.
"""
assert session.bin is not None # noqa: S101
virtualenv = session.env.get("VIRTUAL_ENV")
if virtualenv is None:
return
hookdir = Path(".git") / "hooks"
if not hookdir.is_dir():
return
for hook in hookdir.iterdir():
if hook.name.endswith(".sample") or not hook.is_file():
continue
text = hook.read_text()
bindir = repr(session.bin)[1:-1] # strip quotes
if not (
Path("A") == Path("a") and bindir.lower() in text.lower() or bindir in text
):
continue
lines = text.splitlines()
if not (lines[0].startswith("#!") and "python" in lines[0].lower()):
continue
header = dedent(
f"""\
import os
os.environ["VIRTUAL_ENV"] = {virtualenv!r}
os.environ["PATH"] = os.pathsep.join((
{session.bin!r},
os.environ.get("PATH", ""),
))
"""
)
lines.insert(1, header)
hook.write_text("\n".join(lines))
@nox.session(name="pre-commit", python="3.10")
def precommit(session):
"""Lint using pre-commit."""
args = session.posargs or ["run", "--all-files", "--show-diff-on-failure"]
install_with_constraints(
session,
"darglint",
"black",
"flake8",
"flake8-bandit",
"flake8-black",
"flake8-bugbear",
"flake8-docstrings",
"flake8-isort",
"flake8-rst-docstrings",
"pep8-naming",
"pre-commit",
"pre-commit-hooks",
"pyupgrade",
)
session.run("pre-commit", *args)
if args and args[0] == "install":
activate_virtualenv_in_precommit_hooks(session)
@nox.session(python="3.10")
def safety(session):
"""Scan dependencies for insecure packages using safety."""
with tempfile.NamedTemporaryFile() as requirements:
session.run(
"poetry",
"export",
"--dev",
"--format=requirements.txt",
"--without-hashes",
f"--output={requirements.name}",
external=True,
)
session.install("safety")
session.run("safety", "check", f"--file={requirements.name}", "--full-report")
|
the-stack_106_17895
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from .. import core
__all__ = [
'start_gperf_profiler',
'stop_gperf_profiler',
]
def start_gperf_profiler():
core.start_imperative_gperf_profiler()
def stop_gperf_profiler():
core.stop_imperative_gperf_profiler()
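# Illustrative usage sketch (not part of the original module); it assumes a Paddle
# build compiled with gperftools support. Bracket the imperative (dygraph) code you
# want to profile between the two calls:
#
#   start_gperf_profiler()
#   run_training_step()      # placeholder for the user's dygraph workload
#   stop_gperf_profiler()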
|
the-stack_106_17897
|
# not via pip available, therefore stolen
# https://github.com/wizeline/sqlalchemy-pagination.git
# The MIT License (MIT)
#
# Copyright (c) 2016 Wizeline
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
from aiohttp import web
class Page:
def __init__(self, items, page, page_size, total):
self.items = items
self.previous_page = None
self.next_page = None
self.has_previous = page > 1
if self.has_previous:
self.previous_page = page - 1
previous_items = (page - 1) * page_size
self.has_next = previous_items + len(items) < total
if self.has_next:
self.next_page = page + 1
self.total = total
self.pages = int(math.ceil(total / float(page_size)))
self.current_page = page
async def paginate(session, request, query, by="limit", total=-1, pms=None):
if total is None:
# None is passed at the changes endpoint if there are no changes yet.
raise web.HTTPBadRequest(body="No changes have been logged.")
page_size = int(request.query.get("pagesize", 25))
if not (0 < page_size < 100):
raise web.HTTPBadRequest(body=f"page_size ({page_size}) must be > 0 and < 100")
page_count = max(math.ceil(total / page_size), 1)
# min page_count is 1
page = int(request.query.get("page", page_count))
# min page is 1
if not (0 < page <= page_count):
raise web.HTTPBadRequest(
body=f"page ({page}) must be between > 0 and <= {page_count}"
)
if by != "limit":
# BETWEEN on indexed values is way faster …
begin = (page - 1) * page_size
end = max(page * page_size - 1, 0)
q = query.filter(by.between(begin, end))
else:
begin = (page_count - page) * page_size
q = query.limit(page_size).offset(begin)
try:
if pms:
async with pms.measure():
r = await session.execute(q)
else:
r = await session.execute(q)
items = r.scalars().all()
assert len(items) <= page_size
except Exception as e:
print(e)
raise e
return Page(items, page, page_size, total)
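# Illustrative usage sketch (not part of the original module), e.g. inside an
# aiohttp handler backed by an async SQLAlchemy session. `Item`, `session` and
# `select`/`func` (from sqlalchemy) are assumptions, not defined in this file:
#
#   async def list_items(request):
#       total = await session.scalar(select(func.count()).select_from(Item))
#       page = await paginate(session, request,
#                             select(Item).order_by(Item.id), total=total)
#       return web.json_response({"page": page.current_page,
#                                 "pages": page.pages,
#                                 "total": page.total})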
|
the-stack_106_17898
|
#!/bin/python3
from abc import ABCMeta, abstractmethod
class Book(object, metaclass=ABCMeta):
def __init__(self, title, author):
self.title = title
self.author = author
@abstractmethod
    def display(self): pass
class MyBook(Book):
def __init__(self, title, author, price):
# super().__init__(title, author)
self.title = title
self.author = author
self.price = price
def display(self):
print("Title: " + self.title)
print("Author: " + self.author)
print("Price: " + str(self.price))
title = input()
author = input()
price = int(input())
new_novel = MyBook(title, author, price)
new_novel.display()
|
the-stack_106_17902
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from paypal.pro.exceptions import PayPalFailure
from paypal.pro.forms import ConfirmForm, PaymentForm
from paypal.pro.helpers import PayPalWPP, express_endpoint_for_token
class PayPalPro(object):
"""
This class-based view takes care of PayPal WebsitePaymentsPro (WPP).
PayPalPro has two separate flows - DirectPayment and ExpressPayFlow. In
DirectPayment the user buys on your site. In ExpressPayFlow the user is
    directed to PayPal to confirm their purchase. PayPalPro implements both
    flows. To use it, create an instance using these parameters:
item: a dictionary that holds information about the item being purchased.
For single item purchase (pay once):
Required Keys:
* amt: Float amount of the item.
Optional Keys:
* custom: You can set this to help you identify a transaction.
* invnum: Unique ID that identifies this transaction.
For recurring billing:
Required Keys:
* amt: Float amount for each billing cycle.
* billingperiod: String unit of measure for the billing cycle (Day|Week|SemiMonth|Month|Year)
* billingfrequency: Integer number of periods that make up a cycle.
* profilestartdate: The date to begin billing. "2008-08-05T17:00:00Z" UTC/GMT
* desc: Description of what you're billing for.
Optional Keys:
* trialbillingperiod: String unit of measure for trial cycle (Day|Week|SemiMonth|Month|Year)
* trialbillingfrequency: Integer # of periods in a cycle.
* trialamt: Float amount to bill for the trial period.
* trialtotalbillingcycles: Integer # of cycles for the trial payment period.
* failedinitamtaction: set to continue on failure (ContinueOnFailure / CancelOnFailure)
* maxfailedpayments: number of payments before profile is suspended.
* autobilloutamt: automatically bill outstanding amount.
* subscribername: Full name of the person who paid.
* profilereference: Unique reference or invoice number.
* taxamt: How much tax.
* initamt: Initial non-recurring payment due upon creation.
* currencycode: defaults to USD
* + a bunch of shipping fields
payment_form_cls: form class that will be used to display the payment form.
It should inherit from `paypal.pro.forms.PaymentForm` if you're adding more.
payment_template: template used to ask the dude for monies. To comply with
PayPal standards it must include a link to PayPal Express Checkout.
confirm_form_cls: form class that will be used to display the confirmation form.
It should inherit from `paypal.pro.forms.ConfirmForm`. It is only used in the Express flow.
success_url / fail_url: URLs to be redirected to when the payment successful or fails.
"""
errors = {
"processing": "There was an error processing your payment. Check your information and try again.",
"form": "Please correct the errors below and try again.",
"paypal": "There was a problem contacting PayPal. Please try again later."
}
def __init__(self, item=None, payment_form_cls=PaymentForm,
payment_template="pro/payment.html", confirm_form_cls=ConfirmForm,
confirm_template="pro/confirm.html", success_url="?success",
fail_url=None, context=None, form_context_name="form", nvp_handler=None):
self.item = item
self.payment_form_cls = payment_form_cls
self.payment_template = payment_template
self.confirm_form_cls = confirm_form_cls
self.confirm_template = confirm_template
self.success_url = success_url
self.fail_url = fail_url
self.context = context or {}
self.form_context_name = form_context_name
self.nvp_handler = nvp_handler
if nvp_handler is None:
warnings.warn(
"You didn't pass `nvp_handler` to PayPalPro. You should pass a callback "
"here instead of using the `payment_was_successful` "
"signal", DeprecationWarning)
def __call__(self, request):
"""Return the appropriate response for the state of the transaction."""
self.request = request
if request.method == "GET":
if self.should_redirect_to_express():
return self.redirect_to_express()
elif self.should_render_confirm_form():
return self.render_confirm_form()
elif self.should_render_payment_form():
return self.render_payment_form()
else:
if self.should_validate_confirm_form():
return self.validate_confirm_form()
elif self.should_validate_payment_form():
return self.validate_payment_form()
# Default to the rendering the payment form.
return self.render_payment_form()
def is_recurring(self):
return self.item is not None and 'billingperiod' in self.item
def should_redirect_to_express(self):
return 'express' in self.request.GET
def should_render_confirm_form(self):
return 'token' in self.request.GET and 'PayerID' in self.request.GET
def should_render_payment_form(self):
return True
def should_validate_confirm_form(self):
return 'token' in self.request.POST and 'PayerID' in self.request.POST
def should_validate_payment_form(self):
return True
def render_payment_form(self):
"""Display the DirectPayment for entering payment information."""
self.context[self.form_context_name] = self.payment_form_cls()
return TemplateResponse(self.request, self.payment_template, self.context)
def validate_payment_form(self):
"""Try to validate and then process the DirectPayment form."""
form = self.payment_form_cls(self.request.POST)
if form.is_valid():
success = form.process(self.request, self.item)
if success:
return HttpResponseRedirect(self.success_url)
else:
self.context['errors'] = self.errors['processing']
self.context[self.form_context_name] = form
self.context.setdefault("errors", self.errors['form'])
return TemplateResponse(self.request, self.payment_template, self.context)
def redirect_to_express(self):
"""
First step of ExpressCheckout. Redirect the request to PayPal using the
data returned from setExpressCheckout.
"""
wpp = PayPalWPP(self.request)
try:
nvp_obj = wpp.setExpressCheckout(self.item)
except PayPalFailure:
self.context['errors'] = self.errors['paypal']
return self.render_payment_form()
else:
return HttpResponseRedirect(express_endpoint_for_token(nvp_obj.token))
def render_confirm_form(self):
"""
Second step of ExpressCheckout. Display an order confirmation form which
contains hidden fields with the token / PayerID from PayPal.
"""
initial = dict(token=self.request.GET['token'], PayerID=self.request.GET['PayerID'])
self.context[self.form_context_name] = self.confirm_form_cls(initial=initial)
return TemplateResponse(self.request, self.confirm_template, self.context)
def validate_confirm_form(self):
"""
        Third and final step of ExpressCheckout. The user has pressed the confirm
        button and we can send the final confirmation to PayPal using the data from
        the POSTed form.
"""
wpp = PayPalWPP(self.request)
pp_data = dict(token=self.request.POST['token'], payerid=self.request.POST['PayerID'])
self.item.update(pp_data)
# @@@ This check and call could be moved into PayPalWPP.
try:
if self.is_recurring():
nvp = wpp.createRecurringPaymentsProfile(self.item)
else:
nvp = wpp.doExpressCheckoutPayment(self.item)
self.handle_nvp(nvp)
except PayPalFailure:
self.context['errors'] = self.errors['processing']
return self.render_payment_form()
else:
return HttpResponseRedirect(self.success_url)
def handle_nvp(self, nvp):
if self.nvp_handler is not None:
self.nvp_handler(nvp)
|
the-stack_106_17903
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.Person import Person
class Injured(object):
def __init__(self):
self._cert_name = None
self._cert_no = None
self._cert_type = None
self._damage_type = None
self._injured_identity = None
self._medical_assessor = None
self._medical_surveyor = None
self._mobile_no = None
@property
def cert_name(self):
return self._cert_name
@cert_name.setter
def cert_name(self, value):
self._cert_name = value
@property
def cert_no(self):
return self._cert_no
@cert_no.setter
def cert_no(self, value):
self._cert_no = value
@property
def cert_type(self):
return self._cert_type
@cert_type.setter
def cert_type(self, value):
self._cert_type = value
@property
def damage_type(self):
return self._damage_type
@damage_type.setter
def damage_type(self, value):
self._damage_type = value
@property
def injured_identity(self):
return self._injured_identity
@injured_identity.setter
def injured_identity(self, value):
self._injured_identity = value
@property
def medical_assessor(self):
return self._medical_assessor
@medical_assessor.setter
def medical_assessor(self, value):
if isinstance(value, Person):
self._medical_assessor = value
else:
self._medical_assessor = Person.from_alipay_dict(value)
@property
def medical_surveyor(self):
return self._medical_surveyor
@medical_surveyor.setter
def medical_surveyor(self, value):
if isinstance(value, Person):
self._medical_surveyor = value
else:
self._medical_surveyor = Person.from_alipay_dict(value)
@property
def mobile_no(self):
return self._mobile_no
@mobile_no.setter
def mobile_no(self, value):
self._mobile_no = value
def to_alipay_dict(self):
params = dict()
if self.cert_name:
if hasattr(self.cert_name, 'to_alipay_dict'):
params['cert_name'] = self.cert_name.to_alipay_dict()
else:
params['cert_name'] = self.cert_name
if self.cert_no:
if hasattr(self.cert_no, 'to_alipay_dict'):
params['cert_no'] = self.cert_no.to_alipay_dict()
else:
params['cert_no'] = self.cert_no
if self.cert_type:
if hasattr(self.cert_type, 'to_alipay_dict'):
params['cert_type'] = self.cert_type.to_alipay_dict()
else:
params['cert_type'] = self.cert_type
if self.damage_type:
if hasattr(self.damage_type, 'to_alipay_dict'):
params['damage_type'] = self.damage_type.to_alipay_dict()
else:
params['damage_type'] = self.damage_type
if self.injured_identity:
if hasattr(self.injured_identity, 'to_alipay_dict'):
params['injured_identity'] = self.injured_identity.to_alipay_dict()
else:
params['injured_identity'] = self.injured_identity
if self.medical_assessor:
if hasattr(self.medical_assessor, 'to_alipay_dict'):
params['medical_assessor'] = self.medical_assessor.to_alipay_dict()
else:
params['medical_assessor'] = self.medical_assessor
if self.medical_surveyor:
if hasattr(self.medical_surveyor, 'to_alipay_dict'):
params['medical_surveyor'] = self.medical_surveyor.to_alipay_dict()
else:
params['medical_surveyor'] = self.medical_surveyor
if self.mobile_no:
if hasattr(self.mobile_no, 'to_alipay_dict'):
params['mobile_no'] = self.mobile_no.to_alipay_dict()
else:
params['mobile_no'] = self.mobile_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Injured()
if 'cert_name' in d:
o.cert_name = d['cert_name']
if 'cert_no' in d:
o.cert_no = d['cert_no']
if 'cert_type' in d:
o.cert_type = d['cert_type']
if 'damage_type' in d:
o.damage_type = d['damage_type']
if 'injured_identity' in d:
o.injured_identity = d['injured_identity']
if 'medical_assessor' in d:
o.medical_assessor = d['medical_assessor']
if 'medical_surveyor' in d:
o.medical_surveyor = d['medical_surveyor']
if 'mobile_no' in d:
o.mobile_no = d['mobile_no']
return o
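# Illustrative round-trip sketch (not part of the generated SDK file); the field
# values below are placeholders:
#
#   injured = Injured.from_alipay_dict({
#       "cert_name": "Zhang San",
#       "cert_no": "110101199001011234",
#       "cert_type": "IDENTITY_CARD",
#       "mobile_no": "13800000000",
#   })
#   print(injured.to_alipay_dict())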
|
the-stack_106_17905
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.neighbors import KDTree
from sklearn.neighbors import KNeighborsClassifier
class TrustScore:
"""
Trust Score: a measure of classifier uncertainty based on nearest neighbors.
"""
def __init__(self, k=10, alpha=0., filtering="none", min_dist=1e-12):
"""
k and alpha are the tuning parameters for the filtering,
        filtering: method of filtering. Options are "none", "density",
"uncertainty"
min_dist: some small number to mitigate possible division by 0.
"""
self.k = k
self.filtering = filtering
self.alpha = alpha
self.min_dist = min_dist
def filter_by_density(self, X):
"""Filter out points with low kNN density.
Args:
X: an array of sample points.
Returns:
A subset of the array without points in the bottom alpha-fraction of
original points of kNN density.
"""
kdtree = KDTree(X)
knn_radii = kdtree.query(X, k=self.k)[0][:, -1]
eps = np.percentile(knn_radii, (1 - self.alpha) * 100)
return X[np.where(knn_radii <= eps)[0], :]
def filter_by_uncertainty(self, X, y):
"""Filter out points with high label disagreement amongst its kNN neighbors.
Args:
X: an array of sample points.
Returns:
A subset of the array without points in the bottom alpha-fraction of
samples with highest disagreement amongst its k nearest neighbors.
"""
neigh = KNeighborsClassifier(n_neighbors=self.k)
neigh.fit(X, y)
confidence = neigh.predict_proba(X)
cutoff = np.percentile(confidence, self.alpha * 100)
unfiltered_idxs = np.where(confidence >= cutoff)[0]
return X[unfiltered_idxs, :], y[unfiltered_idxs]
def fit(self, X, y):
"""Initialize trust score precomputations with training data.
WARNING: assumes that the labels are 0-indexed (i.e.
0, 1,..., n_labels-1).
Args:
X: an array of sample points.
y: corresponding labels.
"""
self.n_labels = np.max(y) + 1
self.kdtrees = [None] * self.n_labels
if self.filtering == "uncertainty":
X_filtered, y_filtered = self.filter_by_uncertainty(X, y)
for label in range(self.n_labels):
if self.filtering == "none":
X_to_use = X[np.where(y == label)[0]]
self.kdtrees[label] = KDTree(X_to_use)
elif self.filtering == "density":
X_to_use = self.filter_by_density(X[np.where(y == label)[0]])
self.kdtrees[label] = KDTree(X_to_use)
elif self.filtering == "uncertainty":
X_to_use = X_filtered[np.where(y_filtered == label)[0]]
self.kdtrees[label] = KDTree(X_to_use)
if len(X_to_use) == 0:
print(
"Filtered too much or missing examples from a label! Please lower "
"alpha or check data.")
def get_score(self, X, y_pred):
"""Compute the trust scores.
Given a set of points, determines the distance to each class.
Args:
X: an array of sample points.
y_pred: The predicted labels for these points.
Returns:
          The trust score, which is the ratio of the distance to the closest class
          that was not predicted to the distance to the predicted class.
"""
d = np.tile(None, (X.shape[0], self.n_labels))
for label_idx in range(self.n_labels):
d[:, label_idx] = self.kdtrees[label_idx].query(X, k=2)[0][:, -1]
sorted_d = np.sort(d, axis=1)
d_to_pred = d[range(d.shape[0]), y_pred]
d_to_closest_not_pred = np.where(sorted_d[:, 0] != d_to_pred,
sorted_d[:, 0], sorted_d[:, 1])
return d_to_closest_not_pred / (d_to_pred + self.min_dist)
class KNNConfidence:
"""Baseline which uses disagreement to kNN classifier.
"""
def __init__(self, k=10):
self.k = k
def fit(self, X, y):
self.kdtree = KDTree(X)
self.y = y
def get_score(self, X, y_pred):
knn_idxs = self.kdtree.query(X, k=self.k)[1]
knn_outputs = self.y[knn_idxs]
return np.mean(
knn_outputs == np.transpose(np.tile(y_pred, (self.k, 1))), axis=1)
|
the-stack_106_17906
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "nub"
app_title = "Nub"
app_publisher = "Anvil Team"
app_description = "Al-Nuran Bank Customization"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "[email protected]"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/nub/css/nub.css"
# app_include_js = "/assets/nub/js/nub.js"
# include js, css files in header of web template
# web_include_css = "/assets/nub/css/nub.css"
# web_include_js = "/assets/nub/js/nub.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
doctype_js = {"Asset" : "public/js/asset.js"}
doctype_list_js = {"Asset" : "public/js/asset_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "nub.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "nub.install.before_install"
# after_install = "nub.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "nub.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
doc_events = {}
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "nub.tasks.all"
# ],
# "daily": [
# "nub.tasks.daily"
# ],
# "hourly": [
# "nub.tasks.hourly"
# ],
# "weekly": [
# "nub.tasks.weekly"
# ]
# "monthly": [
# "nub.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "nub.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "nub.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "nub.task.get_dashboard_data"
# }
fixtures = [
{
"dt": "Custom Field",
"filters": [
[
"name",
"in",
[
"Asset-is_available",
"Asset-barcode_serial_number",
"Asset-barcode"
]
]
]
},
{
"dt": "Print Format",
"filters": [
[
"name",
"in",
[
"Asset Printer - Print Format"
]
]
]
}
]
|
the-stack_106_17907
|
#!/usr/bin/env python3
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from select import select
from sys import stdin
from termios import tcsetattr, tcgetattr, TCSADRAIN
import pyaudio
import tty
import wave
from os.path import isfile
from prettyparse import create_parser
usage = '''
Record audio samples for use with precise
:-w --width int 2
Sample width of audio
:-r --rate int 16000
Sample rate of audio
:-c --channels int 1
Number of audio channels
'''
def key_pressed():
return select([stdin], [], [], 0) == ([stdin], [], [])
def termios_wrapper(main):
global orig_settings
orig_settings = tcgetattr(stdin)
try:
hide_input()
main()
finally:
tcsetattr(stdin, TCSADRAIN, orig_settings)
def show_input():
tcsetattr(stdin, TCSADRAIN, orig_settings)
def hide_input():
tty.setcbreak(stdin.fileno())
orig_settings = None
RECORD_KEY = ' '
EXIT_KEY_CODE = 27
def record_until(p, should_return, args):
chunk_size = 1024
stream = p.open(format=p.get_format_from_width(args.width), channels=args.channels,
rate=args.rate, input=True, frames_per_buffer=chunk_size)
frames = []
while not should_return():
frames.append(stream.read(chunk_size))
stream.stop_stream()
stream.close()
return b''.join(frames)
def save_audio(name, data, args):
wf = wave.open(name, 'wb')
wf.setnchannels(args.channels)
wf.setsampwidth(args.width)
wf.setframerate(args.rate)
wf.writeframes(data)
wf.close()
def next_name(name):
name += '.wav'
pos, num_digits = None, None
try:
pos = name.index('#')
num_digits = name.count('#')
except ValueError:
print("Name must contain at least one # to indicate where to put the number.")
raise
def get_name(i):
nonlocal name, pos
return name[:pos] + str(i).zfill(num_digits) + name[pos + num_digits:]
i = 0
while True:
if not isfile(get_name(i)):
break
i += 1
return get_name(i)
def wait_to_continue():
while True:
c = stdin.read(1)
if c == RECORD_KEY:
return True
elif ord(c) == EXIT_KEY_CODE:
return False
def record_until_key(p, args):
def should_return():
return key_pressed() and stdin.read(1) == RECORD_KEY
return record_until(p, should_return, args)
def _main():
parser = create_parser(usage)
parser.add_argument('file_label', nargs='?', help='File label (Ex. recording-##)')
args = parser.parse_args()
show_input()
args.file_label = args.file_label or input("File label (Ex. recording-##): ")
args.file_label = args.file_label + ('' if '#' in args.file_label else '-##')
hide_input()
p = pyaudio.PyAudio()
while True:
print('Press space to record (esc to exit)...')
if not wait_to_continue():
break
print('Recording...')
d = record_until_key(p, args)
name = next_name(args.file_label)
save_audio(name, d, args)
print('Saved as ' + name)
p.terminate()
def main():
termios_wrapper(_main)
if __name__ == '__main__':
main()
|
the-stack_106_17909
|
# coding=utf-8
#
# Copyright 2021 Biderman et al. This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT-2 model."""
import torch
from collections import defaultdict
from functools import partial
from megatron.model.utils import Lambda, SequentialWrapper
from megatron.model.norms import get_norm
from megatron.model.init_functions import get_init_methods
from megatron import mpu
from megatron.mpu import ParallelRelativePositionBias
import megatron.fp16 as fp16
from megatron.model.transformer import ParallelTransformerLayerPipe, NormPipe, ParallelLinearPipe, parallel_lm_logits
from megatron.model.gmlp import GMLPBlock
from megatron.model.word_embeddings import EmbeddingPipe
# Pipeline parallelism
from deepspeed.pipe import PipelineModule, LayerSpec, TiedLayerSpec
def gpt2_attention_mask_func(attention_scores, ltor_mask):
attention_scores.masked_fill_(ltor_mask, -10000.0)
return attention_scores
def cross_entropy(output, labels, _fp16=False):
""" From pretrain_gpt2:forward_step() """
"""
if self.fp16_lm_cross_entropy:
assert output.dtype == torch.half
loss = mpu.vocab_parallel_cross_entropy(output, labels)
else:
loss = mpu.vocab_parallel_cross_entropy(output.float(), labels)
return loss
"""
labels, loss_mask = labels[0], labels[1]
if _fp16:
assert (output.dtype == torch.half and loss_mask.dtype == torch.half)
losses = mpu.vocab_parallel_cross_entropy(output.contiguous(), labels)
else:
output = fp16.fp16_to_fp32(output)
losses = mpu.vocab_parallel_cross_entropy(output.contiguous(), labels)
loss_mask = loss_mask.view(-1)
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
return loss
class GPT2ModelPipe(PipelineModule, torch.nn.Module):
"""GPT2Model adapted for pipeline parallelism.
The largest change is flattening the GPTModel class so we can express it as a
sequence of layers including embedding, transformer layers, and output.
"""
def __init__(self, neox_args, num_tokentypes=0, parallel_output=True, topology=None, inference=False, get_key_value=True):
self.neox_args = neox_args
self._inference = inference
self.get_key_value = get_key_value if inference else False
self.parallel_output = parallel_output
self.hidden_size = self.neox_args.hidden_size
self.num_tokentypes = num_tokentypes
self.init_method, self.output_layer_init_method = get_init_methods(self.neox_args)
self.embedding_type = self.neox_args.pos_emb
self.specs = []
self.init_specs()
loss_fn = partial(cross_entropy, _fp16=self.neox_args.fp16_lm_cross_entropy)
if self.neox_args.checkpoint_activations:
interval = self.neox_args.checkpoint_num_layers
else:
interval = 0
super().__init__(layers=self.specs,
loss_fn=loss_fn if not self._inference else None,
topology=topology,
activation_checkpoint_interval=interval,
partition_method=neox_args.pipe_partition_method,
checkpointable_layers=['GMLPBlock', 'ParallelTransformerLayerPipe'])
def init_specs(self):
weight_tying = not self.neox_args.no_weight_tying
if self.embedding_type == 'rpe':
rpe_emb = ParallelRelativePositionBias(neox_args=self.neox_args, causal=True, num_buckets=self.neox_args.rpe_num_buckets,
max_distance=self.neox_args.rpe_max_distance,
heads=self.neox_args.num_attention_heads)
self.specs = []
# Embedding layer
# input will be (input_ids, position_ids, attention_mask) in Training
# and (input_ids, position_ids, attention_mask, layer_past) in Inference
if weight_tying:
self.specs.append(TiedLayerSpec('embed',
EmbeddingPipe,
self.neox_args,
self.hidden_size,
self.neox_args.padded_vocab_size,
self.neox_args.max_position_embeddings,
self.neox_args.hidden_dropout,
self.init_method,
self.num_tokentypes,
tied_weight_attr='word_embeddings_weight'))
else:
self.specs.append(LayerSpec(EmbeddingPipe,
self.neox_args,
self.hidden_size,
self.neox_args.padded_vocab_size,
self.neox_args.max_position_embeddings,
self.neox_args.hidden_dropout,
self.init_method,
self.num_tokentypes))
# NB: in inference, the attention mask always needs to be the *last* item in the args when being passed from
# one stage to the next, because deepspeed is hacks on top of hacks.
#
# outputs are now
# Train: (hidden_states, ((maybe) rotary_pos_emb), attention_mask)
# Inference: (hidden_states, layer_past, ((maybe) rotary_pos_emb), attention_mask)
#
        # data format change for hidden_states to avoid explicit transposes : [b s h] --> [s b h]
if self._inference:
# we need to add a container to cache `presents` from each layer's forward pass
# inputs/outputs are now (hidden_states, layer_past, presents, attention_mask)
self.specs.append(lambda x: (x[0].transpose(0, 1).contiguous(), x[1], torch.Tensor(), *x[2:]))
else:
self.specs.append(lambda x: (x[0].transpose(0, 1).contiguous(), *x[1:]))
# Transformer layers
for i in range(self.neox_args.num_layers):
layer_type = self.neox_args.attention_config[i]
if layer_type in ["gmlp", "amlp"]:
self.specs.append(
LayerSpec(
GMLPBlock,
init_method=self.init_method,
layer_number=i,
output_layer_init_method=self.output_layer_init_method,
neox_args=self.neox_args,
mask_fn=gpt2_attention_mask_func
)
)
else:
self.specs.append(
LayerSpec(
ParallelTransformerLayerPipe,
neox_args=self.neox_args,
attention_mask_func=gpt2_attention_mask_func,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
layer_number=i,
rpe=rpe_emb if self.neox_args.pos_emb == 'rpe' else None,
rotary=self.neox_args.pos_emb == 'rotary',
get_key_value=self.get_key_value
)
)
if self._inference:
# we can get rid of the mask / pasts now
# from (hidden_states, layer_past, presents, attention_mask)
# to (hidden_states.T, presents)
self.specs.append(lambda x: (x[0].transpose(0, 1).contiguous(), x[2]))
else:
# Undo data format change and drop mask
self.specs.append(lambda x: x[0].transpose(0, 1).contiguous())
# Final layernorm after transformer layers
# NormPipe is a helper class to pass presents through to the output when doing inference
norm, eps = get_norm(self.neox_args)
self.specs.append(
LayerSpec(NormPipe,
norm,
self.neox_args.hidden_size,
eps=eps))
# outputs are now
# Train: hidden_states
# Inference: (hidden_states, presents)
# XXX forward_method_parallel_output is assumed to be None, but we're not in a
# fwd method to assert
def _logits_helper(embedding, lm_output):
"""Just a wrapper to massage inputs/outputs from pipeline. """
if self._inference and len(lm_output) == 2:
hidden_states, presents = lm_output
logits = parallel_lm_logits(
hidden_states,
embedding.word_embeddings_weight,
self.parallel_output)
return logits, presents
else:
logits = parallel_lm_logits(
lm_output,
embedding.word_embeddings_weight,
self.parallel_output)
return logits
if weight_tying:
self.specs.append(
TiedLayerSpec('embed',
EmbeddingPipe,
self.neox_args,
self.hidden_size,
self.neox_args.padded_vocab_size,
self.neox_args.max_position_embeddings,
self.neox_args.hidden_dropout,
self.init_method,
self.num_tokentypes,
forward_fn=_logits_helper,
tied_weight_attr='word_embeddings_weight')
)
else:
self.specs.append(
LayerSpec(
ParallelLinearPipe,
neox_args=self.neox_args,
init_method=self.init_method,
parallel_output=self.parallel_output
)
)
# output in training should just be logits
# in inference it will be (logits, presents) (assuming get_key_value) is true
def to_sequential(self):
"""
Transforms the PipelineModule to a plain nn.Sequential module
:return:
"""
layers = []
tied_layers = defaultdict(list)
for n, spec in enumerate(self.specs):
if isinstance(spec, TiedLayerSpec):
if spec.key in tied_layers:
# receiver
layers.append(Lambda(lambda x: spec.forward_fn(tied_layers[spec.key][0], x)))
else:
# owner
module = spec.build(log=False)
layers.append(module)
tied_layers[spec.key].append(module)
elif isinstance(spec, LayerSpec):
layers.append(spec.build(log=False))
elif hasattr(spec, '__call__'):
# check that it's a callable function
layers.append(Lambda(spec))
else:
raise ValueError(f'Layer number {n} ({spec}) Not recognized')
model = SequentialWrapper(layers,
self.activation_checkpoint_interval,
self.activation_checkpoint_func,
parent_class_name=self.__class__.__name__)
return model
|
the-stack_106_17912
|
from js9 import j
from . import (PRIORITY_NORMAL, PRIORITY_RECURRING, PRIORITY_SYSTEM,
TASK_STATE_ERROR, TASK_STATE_NEW, TASK_STATE_OK,
TASK_STATE_RUNNING)
from .task import Task
def _instantiate_task(task, service):
func = getattr(service, task['action_name'])
t = Task(func, task['args'])
if task['state'] in [TASK_STATE_RUNNING, TASK_STATE_NEW]:
t.state = TASK_STATE_NEW
else:
t.state = task['state']
t.guid = task['guid']
if task['eco']:
t._eco = j.core.errorhandler.getErrorConditionObject(ddict=task['eco'])
t._result = task.get('result')
t._created = task.get('created')
t._duration = task.get('duration')
return t
def wait_all(tasks, timeout=60, die=False):
"""
helper method to wait for a list of tasks
:param tasks: iterable that contains zerorobot.task.Task objects
:type tasks: iterable
:param timeout: timeout per task, defaults to 60
:param timeout: int, optional
    :param die: if True, raise any exception that was raised in the tasks, defaults to False
:param die: bool, optional
:raises TypeError: raised if the iterable does not contains only zerorobot.task.Task
:return: a list of all the result from the tasks
:rtype: list
"""
results = []
for task in iter(tasks):
if not isinstance(task, Task):
raise TypeError("element of tasks should be an instance of zerorobot.task.Task")
try:
results.append(task.wait(timeout=timeout, die=die).result)
except TimeoutError:
continue
return results
|
the-stack_106_17914
|
"""
file handling utilities
"""
import os
import shutil
import fnmatch
import errno
def strip_src(file, src):
"""
remove the src path from a filename
"""
return file.replace(src, '')
def get_dest_file(file, src, dest):
"""
get the output file, make directories if needed
"""
f = dest + strip_src(file, src)
d = os.path.dirname(f)
try:
os.makedirs(d)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
return f
def copy_file_to_dest(file, src, dest):
"""
copy a file from src to dest, return the dest filename
"""
target = get_dest_file(file, src, dest)
shutil.copyfile(file, target)
return target
def get_files(src, pat):
"""
recursively collect files
"""
matches = []
for root, dirnames, filenames in os.walk(src):
for filename in fnmatch.filter(filenames, pat):
matches.append(os.path.join(root, filename))
return matches
def copy_files(src, dest, pat="*.html"):
"""
copy over ALL files from src to dest, return only those matching pat
"""
files = get_files(src, "*")
copied = [copy_file_to_dest(f, src, dest) for f in files]
return fnmatch.filter(copied, pat)
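# Illustrative sketch (not part of the original module): copy an entire source
# tree into a build directory and collect the HTML files that were copied.
# The directory names below are hypothetical.
def _example_copy_site():
    html_files = copy_files('site/src', 'site/build', pat="*.html")
    for name in html_files:
        print("copied", name)
    return html_files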
|
the-stack_106_17915
|
#!/usr/bin/python
'''
Usage: python KEGG-decoder.py <KOALA INPUT> <FUNCTION LIST FORMAT>
Designed to parse through a blastKoala or ghostKoala output to determine
the completeness of various KEGG pathways
Dependencies:
Pandas - http://pandas.pydata.org/pandas-docs/stable/install.html
Seaborn - http://seaborn.pydata.org/installing.html
matplotlib - http://matplotlib.org/users/installing.html
For extended information about KEGG assignments, genes and pathways,
please see accompanying document "KOALA_definitions.txt"
'''
def C5_PPH(ko_match):
#Check for presence of 9 genes
total = 0
#glutamyl-tRNA reductase, glutamate-1-semialdehyde 2,1-aminomutase
#porphobilinogen synthase, hydroxymethylbilane synthase
#uroporphyrinogen decarboxylase, ferrochelatase
single_ko = ['K02492', 'K01845', 'K01698', 'K01749', 'K01599', 'K01772']
for i in single_ko:
if i in ko_match:
total += 1
#uroporphyrinogen-III synthase
if ('K01719' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
#coproporphyrinogen III oxidase
if ('K00228' in ko_match or 'K02495' in ko_match):
total += 1
#protoporphyrinogen oxidase
if ('K00230' in ko_match or 'K00231' in ko_match or 'K08973' in ko_match):
total += 1
value = float(total)/float(9)
return {'C5_PPH': float("%.2f" % (value))}
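# Illustrative sketch (not part of the original script): each pathway function
# takes the list of KO identifiers assigned to one genome and returns the
# fraction of required steps present. A genome carrying only the six
# single-copy KOs below (and none of the alternative KOs) scores 6/9 = 0.67.
def _example_c5_pph_score():
    ko_list = ['K02492', 'K01845', 'K01698', 'K01749', 'K01599', 'K01772']
    return C5_PPH(ko_list)  # {'C5_PPH': 0.67}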
def C5_CPH(ko_match):
#Check for presence of 9 genes
total = 0
#glutamyl-tRNA reductase, glutamate-1-semialdehyde 2,1-aminomutase
#porphobilinogen synthase, hydroxymethylbilane synthase
#uroporphyrinogen decarboxylase, coproporphyrinogen III oxidase, ferrochelatase
single_ko = ['K02492', 'K01845', 'K01698', 'K01749', 'K01599', 'K00231', 'K01772']
for i in single_ko:
if i in ko_match:
total += 1
#uroporphyrinogen-III synthase
if ('K01719' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
#heme synthase
if ('K00435' in ko_match or 'K22227' in ko_match):
total += 1
value = float(total)/float(9)
return {'C5_CPH': float("%.2f" % (value))}
def C5_SIRO(ko_match):
#Check for presence of 11 genes
total = 0
#glutamyl-tRNA reductase, glutamate-1-semialdehyde 2,1-aminomutase
#porphobilinogen synthase, hydroxymethylbilane synthase
#siroheme decarboxylase, Fe-coproporphyrin III synthase
single_ko = ['K02492', 'K01845', 'K01698', 'K01749', 'K22225', 'K22226']
for i in single_ko:
if i in ko_match:
total += 1
#uroporphyrinogen-III synthase
if ('K01719' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
#uroporphyrin-III C-methyltransferase
if ('K00589' in ko_match or 'K02302' in ko_match or 'K02303' in ko_match or 'K02496' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
#precorrin-2 dehydrogenase
if ('K02302' in ko_match or 'K02304' in ko_match):
total += 1
#sirohydrochlorin ferrochelatase
if ('K02302' in ko_match or 'K02304' in ko_match or 'K03794' in ko_match):
total += 1
#heme synthase
if ('K00435' in ko_match or 'K22227' in ko_match):
total += 1
value = float(total)/float(11)
return {'C5_SIRO': float("%.2f" % (value))}
def C4_PPH(ko_match):
#Check for presence of 8 genes
total = 0
#5-aminolevulinate synthase
#porphobilinogen synthase, hydroxymethylbilane synthase
#uroporphyrinogen decarboxylase, ferrochelatase
single_ko = ['K00643', 'K01698', 'K01749', 'K01599', 'K01772']
for i in single_ko:
if i in ko_match:
total += 1
#uroporphyrinogen-III synthase
if ('K01719' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
#coproporphyrinogen III oxidase
if ('K00228' in ko_match or 'K02495' in ko_match):
total += 1
#protoporphyrinogen oxidase
if ('K00230' in ko_match or 'K00231' in ko_match or 'K08973' in ko_match):
total += 1
value = float(total)/float(8)
return {'C4_PPH': float("%.2f" % (value))}
def C4_CPH(ko_match):
#Check for presence of 8 genes
total = 0
#5-aminolevulinate synthase
#porphobilinogen synthase, hydroxymethylbilane synthase
#uroporphyrinogen decarboxylase, coproporphyrinogen III oxidase, ferrochelatase
single_ko = ['K00643', 'K01698', 'K01749', 'K01599', 'K00231', 'K01772']
for i in single_ko:
if i in ko_match:
total += 1
#uroporphyrinogen-III synthase
if ('K01719' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
#heme synthase
if ('K00435' in ko_match or 'K22227' in ko_match):
total += 1
value = float(total)/float(8)
return {'C4_CPH': float("%.2f" % (value))}
def C4_SIRO(ko_match):
#Check for presence of 10 genes
total = 0
#5-aminolevulinate synthase
#porphobilinogen synthase, hydroxymethylbilane synthase
#siroheme decarboxylase, Fe-coproporphyrin III synthase
single_ko = ['K00643', 'K01698', 'K01749', 'K22225', 'K22226']
for i in single_ko:
if i in ko_match:
total += 1
#uroporphyrinogen-III synthase
if ('K01719' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
#uroporphyrin-III C-methyltransferase
if ('K00589' in ko_match or 'K02302' in ko_match or 'K02303' in ko_match or 'K02496' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
#precorrin-2 dehydrogenase
if ('K02302' in ko_match or 'K02304' in ko_match):
total += 1
#sirohydrochlorin ferrochelatase
if ('K02302' in ko_match or 'K02304' in ko_match or 'K03794' in ko_match):
total += 1
#heme synthase
if ('K00435' in ko_match or 'K22227' in ko_match):
total += 1
value = float(total)/float(10)
return {'C4_SIRO': float("%.2f" % (value))}
def upper_C5(ko_match):
#Check for presence of 2 genes
total = 0
#glutamyl-tRNA reductase, glutamate-1-semialdehyde 2,1-aminomutase
single_ko = ['K02492', 'K01845']
for i in single_ko:
if i in ko_match:
total += 1
value = float(total)/float(2)
return {'upper_C5': float("%.2f" % (value))}
def upper_C4(ko_match):
    #Check for presence of 1 gene
total = 0
#5-aminolevulinate synthase
single_ko = ['K00643']
for i in single_ko:
if i in ko_match:
total += 1
value = float(total)/float(1)
return {'upper_C4': float("%.2f" % (value))}
def Common(ko_match):
#Check for presence of 3 genes
total = 0
#porphobilinogen synthase, hydroxymethylbilane synthase
single_ko = ['K01698', 'K01749']
for i in single_ko:
if i in ko_match:
total += 1
#uroporphyrinogen-III synthase
if ('K01719' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
value = float(total)/float(3)
return {'Common': float("%.2f" % (value))}
def lower_PPH(ko_match):
#Check for presence of 4 genes
total = 0
#uroporphyrinogen decarboxylase, ferrochelatase
single_ko = ['K01599', 'K01772']
for i in single_ko:
if i in ko_match:
total += 1
#coproporphyrinogen III oxidase
if ('K00228' in ko_match or 'K02495' in ko_match):
total += 1
#protoporphyrinogen oxidase
if ('K00230' in ko_match or 'K00231' in ko_match or 'K08973' in ko_match):
total += 1
value = float(total)/float(4)
return {'lower_PPH': float("%.2f" % (value))}
def lower_CPH(ko_match):
#Check for presence of 4 genes
total = 0
#uroporphyrinogen decarboxylase, coproporphyrinogen III oxidase, ferrochelatase
single_ko = ['K01599', 'K00231', 'K01772']
for i in single_ko:
if i in ko_match:
total += 1
#heme synthase
if ('K00435' in ko_match or 'K22227' in ko_match):
total += 1
value = float(total)/float(4)
return {'lower_CPH': float("%.2f" % (value))}
def lower_SIRO(ko_match):
#Check for presence of 6 genes
total = 0
#siroheme decarboxylase, Fe-coproporphyrin III synthase
single_ko = ['K22225', 'K22226']
for i in single_ko:
if i in ko_match:
total += 1
#uroporphyrin-III C-methyltransferase
if ('K00589' in ko_match or 'K02302' in ko_match or 'K02303' in ko_match or 'K02496' in ko_match or 'K13542' in ko_match or 'K13543' in ko_match):
total += 1
#precorrin-2 dehydrogenase
if ('K02302' in ko_match or 'K02304' in ko_match):
total += 1
#sirohydrochlorin ferrochelatase
if ('K02302' in ko_match or 'K02304' in ko_match or 'K03794' in ko_match):
total += 1
#heme synthase
if ('K00435' in ko_match or 'K22227' in ko_match):
total += 1
value = float(total)/float(6)
return {'lower_SIRO': float("%.2f" % (value))}
def default_viz(genome_df, outfile_name):
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(font_scale=1.2)
sns.set_style({"savefig.dpi": 200})
ax = sns.heatmap(genome_df, cmap=plt.cm.YlOrRd, linewidths=2,
linecolor='k', square=True, xticklabels=True,
yticklabels=True, cbar_kws={"shrink": 0.1})
ax.xaxis.tick_top()
#ax.set_yticklabels(ax.get_yticklabels(), rotation=90)
plt.xticks(rotation=90)
plt.yticks(rotation=0)
# get figure (usually obtained via "fig,ax=plt.subplots()" with matplotlib)
fig = ax.get_figure()
# specify dimensions and save
#xLen = len(genome_df.columns.values.tolist())*20
#yLen = len(genome_df.index.tolist())*20
fig.set_size_inches(100, 100)
fig.savefig(outfile_name, bbox_inches='tight', pad_inches=0.1)
def main():
import os
import matplotlib
matplotlib.use('Agg')
import argparse
import pandas as pd
from scipy.cluster import hierarchy
from scipy.spatial import distance
parser = argparse.ArgumentParser(description="Accepts KEGG KOALA\
text file as input. Produces function\
list and heat map figure.")
parser.add_argument('-i', '--input', help="Input KOALA file. See documentation\
for correct format")
parser.add_argument('-t', '--tangleopt', help="Number of tree iterations for minimizing tangles in tanglegram", default=1000)
parser.add_argument('-o', '--output', help="List version of the final heat\
map figure")
parser.add_argument('-v', '--vizoption', help="Options: static, interactive, tanglegram")
parser.add_argument('--newick', help="Required input for tanglegram visualization")
parser.add_argument("-m", "--myorder", help ="Orders output as specified by user.", default="None")
args = parser.parse_args()
arg_dict = vars(args)
genome_data = {}
for line in open(str(arg_dict['input']), "r"):
line = line.rstrip()
info = line.split()
if len(info) > 1:
if info[0].rsplit("_",1)[0] in genome_data.keys():
genome_data[info[0].rsplit("_",1)[0]].append(info[1])
else:
genome_data[info[0].rsplit("_",1)[0]] = [info[1]]
function_order = ['C5_PPH', 'C5_CPH', 'C5_SIRO', 'C4_PPH', 'C4_CPH', 'C4_SIRO', 'upper_C5', 'upper_C4', 'Common', 'lower_PPH', 'lower_CPH', 'lower_SIRO']
filehandle = str(arg_dict['output'])
out_file = open(filehandle, "w")
out_file.write('Function'+"\t"+str("\t".join(function_order))+"\n")
for k in genome_data:
pathway_data = {}
pathway_data.update(C5_PPH(genome_data[k]))
pathway_data.update(C5_CPH(genome_data[k]))
pathway_data.update(C5_SIRO(genome_data[k]))
pathway_data.update(C4_PPH(genome_data[k]))
pathway_data.update(C4_CPH(genome_data[k]))
pathway_data.update(C4_SIRO(genome_data[k]))
pathway_data.update(upper_C5(genome_data[k]))
pathway_data.update(upper_C4(genome_data[k]))
pathway_data.update(Common(genome_data[k]))
pathway_data.update(lower_PPH(genome_data[k]))
pathway_data.update(lower_CPH(genome_data[k]))
pathway_data.update(lower_SIRO(genome_data[k]))
# print k, pathway_data
out_string = str(k)+"\t"
out_list = [k]
for i in function_order:
out_list.append(pathway_data[i])
out_string = str(out_list).strip('[]')
tab_string = ""
for l in out_string:
if l == "\'":
continue
if l == ",":
tab_string = tab_string + "\t"
else:
tab_string = tab_string + l
out_file.write(tab_string+"\n")
out_file.close()
file_in = open(filehandle, "r")
genome = pd.read_csv(file_in, index_col=0, sep='\t')
rearrange = False
if arg_dict["myorder"] != 'None' and os.path.exists(arg_dict["myorder"]):
rearrange = True
leaf_order = []
for line in open(str(arg_dict["myorder"]), "r"):
line = line.rstrip("\r\n")
leaf_order.append(line)
genome = genome.reindex(leaf_order)
if arg_dict['vizoption'] == 'static':
from .KEGG_clustering import hClust_euclidean
if len(genome.index) >= 2 and not rearrange:
genome = hClust_euclidean(genome)
default_viz(genome, os.path.splitext(filehandle)[0] + ".svg")
if arg_dict['vizoption'] == 'interactive':
from .Plotly_viz import plotly_viz
plotly_viz(genome, os.path.splitext(filehandle)[0] + ".html")
if arg_dict['vizoption'] == 'tanglegram':
from .MakeTanglegram import make_tanglegram
if len(genome.index) >= 3:
make_tanglegram(genome, str(arg_dict['newick']), os.path.splitext(filehandle)[0] + ".tanglegram.svg", int(arg_dict["tangleopt"]))
else:
raise ValueError("Tanglegram mode requires three or more genomes")
if __name__ == "__main__":
main()
|
the-stack_106_17917
|
"""hknweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
    1. Add an import: from my_app import views
    2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
    1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from .shortlinks import views as viewsShortlink
from .views import landing
from .views import users
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/create/', users.account_create, name='account-create'),
path('accounts/settings/', users.account_settings, name='account-settings'),
path('accounts/activate/', users.activate),
path('about/', landing.about, name='about'),
path('events/', include('hknweb.events.urls')),
path('reviewsessions/', include('hknweb.reviewsessions.urls')),
path('exams/', include('hknweb.exams.urls')),
path('alumni/', include('hknweb.alumni.urls')),
path('tutoring/', include('hknweb.tutoring.urls')),
path('cand/', include('hknweb.candidate.urls')),
path('pages/', include('hknweb.markdown_pages.urls')),
path('markdownx/', include('markdownx.urls')),
path('elections/', include('hknweb.elections.urls')),
path('auth/', include('social_django.urls', namespace='social')),
path('', landing.home, name='home'),
path('<slug:temp>/', viewsShortlink.openLink),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
the-stack_106_17919
|
from mud.injector import Injector
from mud.event import Event
from mud.inject import inject
from utils.hash import get_random_hash
import copy
import gevent
import glob
import json
import logging
import os
import os.path
import settings
class CollectionStorage(object):
def __init__(self, collection):
self.collection = collection
def post_save(self, record):
pass
def post_delete(self, record):
pass
class MemoryStorage(CollectionStorage):
pass
class FileStorage(CollectionStorage):
def __init__(self, *args, **kwargs):
super(FileStorage, self).__init__(*args, **kwargs)
name = self.collection.__class__.__name__.lower()
self.folder = "{}/{}".format(settings.DATA_FOLDER, name)
self.load_initial_data()
def load_initial_data(self):
pattern = "{}/*".format(self.folder)
for path in glob.glob(pattern):
with open(path, "r") as fh:
try:
data = json.loads(fh.read())
record = self.collection.save(data, skip_storage=True)
self.collection.hydrate(data)
logging.debug("Stored {}: {}".format(
self.collection.__class__.__name__, record.vnum))
except Exception as e:
self.collection.game.handle_exception(e)
logging.error("Unable to parse file: {}".format(path))
def get_record_path(self, record):
filename = record[self.collection.STORAGE_FILENAME_FIELD]
format_name = self.collection.STORAGE_FORMAT
suffix = self.collection.STORAGE_FILENAME_SUFFIX
if suffix != "":
if suffix is None:
suffix = format_name
suffix = "." + suffix
return "{}/{}{}".format(
self.folder,
filename,
suffix)
def post_save(self, record):
path = self.get_record_path(record)
folders = path.split("/")
folder = "/".join(folders[:-1])
os.makedirs(folder, exist_ok=True)
temp_path = path + ".TMP"
with open(temp_path, "w") as fh:
fh.write(json.dumps(record, indent=4, sort_keys=True))
os.replace(temp_path, path)
def post_delete(self, record):
path = self.get_record_path(record)
os.remove(path)
class Entity(object):
DEFAULT_DATA = {
"flags": [],
}
@property
def children(self):
return []
@property
def parents(self):
return []
def add_flag(self, flag):
flags = self.flags or []
if flag not in flags:
flags.append(flag)
self.flags = flags
def remove_flag(self, flag):
flags = self.flags or []
if flag in flags:
flags.remove(flag)
self.flags = flags
def toggle_flag(self, flag):
flags = self.flags or []
if flag in flags:
flags.remove(flag)
else:
flags.append(flag)
self.flags = flags
def has_flag(self, flag):
flags = self.flags or []
return flag in flags
def emit(self, type, data=None, unblockable=False):
event = self.generate_event(type, data, unblockable=unblockable)
if unblockable:
gevent.spawn(self.emit_event, event)
return event
else:
return self.emit_event(event)
def trigger(self, type, data=None, unblockable=False):
event = self.generate_event(type, data, unblockable=unblockable)
level = self.room
if unblockable:
gevent.spawn(level.broadcast_event, event)
return event
else:
return level.broadcast_event(event)
def broadcast(self, type, data=None, unblockable=False):
event = self.generate_event(type, data, unblockable=unblockable)
if unblockable:
gevent.spawn(self.broadcast_event, event)
return event
else:
return self.broadcast_event(event)
def broadcast_event(self, event):
event = self.handle_event(event)
if event.blocked:
return event
for child in self.children:
event = child.broadcast_event(event)
if event.blocked:
return event
return event
def emit_event(self, event):
event = self.handle_event(event)
if event.blocked:
return event
for parent in self.parents:
event = parent.emit_event(event)
if event.blocked:
return event
return event
def handle(self, type, data=None):
event = self.generate_event(type, data)
return self.handle_event(event)
# FIXME Move this out of Collection, which is meant to be more generic
@inject("Scripts", "Behaviors")
def handle_event(self, event, Scripts=None, Behaviors=None):
# TODO HAVE COLLECTION CREATE DEEPCOPIES OF EVERYTHING
triggers = list(self.triggers or [])
for behavior_vnum in (self.behaviors or []):
behavior = Behaviors.get({"vnum": behavior_vnum})
triggers.append({
"type": behavior.type,
"script_vnum": behavior.script_vnum,
})
if triggers:
for entry in triggers:
if entry["type"] != event.type:
continue
script = Scripts.get({"vnum": entry["script_vnum"]})
script.execute(self, event)
return event
def generate_event(self, type, data=None, unblockable=False):
return Event(self, type, data, unblockable=unblockable)
def __eq__(self, other):
if not other:
return False
return other.id == self.id
    # Python calls __ne__ (not __neq__) for the "!=" operator
    def __ne__(self, other):
return not self.__eq__(other)
def __init__(self, data=None, collection=None):
        # deep-copy so mutable defaults (e.g. the "flags" list) are never shared between instances
        merged_data = copy.deepcopy(self.DEFAULT_DATA)
if data:
merged_data.update(data)
data = merged_data
super(Entity, self).__setattr__("_data", data)
super(Entity, self).__setattr__("_collection", collection)
@property
def game(self):
return self._collection.game
@property
def collection(self):
return self._collection
def _get_property(self, name):
prop_check = getattr(self.__class__, name, None)
if isinstance(prop_check, property):
return prop_check
return None
def __setattr__(self, name, value):
return self.set(name, value)
def __getattr__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __getitem__(self, name):
return self.get(name)
def get(self, name, default=None):
prop = self._get_property(name)
if prop:
return prop.fget(self, default)
else:
return self._data.get(name, default)
def set(self, name, value):
prop = self._get_property(name)
if prop:
prop.fset(self, value)
else:
self._data[name] = value
def set_data(self, data):
super(Entity, self).__setattr__("_data", data)
def get_data(self):
return self._data
def refresh(self):
self.set_data(self._collection.data.get(self.id, {}))
def save(self):
return self._collection.save(self)
def delete(self):
return self._collection.delete(self)
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self.set_data(data)
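# Illustrative sketch (not part of the original module): Entity attribute access
# falls through to the underlying data dict, so plain fields and flags work
# without defining properties. The field names below are hypothetical.
def _example_entity_flags():
    sword = Entity(data={"name": "a short sword"})
    sword.condition = "rusty"          # stored in the entity's data dict
    sword.add_flag("glowing")
    assert sword.has_flag("glowing")
    sword.toggle_flag("glowing")       # removed again
    return sword.name, sword.condition, sword.flags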
class Collection(Injector):
PERSISTENT = False
ENTITY_CLASS = Entity
STORAGE_CLASS = MemoryStorage
STORAGE_FORMAT = "json"
STORAGE_FILENAME_FIELD = "vnum"
STORAGE_FILENAME_SUFFIX = None
DATA_NAME = None
def __init__(self, game):
super(Collection, self).__init__(game)
name = self.DATA_NAME or self.__class__.__name__
self.game.data[name] = {}
self.storage = self.STORAGE_CLASS(self)
def fuzzy_get(self, identifier):
"""Look up an Entity, using fuzzier logic."""
cleaned = identifier.strip().lower()
for entry in self.query():
compare = entry.name.lower()
if compare.startswith(cleaned):
return entry
@property
def data(self):
name = self.DATA_NAME or self.__class__.__name__
return self.game.data[name]
@data.setter
def data(self, data):
name = self.DATA_NAME or self.__class__.__name__
self.game.data[name] = data
def query(self, spec=None, as_dict=False):
def _filter_function(record):
if spec is None:
return True
else:
for key in spec:
if key not in record or spec[key] != record[key]:
return False
return True
filtered = filter(_filter_function, self.data.values())
for record in list(filtered):
if as_dict:
yield record
else:
yield self.wrap_record(record)
def get(self, spec):
if spec is None:
return None
elif isinstance(spec, str):
record = self.data.get(spec, None)
if not record:
return None
return self.wrap_record(record)
else:
for record in self.query(spec):
return record
def save(self, record, skip_storage=False):
logging.debug("Saving {} record {}".format(
self.__class__.__name__, record.get("vnum", None)))
record = self.unwrap_record(record)
default_data = self.ENTITY_CLASS.DEFAULT_DATA
if default_data:
for key, value in default_data.items():
                if key not in record:
                    # copy so saved records never alias the class-level default values
                    record[key] = copy.deepcopy(value)
if "id" not in record:
record["id"] = get_random_hash()
self.data[record["id"]] = record
if not skip_storage:
storage_record = self.dehydrate(record)
self.storage.post_save(storage_record)
return self.get(record["id"])
def unwrap_record(self, record):
if isinstance(record, Entity):
return record.get_data()
return record
def wrap_record(self, record):
if isinstance(record, dict):
return self.ENTITY_CLASS(data=record, collection=self)
return record
def delete(self, record):
record = self.unwrap_record(record)
wrapped = self.wrap_record(record)
for child in wrapped.children:
if child._collection.PERSISTENT:
continue
child.delete()
del self.data[record["id"]]
self.storage.post_delete(record)
self.post_delete(record)
def post_delete(self, record):
"""Handle a record being removed from the Collection."""
pass
def dehydrate(self, record):
"""Prepare data for cold storage."""
return record
def hydrate(self, record):
"""Load data for Game usage."""
return record
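# Illustrative sketch (not part of the original module): a concrete collection
# picks its persistence by overriding STORAGE_CLASS; FileStorage then writes one
# JSON file per record, named after STORAGE_FILENAME_FIELD. The class name below
# is hypothetical.
class _ExampleRooms(Collection):
    STORAGE_CLASS = FileStorage
    STORAGE_FORMAT = "json"
    STORAGE_FILENAME_FIELD = "vnum"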
|
the-stack_106_17920
|
import itertools
from notifications_utils.recipients import allowed_to_send_to
from app.models import (
ServiceGuestList,
MOBILE_TYPE, EMAIL_TYPE,
KEY_TYPE_TEST, KEY_TYPE_TEAM, KEY_TYPE_NORMAL)
from app.dao.services_dao import dao_fetch_service_by_id
def get_recipients_from_request(request_json, key, type):
return [(type, recipient) for recipient in request_json.get(key)]
def get_guest_list_objects(service_id, request_json):
return [
ServiceGuestList.from_string(service_id, type, recipient)
for type, recipient in (
get_recipients_from_request(request_json,
'phone_numbers',
MOBILE_TYPE) +
get_recipients_from_request(request_json,
'email_addresses',
EMAIL_TYPE)
)
]
def service_allowed_to_send_to(recipient, service, key_type, allow_guest_list_recipients=True):
if key_type == KEY_TYPE_TEST:
return True
if key_type == KEY_TYPE_NORMAL and not service.restricted:
return True
# Revert back to the ORM model here so we can get some things which
# aren’t in the serialised model
service = dao_fetch_service_by_id(service.id)
team_members = itertools.chain.from_iterable(
[user.mobile_number, user.email_address] for user in service.users
)
guest_list_members = [
member.recipient for member in service.guest_list
if allow_guest_list_recipients
]
if (
(key_type == KEY_TYPE_NORMAL and service.restricted) or
(key_type == KEY_TYPE_TEAM)
):
return allowed_to_send_to(
recipient,
itertools.chain(
team_members,
guest_list_members
)
)
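# Illustrative sketch (not part of the original module): the request payload is
# flattened into (type, recipient) tuples before guest list objects are built.
# The phone number and email address below are hypothetical placeholders.
def _example_recipient_tuples():
    request_json = {
        'phone_numbers': ['07700900000'],
        'email_addresses': ['someone@example'],
    }
    return (
        get_recipients_from_request(request_json, 'phone_numbers', MOBILE_TYPE) +
        get_recipients_from_request(request_json, 'email_addresses', EMAIL_TYPE)
    )  # -> [(MOBILE_TYPE, '07700900000'), (EMAIL_TYPE, 'someone@example')]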
|
the-stack_106_17921
|
# coding=utf-8
from __future__ import unicode_literals
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.test import TestCase
from internationalflavor.vat_number import VATNumberValidator
from internationalflavor.vat_number.forms import VATNumberFormField
from internationalflavor.vat_number.models import VATNumberField
class VATNumberTestCase(TestCase):
valid = {
'NL820646660B01': 'NL820646660B01',
'NL82064-6660.B01': 'NL820646660B01',
'NL123456789B13': 'NL123456789B13',
'DE 114 103 379': 'DE114103379',
'DE114103379': 'DE114103379',
'BE 0203.201.340': 'BE0203201340',
'HU99999999': 'HU99999999',
'IE1234567XX': 'IE1234567XX',
'IE1X23456X': 'IE1X23456X',
'GR123456789': 'EL123456789',
'CH-123.456.789 MWST': 'CH123456789',
'CHE-123.456.789 MWST': 'CH123456789',
'CHE-123.456.789 IVA': 'CH123456789',
'RU5505035011': 'RU5505035011',
'RU550501929014': 'RU550501929014',
}
invalid = {
'NL820646661B01': ['This VAT number does not match the requirements for NL.'],
'BE0203201341': ['This VAT number does not match the requirements for BE.'],
'DE11410337': ['This VAT number does not match the requirements for DE.'],
'US123414132': ['US VAT numbers are not allowed in this field.'],
'123456': ['This VAT number does not start with a country code, or contains invalid characters.'],
'IE0É12345A': ['This VAT number does not start with a country code, or contains invalid characters.'],
'RU5505035012': ['This VAT number does not match the requirements for RU.'],
'RU550501929015': ['This VAT number does not match the requirements for RU.'],
}
def test_validator(self):
validator = VATNumberValidator()
        # The validator rejects formatting characters, so the raw (formatted)
        # value must raise while its cleaned counterpart passes.
        for vat_number, cleaned in self.valid.items():
            if vat_number == cleaned:
                validator(vat_number)
            else:
                validator(cleaned)
                self.assertRaises(ValidationError, validator, vat_number)
        for vat_number, message in self.invalid.items():
            self.assertRaisesMessage(ValidationError, message[0], validator, vat_number)
def test_validator_eu_only(self):
validator = VATNumberValidator(eu_only=True)
validator('CY12345678A')
def test_validator_greece(self):
validator = VATNumberValidator(eu_only=True)
self.assertRaises(ValidationError, validator, 'GR123456789')
validator('EL123456789')
validator = VATNumberValidator(countries=['GR'])
self.assertRaises(ValidationError, validator, 'GR123456789')
validator('EL123456789')
validator = VATNumberValidator(countries=['EL'])
self.assertRaises(ValidationError, validator, 'GR123456789')
validator('EL123456789')
def test_form_field(self):
self.assertFieldOutput(VATNumberFormField, valid=self.valid, invalid=self.invalid)
def test_form_field_formatting(self):
form_field = VATNumberFormField()
self.assertEqual(form_field.prepare_value('DE 114 103 379'), 'DE114103379')
self.assertEqual(form_field.prepare_value('CHE-123.456.789 IVA'), 'CHE123456789')
self.assertIsNone(form_field.prepare_value(None))
def test_model_field(self):
model_field = VATNumberField()
for input, output in self.valid.items():
self.assertEqual(model_field.clean(input, None), output)
# Invalid inputs for model field.
for input, errors in self.invalid.items():
with self.assertRaises(ValidationError) as context_manager:
model_field.clean(input, None)
self.assertEqual(context_manager.exception.messages, errors[::-1])
include_countries = ('NL', 'BE')
include_countries_valid = {
'NL820646660B01': 'NL820646660B01',
'BE0203201340': 'BE0203201340'
}
include_countries_invalid = {
'DE114103379': ['DE VAT numbers are not allowed in this field.']
}
def test_include_countries_form_field(self):
self.assertFieldOutput(VATNumberFormField, field_kwargs={'countries': self.include_countries},
valid=self.include_countries_valid, invalid=self.include_countries_invalid)
def test_include_countries_model_field(self):
model_field = VATNumberField(countries=self.include_countries)
for input, output in self.include_countries_valid.items():
self.assertEqual(model_field.clean(input, None), output)
# Invalid inputs for model field.
for input, errors in self.include_countries_invalid.items():
with self.assertRaises(ValidationError) as context_manager:
model_field.clean(input, None)
self.assertEqual(context_manager.exception.messages, errors[::-1])
def test_vies_check_validator(self):
validator = VATNumberValidator(vies_check=True)
validator('DE114103379')
try:
with self.assertRaises(ValidationError) as context_manager:
validator('DE999999999')
self.assertEqual(context_manager.exception.messages, ['This VAT number does not exist.'])
except AssertionError:
# Check if the validation succeeded due to a SUDS error.
# You should be wary of skipped tests because of this, but the service may also be unavailable at the time.
if validator._wsdl_exception is not None:
print("Suds WSDL test skipped due to connection failure")
self.skipTest("Suds WSDL client failed")
else:
raise
def test_vies_check_validator_native(self):
validator = VATNumberValidator(vies_check=True)
validator._check_vies = validator._check_vies_native
validator('DE114103379')
try:
with self.assertRaises(ValidationError) as context_manager:
validator('DE999999999')
self.assertEqual(context_manager.exception.messages, ['This VAT number does not exist.'])
except AssertionError:
# Check if the validation succeeded due to a SUDS error.
# You should be wary of skipped tests because of this, but the service may also be unavailable at the time.
if validator._wsdl_exception is not None:
print("Native WSDL test skipped due to connection failure")
self.skipTest("Native WSDL client failed")
else:
raise
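# Illustrative sketch (not part of the test suite): using the validator directly.
# It returns silently for an acceptable VAT number and raises ValidationError
# otherwise; the countries argument restricts which prefixes are allowed.
def _example_validate_vat():
    validator = VATNumberValidator(countries=['NL', 'BE'])
    validator('NL820646660B01')   # passes silently
    try:
        validator('DE114103379')  # a valid number, but DE is not allowed here
    except ValidationError:
        return True
    return False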
|
the-stack_106_17922
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from typing import Dict
from airflow.decorators import dag, task
from airflow.operators.email import EmailOperator
from airflow.providers.http.operators.http import SimpleHttpOperator
from airflow.utils.dates import days_ago
DEFAULT_ARGS = {
"owner": "airflow",
}
# [START dag_decorator_usage]
@dag(default_args=DEFAULT_ARGS, schedule_interval=None, start_date=days_ago(2))
def example_dag_decorator(email: str = '[email protected]'):
"""
DAG to send server IP to email.
:param email: Email to send IP to. Defaults to [email protected].
:type email: str
"""
# Using default connection as it's set to httpbin.org by default
get_ip = SimpleHttpOperator(task_id='get_ip', endpoint='get', method='GET')
@task(multiple_outputs=True)
def prepare_email(raw_json: str) -> Dict[str, str]:
external_ip = json.loads(raw_json)['origin']
return {
'subject': f'Server connected from {external_ip}',
'body': f'Seems like today your server executing Airflow is connected from IP {external_ip}<br>',
}
email_info = prepare_email(get_ip.output)
EmailOperator(
task_id='send_email', to=email, subject=email_info['subject'], html_content=email_info['body']
)
dag = example_dag_decorator()
# [END dag_decorator_usage]
|
the-stack_106_17923
|
# Copyright 2014 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import zipfile
from collections import namedtuple
from . import ports
from . import shared
from tools.shared import check_call
stdout = None
stderr = None
def call_process(cmd):
shared.run_process(cmd, stdout=stdout, stderr=stderr)
def run_commands(commands):
cores = min(len(commands), shared.Building.get_num_cores())
if cores <= 1:
for command in commands:
call_process(command)
else:
pool = shared.Building.get_multiprocessing_pool()
# https://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool, https://bugs.python.org/issue8296
# 999999 seconds (about 11 days) is reasonably huge to not trigger actual timeout
# and is smaller than the maximum timeout value 4294967.0 for Python 3 on Windows (threading.TIMEOUT_MAX)
pool.map_async(call_process, commands, chunksize=1).get(999999)
def files_in_path(path_components, filenames):
srcdir = shared.path_from_root(*path_components)
return [os.path.join(srcdir, f) for f in filenames]
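# Illustrative sketch (not part of the original helpers): files_in_path simply
# joins each filename onto a directory rooted at the Emscripten checkout; the
# call below resolves two sources under system/lib/libc, as used further down.
def _example_files_in_path():
  return files_in_path(
      path_components=['system', 'lib', 'libc'],
      filenames=['extras.c', 'emscripten_memcpy.c'])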
def get_cflags():
flags = []
if not shared.Settings.WASM_OBJECT_FILES:
flags += ['-s', 'WASM_OBJECT_FILES=0']
return flags
def create_lib(libname, inputs):
"""Create a library from a set of input objects."""
if libname.endswith('.bc'):
shared.Building.link_to_object(inputs, libname)
elif libname.endswith('.a'):
shared.Building.emar('cr', libname, inputs)
else:
raise Exception('unknown suffix ' + libname)
def calculate(temp_files, in_temp, stdout_, stderr_, forced=[]):
global stdout, stderr
stdout = stdout_
stderr = stderr_
# Check if we need to include some libraries that we compile. (We implement libc ourselves in js, but
# compile a malloc implementation and stdlibc++.)
def read_symbols(path):
with open(path) as f:
content = f.read()
# Require that Windows newlines should not be present in a symbols file, if running on Linux or macOS
# This kind of mismatch can occur if one copies a zip file of Emscripten cloned on Windows over to
# a Linux or macOS system. It will result in Emscripten linker getting confused on stray \r characters,
# and be unable to link any library symbols properly. We could harden against this by .strip()ping the
# opened files, but it is possible that the mismatching line endings can cause random problems elsewhere
# in the toolchain, hence abort execution if so.
if os.name != 'nt' and '\r\n' in content:
raise Exception('Windows newlines \\r\\n detected in symbols file "' + path + '"! This could happen for example when copying Emscripten checkout from Windows to Linux or macOS. Please use Unix line endings on checkouts of Emscripten on Linux and macOS!')
return shared.Building.parse_symbols(content).defs
default_opts = ['-Werror']
# XXX We also need to add libc symbols that use malloc, for example strdup. It's very rare to use just them and not
# a normal malloc symbol (like free, after calling strdup), so we haven't hit this yet, but it is possible.
libc_symbols = read_symbols(shared.path_from_root('system', 'lib', 'libc.symbols'))
libcxx_symbols = read_symbols(shared.path_from_root('system', 'lib', 'libcxx', 'symbols'))
libcxxabi_symbols = read_symbols(shared.path_from_root('system', 'lib', 'libcxxabi', 'symbols'))
gl_symbols = read_symbols(shared.path_from_root('system', 'lib', 'gl.symbols'))
al_symbols = read_symbols(shared.path_from_root('system', 'lib', 'al.symbols'))
compiler_rt_symbols = read_symbols(shared.path_from_root('system', 'lib', 'compiler-rt.symbols'))
libc_extras_symbols = read_symbols(shared.path_from_root('system', 'lib', 'libc_extras.symbols'))
pthreads_symbols = read_symbols(shared.path_from_root('system', 'lib', 'pthreads.symbols'))
asmjs_pthreads_symbols = read_symbols(shared.path_from_root('system', 'lib', 'asmjs_pthreads.symbols'))
stub_pthreads_symbols = read_symbols(shared.path_from_root('system', 'lib', 'stub_pthreads.symbols'))
wasm_libc_symbols = read_symbols(shared.path_from_root('system', 'lib', 'wasm-libc.symbols'))
html5_symbols = read_symbols(shared.path_from_root('system', 'lib', 'html5.symbols'))
# XXX we should disable EMCC_DEBUG when building libs, just like in the relooper
def musl_internal_includes():
return [
'-I', shared.path_from_root('system', 'lib', 'libc', 'musl', 'src', 'internal'),
'-I', shared.path_from_root('system', 'lib', 'libc', 'musl', 'arch', 'js'),
]
def build_libc(lib_filename, files, lib_opts):
o_s = []
commands = []
# Hide several musl warnings that produce a lot of spam to unit test build server logs.
# TODO: When updating musl the next time, feel free to recheck which of their warnings might have been fixed, and which ones of these could be cleaned up.
c_opts = ['-Wno-return-type', '-Wno-parentheses', '-Wno-ignored-attributes',
'-Wno-shift-count-overflow', '-Wno-shift-negative-value',
'-Wno-dangling-else', '-Wno-unknown-pragmas',
'-Wno-shift-op-parentheses', '-Wno-string-plus-int',
'-Wno-logical-op-parentheses', '-Wno-bitwise-op-parentheses',
'-Wno-visibility', '-Wno-pointer-sign', '-Wno-absolute-value',
'-Wno-empty-body']
for src in files:
o = in_temp(os.path.basename(src) + '.o')
commands.append([shared.PYTHON, shared.EMCC, shared.path_from_root('system', 'lib', src), '-o', o] + musl_internal_includes() + default_opts + c_opts + lib_opts + get_cflags())
o_s.append(o)
run_commands(commands)
create_lib(in_temp(lib_filename), o_s)
return in_temp(lib_filename)
def build_libcxx(src_dirname, lib_filename, files, lib_opts, has_noexcept_version=False):
o_s = []
commands = []
opts = default_opts + lib_opts
# Make sure we don't mark symbols as default visibility. This works around
# an issue with the wasm backend where all default visibility symbols are
# exported (and therefore can't be GC'd).
# FIXME(https://github.com/emscripten-core/emscripten/issues/7383)
opts += ['-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS']
if has_noexcept_version and shared.Settings.DISABLE_EXCEPTION_CATCHING:
opts += ['-fno-exceptions']
for src in files:
o = in_temp(src + '.o')
srcfile = shared.path_from_root(src_dirname, src)
commands.append([shared.PYTHON, shared.EMXX, srcfile, '-o', o, '-std=c++11'] + opts + get_cflags())
o_s.append(o)
run_commands(commands)
create_lib(in_temp(lib_filename), o_s)
return in_temp(lib_filename)
# Returns linker flags specific to singlethreading or multithreading
def threading_flags(libname):
if shared.Settings.USE_PTHREADS:
assert '-mt' in libname
return ['-s', 'USE_PTHREADS=1']
else:
assert '-mt' not in libname
return []
def legacy_gl_emulation_flags(libname):
if shared.Settings.LEGACY_GL_EMULATION:
assert '-emu' in libname
return ['-DLEGACY_GL_EMULATION=1']
else:
assert '-emu' not in libname
return []
def gl_version_flags(libname):
if shared.Settings.USE_WEBGL2:
assert '-webgl2' in libname
return ['-DUSE_WEBGL2=1']
else:
assert '-webgl2' not in libname
return []
# libc
def create_libc(libname):
logging.debug(' building libc for cache')
libc_files = []
musl_srcdir = shared.path_from_root('system', 'lib', 'libc', 'musl', 'src')
# musl modules
blacklist = [
'ipc', 'passwd', 'thread', 'signal', 'sched', 'ipc', 'time', 'linux',
'aio', 'exit', 'legacy', 'mq', 'process', 'search', 'setjmp', 'env',
'ldso', 'conf'
]
# individual files
blacklist += [
'memcpy.c', 'memset.c', 'memmove.c', 'getaddrinfo.c', 'getnameinfo.c',
'inet_addr.c', 'res_query.c', 'res_querydomain.c', 'gai_strerror.c',
'proto.c', 'gethostbyaddr.c', 'gethostbyaddr_r.c', 'gethostbyname.c',
'gethostbyname2_r.c', 'gethostbyname_r.c', 'gethostbyname2.c',
'usleep.c', 'alarm.c', 'syscall.c', '_exit.c', 'popen.c',
'getgrouplist.c', 'initgroups.c', 'wordexp.c', 'timer_create.c',
'faccessat.c',
]
# individual math files
blacklist += [
'abs.c', 'cos.c', 'cosf.c', 'cosl.c', 'sin.c', 'sinf.c', 'sinl.c',
'tan.c', 'tanf.c', 'tanl.c', 'acos.c', 'acosf.c', 'acosl.c', 'asin.c',
'asinf.c', 'asinl.c', 'atan.c', 'atanf.c', 'atanl.c', 'atan2.c',
'atan2f.c', 'atan2l.c', 'exp.c', 'expf.c', 'expl.c', 'log.c', 'logf.c',
'logl.c', 'sqrt.c', 'sqrtf.c', 'sqrtl.c', 'fabs.c', 'fabsf.c',
'fabsl.c', 'ceil.c', 'ceilf.c', 'ceill.c', 'floor.c', 'floorf.c',
'floorl.c', 'pow.c', 'powf.c', 'powl.c', 'round.c', 'roundf.c',
'rintf.c'
]
if shared.Settings.WASM_BACKEND:
# With the wasm backend these are included in wasm_libc_rt instead
blacklist += [
'fmin.c', 'fminf.c', 'fminl.c', 'fmax.c', 'fmaxf.c', 'fmaxl.c',
'fmod.c', 'fmodf.c', 'fmodl.c', 'log2.c', 'log2f.c', 'log10.c',
'log10f.c', 'exp2.c', 'exp2f.c', 'exp10.c', 'exp10f.c', 'scalbn.c',
'__fpclassifyl.c'
]
blacklist += ['memcpy.c', 'memset.c', 'memmove.c']
blacklist = set(blacklist)
# TODO: consider using more math code from musl, doing so makes box2d faster
for dirpath, dirnames, filenames in os.walk(musl_srcdir):
for f in filenames:
if f.endswith('.c'):
if f in blacklist:
continue
dir_parts = os.path.split(dirpath)
cancel = False
for part in dir_parts:
if part in blacklist:
cancel = True
break
if not cancel:
libc_files.append(os.path.join(musl_srcdir, dirpath, f))
# Without -fno-builtin, LLVM can optimize away or convert calls to library
# functions to something else based on assumptions that they behave exactly
# like the standard library. This can cause unexpected bugs when we use our
# custom standard library. The same for other libc/libm builds.
args = ['-Os', '-fno-builtin']
args += threading_flags(libname)
return build_libc(libname, libc_files, args)
def create_pthreads(libname):
# Add pthread files.
pthreads_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'thread'],
filenames=[
'pthread_attr_destroy.c', 'pthread_condattr_setpshared.c',
'pthread_mutex_lock.c', 'pthread_spin_destroy.c', 'pthread_attr_get.c',
'pthread_cond_broadcast.c', 'pthread_mutex_setprioceiling.c',
'pthread_spin_init.c', 'pthread_attr_init.c', 'pthread_cond_destroy.c',
'pthread_mutex_timedlock.c', 'pthread_spin_lock.c',
'pthread_attr_setdetachstate.c', 'pthread_cond_init.c',
'pthread_mutex_trylock.c', 'pthread_spin_trylock.c',
'pthread_attr_setguardsize.c', 'pthread_cond_signal.c',
'pthread_mutex_unlock.c', 'pthread_spin_unlock.c',
'pthread_attr_setinheritsched.c', 'pthread_cond_timedwait.c',
'pthread_once.c', 'sem_destroy.c', 'pthread_attr_setschedparam.c',
'pthread_cond_wait.c', 'pthread_rwlockattr_destroy.c', 'sem_getvalue.c',
'pthread_attr_setschedpolicy.c', 'pthread_equal.c', 'pthread_rwlockattr_init.c',
'sem_init.c', 'pthread_attr_setscope.c', 'pthread_getspecific.c',
'pthread_rwlockattr_setpshared.c', 'sem_open.c', 'pthread_attr_setstack.c',
'pthread_key_create.c', 'pthread_rwlock_destroy.c', 'sem_post.c',
'pthread_attr_setstacksize.c', 'pthread_mutexattr_destroy.c',
'pthread_rwlock_init.c', 'sem_timedwait.c', 'pthread_barrierattr_destroy.c',
'pthread_mutexattr_init.c', 'pthread_rwlock_rdlock.c', 'sem_trywait.c',
'pthread_barrierattr_init.c', 'pthread_mutexattr_setprotocol.c',
'pthread_rwlock_timedrdlock.c', 'sem_unlink.c',
'pthread_barrierattr_setpshared.c', 'pthread_mutexattr_setpshared.c',
'pthread_rwlock_timedwrlock.c', 'sem_wait.c', 'pthread_barrier_destroy.c',
'pthread_mutexattr_setrobust.c', 'pthread_rwlock_tryrdlock.c',
'__timedwait.c', 'pthread_barrier_init.c', 'pthread_mutexattr_settype.c',
'pthread_rwlock_trywrlock.c', 'vmlock.c', 'pthread_barrier_wait.c',
'pthread_mutex_consistent.c', 'pthread_rwlock_unlock.c', '__wait.c',
'pthread_condattr_destroy.c', 'pthread_mutex_destroy.c',
'pthread_rwlock_wrlock.c', 'pthread_condattr_init.c',
'pthread_mutex_getprioceiling.c', 'pthread_setcanceltype.c',
'pthread_condattr_setclock.c', 'pthread_mutex_init.c',
'pthread_setspecific.c', 'pthread_setcancelstate.c'
])
pthreads_files += [os.path.join('pthread', 'library_pthread.c')]
return build_libc(libname, pthreads_files, ['-O2', '-s', 'USE_PTHREADS=1'])
def create_pthreads_stub(libname):
pthreads_files = [os.path.join('pthread', 'library_pthread_stub.c')]
return build_libc(libname, pthreads_files, ['-O2'])
def create_pthreads_asmjs(libname):
pthreads_files = [os.path.join('pthread', 'library_pthread_asmjs.c')]
return build_libc(libname, pthreads_files, ['-O2', '-s', 'USE_PTHREADS=1'])
def create_pthreads_wasm(libname):
pthreads_files = [os.path.join('pthread', 'library_pthread_wasm.c')]
return build_libc(libname, pthreads_files, ['-O2', '-s', 'USE_PTHREADS=1'])
def create_wasm_libc(libname):
# in asm.js we just use Math.sin etc., which is good for code size. But
# wasm doesn't have such builtins, so we need to bundle in more code
files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'math'],
filenames=['cos.c', 'cosf.c', 'cosl.c', 'sin.c', 'sinf.c', 'sinl.c',
'tan.c', 'tanf.c', 'tanl.c', 'acos.c', 'acosf.c', 'acosl.c',
'asin.c', 'asinf.c', 'asinl.c', 'atan.c', 'atanf.c', 'atanl.c',
'atan2.c', 'atan2f.c', 'atan2l.c', 'exp.c', 'expf.c', 'expl.c',
'log.c', 'logf.c', 'logl.c', 'pow.c', 'powf.c', 'powl.c'])
return build_libc(libname, files, ['-O2', '-fno-builtin'])
# libc++
def create_libcxx(libname):
logging.debug('building libc++ for cache')
libcxx_files = [
'algorithm.cpp',
'any.cpp',
'bind.cpp',
'chrono.cpp',
'condition_variable.cpp',
'debug.cpp',
'exception.cpp',
'future.cpp',
'functional.cpp',
'hash.cpp',
'ios.cpp',
'iostream.cpp',
'locale.cpp',
'memory.cpp',
'mutex.cpp',
'new.cpp',
'optional.cpp',
'random.cpp',
'regex.cpp',
'shared_mutex.cpp',
'stdexcept.cpp',
'string.cpp',
'strstream.cpp',
'system_error.cpp',
'thread.cpp',
'typeinfo.cpp',
'utility.cpp',
'valarray.cpp',
'variant.cpp',
'vector.cpp',
os.path.join('experimental', 'memory_resource.cpp'),
os.path.join('experimental', 'filesystem', 'directory_iterator.cpp'),
os.path.join('experimental', 'filesystem', 'path.cpp'),
os.path.join('experimental', 'filesystem', 'operations.cpp')
]
libcxxabi_include = shared.path_from_root('system', 'lib', 'libcxxabi', 'include')
return build_libcxx(
os.path.join('system', 'lib', 'libcxx'), libname, libcxx_files,
['-DLIBCXX_BUILDING_LIBCXXABI=1', '-D_LIBCPP_BUILDING_LIBRARY', '-Oz', '-I' + libcxxabi_include],
has_noexcept_version=True)
# libcxxabi - just for dynamic_cast for now
def create_libcxxabi(libname):
logging.debug('building libc++abi for cache')
libcxxabi_files = [
'abort_message.cpp',
'cxa_aux_runtime.cpp',
'cxa_default_handlers.cpp',
'cxa_demangle.cpp',
'cxa_exception_storage.cpp',
'cxa_guard.cpp',
'cxa_new_delete.cpp',
'cxa_handlers.cpp',
'exception.cpp',
'stdexcept.cpp',
'typeinfo.cpp',
'private_typeinfo.cpp'
]
libcxxabi_include = shared.path_from_root('system', 'lib', 'libcxxabi', 'include')
return build_libcxx(
os.path.join('system', 'lib', 'libcxxabi', 'src'), libname, libcxxabi_files,
['-Oz', '-I' + libcxxabi_include])
# gl
def create_gl(libname):
src_dir = shared.path_from_root('system', 'lib', 'gl')
files = []
for dirpath, dirnames, filenames in os.walk(src_dir):
filenames = filter(lambda f: f.endswith('.c'), filenames)
files += map(lambda f: os.path.join(src_dir, f), filenames)
flags = ['-Oz', '-s', 'USE_WEBGL2=1']
flags += threading_flags(libname)
flags += legacy_gl_emulation_flags(libname)
flags += gl_version_flags(libname)
return build_libc(libname, files, flags)
# al
def create_al(libname): # libname is ignored, this is just one .o file
o = in_temp('al.o')
check_call([shared.PYTHON, shared.EMCC, shared.path_from_root('system', 'lib', 'al.c'), '-o', o, '-Os'] + get_cflags())
return o
def create_html5(libname):
src_dir = shared.path_from_root('system', 'lib', 'html5')
files = []
for dirpath, dirnames, filenames in os.walk(src_dir):
files += [os.path.join(src_dir, f) for f in filenames]
return build_libc(libname, files, ['-Oz'])
def create_compiler_rt(libname):
files = files_in_path(
path_components=['system', 'lib', 'compiler-rt', 'lib', 'builtins'],
filenames=['divdc3.c', 'divsc3.c', 'muldc3.c', 'mulsc3.c'])
o_s = []
commands = []
for src in files:
o = in_temp(os.path.basename(src) + '.o')
commands.append([shared.PYTHON, shared.EMCC, shared.path_from_root('system', 'lib', src), '-O2', '-o', o] + get_cflags())
o_s.append(o)
run_commands(commands)
shared.Building.emar('cr', in_temp(libname), o_s)
return in_temp(libname)
# libc_extras
def create_libc_extras(libname): # libname is ignored, this is just one .o file
o = in_temp('libc_extras.o')
check_call([shared.PYTHON, shared.EMCC, shared.path_from_root('system', 'lib', 'libc', 'extras.c'), '-o', o] + get_cflags())
return o
# decides which malloc to use, and returns the source for malloc and the full library name
def malloc_decision():
if shared.Settings.MALLOC == 'dlmalloc':
base = 'dlmalloc'
elif shared.Settings.MALLOC == 'emmalloc':
base = 'emmalloc'
else:
raise Exception('malloc must be one of "emmalloc", "dlmalloc", see settings.js')
# only dlmalloc supports most modes
def require_dlmalloc(what):
if base != 'dlmalloc':
shared.exit_with_error('only dlmalloc is possible when using %s' % what)
extra = ''
if shared.Settings.DEBUG_LEVEL >= 3:
extra += '_debug'
if not shared.Settings.SUPPORT_ERRNO:
# emmalloc does not use errno anyhow
if base != 'emmalloc':
extra += '_noerrno'
if shared.Settings.USE_PTHREADS:
extra += '_threadsafe'
require_dlmalloc('pthreads')
if shared.Settings.EMSCRIPTEN_TRACING:
extra += '_tracing'
require_dlmalloc('tracing')
if base == 'dlmalloc':
source = 'dlmalloc.c'
elif base == 'emmalloc':
source = 'emmalloc.cpp'
return (source, 'lib' + base + extra)
def malloc_source():
return malloc_decision()[0]
def malloc_name():
return malloc_decision()[1]
def create_malloc(out_name):
o = in_temp(out_name)
cflags = ['-O2', '-fno-builtin']
if shared.Settings.USE_PTHREADS:
cflags += ['-s', 'USE_PTHREADS=1']
if shared.Settings.EMSCRIPTEN_TRACING:
cflags += ['--tracing']
if shared.Settings.DEBUG_LEVEL >= 3:
cflags += ['-UNDEBUG', '-DDLMALLOC_DEBUG']
# TODO: consider adding -DEMMALLOC_DEBUG, but that is quite slow
else:
cflags += ['-DNDEBUG']
if not shared.Settings.SUPPORT_ERRNO:
cflags += ['-DMALLOC_FAILURE_ACTION=']
check_call([shared.PYTHON, shared.EMCC, shared.path_from_root('system', 'lib', malloc_source()), '-o', o] + cflags + get_cflags())
return o
def create_wasm_rt_lib(libname, files):
o_s = []
commands = []
for src in files:
o = in_temp(os.path.basename(src) + '.o')
# Use clang directly instead of emcc. Since emcc's intermediate format (produced by -S) is LLVM IR, there's no way to
# get emcc to output wasm .s files, which is what we archive in compiler_rt.
commands.append([
shared.CLANG_CC,
'--target={}'.format(shared.WASM_TARGET),
'-mthread-model', 'single', '-c',
shared.path_from_root('system', 'lib', src),
'-O2', '-fno-builtin', '-o', o] +
musl_internal_includes() +
shared.COMPILER_OPTS)
o_s.append(o)
run_commands(commands)
lib = in_temp(libname)
run_commands([[shared.LLVM_AR, 'cr', '-format=gnu', lib] + o_s])
return lib
def create_wasm_compiler_rt(libname):
files = files_in_path(
path_components=['system', 'lib', 'compiler-rt', 'lib', 'builtins'],
filenames=['addtf3.c', 'ashlti3.c', 'ashrti3.c', 'atomic.c', 'comparetf2.c',
'divtf3.c', 'divti3.c', 'udivmodti4.c',
'extenddftf2.c', 'extendsftf2.c',
'fixdfti.c', 'fixsfti.c', 'fixtfdi.c', 'fixtfsi.c', 'fixtfti.c',
'fixunsdfti.c', 'fixunssfti.c', 'fixunstfdi.c', 'fixunstfsi.c', 'fixunstfti.c',
'floatditf.c', 'floatsitf.c', 'floattidf.c', 'floattisf.c',
'floatunditf.c', 'floatunsitf.c', 'floatuntidf.c', 'floatuntisf.c', 'lshrti3.c',
'modti3.c', 'multc3.c', 'multf3.c', 'multi3.c', 'subtf3.c', 'udivti3.c', 'umodti3.c', 'ashrdi3.c',
'ashldi3.c', 'fixdfdi.c', 'floatdidf.c', 'lshrdi3.c', 'moddi3.c',
'trunctfdf2.c', 'trunctfsf2.c', 'umoddi3.c', 'fixunsdfdi.c', 'muldi3.c',
'divdi3.c', 'divmoddi4.c', 'udivdi3.c', 'udivmoddi4.c'])
files += files_in_path(path_components=['system', 'lib', 'compiler-rt'],
filenames=['extras.c'])
return create_wasm_rt_lib(libname, files)
def create_wasm_libc_rt(libname):
# Static linking is tricky with LLVM, since e.g. memset might not be used from libc,
# but be used as an intrinsic, and codegen will generate a libc call from that intrinsic
# *after* static linking would have thought it is all in there. In asm.js this is not an
# issue as we do JS linking anyhow, and have asm.js-optimized versions of all the LLVM
# intrinsics. But for wasm, we need a better solution. For now, make another archive
# that gets included at the same time as compiler-rt.
# Note that this also includes things that may be depended on by those functions - fmin
# uses signbit, for example, so signbit must be here (so if fmin is added by codegen,
# it will have all it needs).
math_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'math'],
filenames=[
'fmin.c', 'fminf.c', 'fminl.c',
'fmax.c', 'fmaxf.c', 'fmaxl.c',
'fmod.c', 'fmodf.c', 'fmodl.c',
'log2.c', 'log2f.c', 'log10.c', 'log10f.c',
'exp2.c', 'exp2f.c', 'exp10.c', 'exp10f.c',
'scalbn.c', '__fpclassifyl.c',
'__signbitl.c', '__signbitf.c', '__signbit.c'
])
string_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'string'],
filenames=['memset.c', 'memmove.c'])
other_files = files_in_path(
path_components=['system', 'lib', 'libc'],
filenames=['emscripten_memcpy.c'])
return create_wasm_rt_lib(libname, math_files + string_files + other_files)
# Set of libraries to include on the link line, as opposed to `force` which
# is the set of libraries to force include (with --whole-archive).
always_include = set()
# Setting this will only use the forced libs in EMCC_FORCE_STDLIBS. This avoids spending time checking
# for unresolved symbols in your project files, which can speed up linking, but if you do not have
# the proper list of actually needed libraries, errors can occur. See below for how we must
# export all the symbols in deps_info when using this option.
only_forced = os.environ.get('EMCC_ONLY_FORCED_STDLIBS')
if only_forced:
temp_files = []
# Add in some hacks for js libraries. If a js lib depends on a symbol provided by a C library, it must be
# added to here, because our deps go only one way (each library here is checked, then we check the next
# in order - libc++, libcxextra, etc. - and then we run the JS compiler and provide extra symbols from
# library*.js files. But we cannot then go back to the C libraries if a new dep was added!
# TODO: Move all __deps from src/library*.js to deps_info.json, and use that single source of info
# both here and in the JS compiler.
deps_info = json.loads(open(shared.path_from_root('src', 'deps_info.json')).read())
added = set()
def add_back_deps(need):
more = False
for ident, deps in deps_info.items():
if ident in need.undefs and ident not in added:
added.add(ident)
more = True
for dep in deps:
need.undefs.add(dep)
if shared.Settings.VERBOSE:
logging.debug('adding dependency on %s due to deps-info on %s' % (dep, ident))
shared.Settings.EXPORTED_FUNCTIONS.append('_' + dep)
if more:
add_back_deps(need) # recurse to get deps of deps
# Scan symbols
symbolses = shared.Building.parallel_llvm_nm([os.path.abspath(t) for t in temp_files])
if len(symbolses) == 0:
class Dummy(object):
defs = set()
undefs = set()
symbolses.append(Dummy())
# depend on exported functions
for export in shared.Settings.EXPORTED_FUNCTIONS:
if shared.Settings.VERBOSE:
logging.debug('adding dependency on export %s' % export)
symbolses[0].undefs.add(export[1:])
for symbols in symbolses:
add_back_deps(symbols)
# If we are only doing forced stdlibs, then we don't know the actual symbols we need,
# and must assume all of deps_info must be exported. Note that this might cause
# warnings on exports that do not exist.
if only_forced:
for key, value in deps_info.items():
for dep in value:
shared.Settings.EXPORTED_FUNCTIONS.append('_' + dep)
if shared.Settings.WASM_OBJECT_FILES:
ext = 'a'
else:
ext = 'bc'
libc_name = 'libc'
libc_deps = ['libcompiler_rt']
if shared.Settings.WASM:
libc_deps += ['libc-wasm']
if shared.Settings.USE_PTHREADS:
libc_name = 'libc-mt'
always_include.add('libpthreads')
if not shared.Settings.WASM_BACKEND:
always_include.add('libpthreads_asmjs')
else:
always_include.add('libpthreads_wasm')
else:
always_include.add('libpthreads_stub')
always_include.add(malloc_name())
if shared.Settings.WASM_BACKEND:
always_include.add('libcompiler_rt')
Library = namedtuple('Library', ['shortname', 'suffix', 'create', 'symbols', 'deps', 'can_noexcept'])
system_libs = [Library('libc++', 'a', create_libcxx, libcxx_symbols, ['libc++abi'], True), # noqa
Library('libc++abi', ext, create_libcxxabi, libcxxabi_symbols, [libc_name], False), # noqa
Library('libal', ext, create_al, al_symbols, [libc_name], False), # noqa
Library('libhtml5', ext, create_html5, html5_symbols, [], False), # noqa
Library('libcompiler_rt','a', create_compiler_rt, compiler_rt_symbols, [libc_name], False), # noqa
Library(malloc_name(), ext, create_malloc, [], [], False)] # noqa
gl_name = 'libgl'
if shared.Settings.USE_PTHREADS:
gl_name += '-mt'
if shared.Settings.LEGACY_GL_EMULATION:
gl_name += '-emu'
if shared.Settings.USE_WEBGL2:
gl_name += '-webgl2'
system_libs += [Library(gl_name, ext, create_gl, gl_symbols, [libc_name], False)] # noqa
if shared.Settings.USE_PTHREADS:
system_libs += [Library('libpthreads', ext, create_pthreads, pthreads_symbols, [libc_name], False)] # noqa
if not shared.Settings.WASM_BACKEND:
system_libs += [Library('libpthreads_asmjs', ext, create_pthreads_asmjs, asmjs_pthreads_symbols, [libc_name], False)] # noqa
else:
system_libs += [Library('libpthreads_wasm', ext, create_pthreads_wasm, [], [libc_name], False)] # noqa
else:
system_libs += [Library('libpthreads_stub', ext, create_pthreads_stub, stub_pthreads_symbols, [libc_name], False)] # noqa
system_libs.append(Library(libc_name, ext, create_libc, libc_symbols, libc_deps, False))
# if building to wasm, we need more math code, since we have fewer builtins
if shared.Settings.WASM:
system_libs.append(Library('libc-wasm', ext, create_wasm_libc, wasm_libc_symbols, [], False))
# Add libc-extras at the end, as libc may end up requiring them, and they depend on nothing.
system_libs.append(Library('libc-extras', ext, create_libc_extras, libc_extras_symbols, [], False))
libs_to_link = []
already_included = set()
system_libs_map = {l.shortname: l for l in system_libs}
# Setting this in the environment will avoid checking dependencies and make building big projects a little faster.
# Set it to 1 to include everything, or to a comma-separated list of the libraries you want (libc++, etc.).
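# Illustrative values only (the exact library names depend on the settings above), e.g.:
#   EMCC_FORCE_STDLIBS=1              # force every known system library
#   EMCC_FORCE_STDLIBS=libc,libc++    # force just these two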
force = os.environ.get('EMCC_FORCE_STDLIBS')
if force == '1':
force = ','.join(system_libs_map.keys())
force_include = set((force.split(',') if force else []) + forced)
if force_include:
logging.debug('forcing stdlibs: ' + str(force_include))
for lib in always_include:
assert lib in system_libs_map
for lib in force_include:
if lib not in system_libs_map:
shared.exit_with_error('invalid forced library: %s', lib)
def maybe_noexcept(name):
if shared.Settings.DISABLE_EXCEPTION_CATCHING:
name += '_noexcept'
return name
def add_library(lib):
if lib.shortname in already_included:
return
already_included.add(lib.shortname)
shortname = lib.shortname
if lib.can_noexcept:
shortname = maybe_noexcept(shortname)
name = shortname + '.' + lib.suffix
logging.debug('including %s' % name)
def do_create():
return lib.create(name)
libfile = shared.Cache.get(name, do_create)
need_whole_archive = lib.shortname in force_include and lib.suffix != 'bc'
libs_to_link.append((libfile, need_whole_archive))
# Recursively add dependencies
for d in lib.deps:
add_library(system_libs_map[d])
# Go over libraries to figure out which we must include
for lib in system_libs:
assert lib.shortname.startswith('lib')
if lib.shortname in already_included:
continue
force_this = lib.shortname in force_include
if not force_this and only_forced:
continue
include_this = force_this or lib.shortname in always_include
if not include_this:
need_syms = set()
has_syms = set()
for symbols in symbolses:
if shared.Settings.VERBOSE:
logging.debug('undefs: ' + str(symbols.undefs))
for library_symbol in lib.symbols:
if library_symbol in symbols.undefs:
need_syms.add(library_symbol)
if library_symbol in symbols.defs:
has_syms.add(library_symbol)
for haz in has_syms:
if haz in need_syms:
# remove symbols that are supplied by another of the inputs
need_syms.remove(haz)
if shared.Settings.VERBOSE:
logging.debug('considering %s: we need %s and have %s' % (lib.shortname, str(need_syms), str(has_syms)))
if not len(need_syms):
continue
# We need to build and link the library in
add_library(lib)
if shared.Settings.WASM_BACKEND:
libs_to_link.append((shared.Cache.get('libcompiler_rt_wasm.a', lambda: create_wasm_compiler_rt('libcompiler_rt_wasm.a')), False))
libs_to_link.append((shared.Cache.get('libc_rt_wasm.a', lambda: create_wasm_libc_rt('libc_rt_wasm.a')), False))
libs_to_link.sort(key=lambda x: x[0].endswith('.a')) # make sure to put .a files at the end.
# libc++abi and libc++ *static* linking is tricky. e.g. cxa_demangle.cpp disables c++
# exceptions, but since the string methods in the headers are *weakly* linked, then
# we might have exception-supporting versions of them from elsewhere, and if libc++abi
# is first then it would "win", breaking exception throwing from those string
# header methods. To avoid that, we link libc++abi last.
libs_to_link.sort(key=lambda x: x[0].endswith('libc++abi.bc'))
# Wrap libraries in --whole-archive, as needed. We need to do this last
# since otherwise the above sorting won't make sense.
ret = []
for name, need_whole_archive in libs_to_link:
if need_whole_archive:
ret += ['--whole-archive', name, '--no-whole-archive']
else:
ret.append(name)
return ret
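# (The returned value is a flat list of linker inputs; with forced archives it looks roughly like
#  ['<cache>/libc.bc', '--whole-archive', '<cache>/libgl.a', '--no-whole-archive'] - the paths
#  and suffixes here are illustrative only.)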
class Ports(object):
"""emscripten-ports library management (https://github.com/emscripten-ports).
"""
@staticmethod
def build_port(src_path, output_path, includes=[], flags=[], exclude_files=[], exclude_dirs=[]):
srcs = []
for root, dirs, files in os.walk(src_path, topdown=False):
if any((excluded in root) for excluded in exclude_dirs):
continue
for file in files:
if (file.endswith('.c') or file.endswith('.cpp')) and not any((excluded in file) for excluded in exclude_files):
srcs.append(os.path.join(root, file))
include_commands = ['-I' + src_path]
for include in includes:
include_commands.append('-I' + include)
commands = []
objects = []
for src in srcs:
obj = src + '.o'
commands.append([shared.PYTHON, shared.EMCC, src, '-O2', '-o', obj, '-w'] + include_commands + flags + get_cflags())
objects.append(obj)
run_commands(commands)
create_lib(output_path, objects)
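# Usage sketch (hypothetical port; names and paths are illustrative only):
#   Ports.build_port(os.path.join(src_root, 'mylib'), 'libmylib.a',
#                    includes=[os.path.join(src_root, 'mylib', 'include')],
#                    exclude_dirs=['test'])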
@staticmethod
def run_commands(commands): # make easily available for port objects
run_commands(commands)
@staticmethod
def get_dir():
dirname = os.environ.get('EM_PORTS') or os.path.expanduser(os.path.join('~', '.emscripten_ports'))
shared.safe_ensure_dirs(dirname)
return dirname
@staticmethod
def erase():
dirname = Ports.get_dir()
shared.try_delete(dirname)
if os.path.exists(dirname):
logging.warning('could not delete ports dir %s - try to delete it manually' % dirname)
@staticmethod
def get_build_dir():
return shared.Cache.get_path('ports-builds')
name_cache = set()
@staticmethod
def fetch_project(name, url, subdir, is_tarbz2=False):
fullname = os.path.join(Ports.get_dir(), name)
# if EMCC_LOCAL_PORTS is set, we use a local directory as our ports. This is useful
# for testing. This env var should be in the format
# name=dir,name=dir
# e.g.
# sdl2=/home/username/dev/ports/SDL2
# so you could run
# EMCC_LOCAL_PORTS="sdl2=/home/alon/Dev/ports/SDL2" ./tests/runner.py browser.test_sdl2_mouse
# this will simply copy that directory into the ports directory for sdl2, and use that. It also
# clears the build, so that it is rebuilt from that source.
local_ports = os.environ.get('EMCC_LOCAL_PORTS')
if local_ports:
logging.warning('using local ports: %s' % local_ports)
local_ports = [pair.split('=', 1) for pair in local_ports.split(',')]
for local in local_ports:
if name == local[0]:
path = local[1]
if name not in ports.ports_by_name:
logging.error('%s is not a known port' % name)
sys.exit(1)
port = ports.ports_by_name[name]
if not hasattr(port, 'SUBDIR'):
logging.error('port %s lacks .SUBDIR attribute, which we need in order to override it locally, please update it' % name)
sys.exit(1)
subdir = port.SUBDIR
logging.warning('grabbing local port: ' + name + ' from ' + path + ' to ' + fullname + ' (subdir: ' + subdir + ')')
shared.try_delete(fullname)
shutil.copytree(path, os.path.join(fullname, subdir))
Ports.clear_project_build(name)
return
fullpath = fullname + ('.tar.bz2' if is_tarbz2 else '.zip')
if name not in Ports.name_cache: # only mention each port once in log
logging.debug('including port: ' + name)
logging.debug(' (at ' + fullname + ')')
Ports.name_cache.add(name)
class State(object):
retrieved = False
unpacked = False
def retrieve():
# retrieve from remote server
logging.warning('retrieving port: ' + name + ' from ' + url)
try:
from urllib.request import urlopen
except ImportError:
# Python 2 compatibility
from urllib2 import urlopen
f = urlopen(url)
data = f.read()
open(fullpath, 'wb').write(data)
State.retrieved = True
def check_tag():
if is_tarbz2:
names = tarfile.open(fullpath, 'r:bz2').getnames()
else:
names = zipfile.ZipFile(fullpath, 'r').namelist()
# check if the first entry of the archive is prefixed with the same
# tag as we need, so we don't re-download and recompile if so
return bool(re.match(subdir + r'(\\|/|$)', names[0]))
def unpack():
logging.warning('unpacking port: ' + name)
shared.safe_ensure_dirs(fullname)
if is_tarbz2:
z = tarfile.open(fullpath, 'r:bz2')
else:
z = zipfile.ZipFile(fullpath, 'r')
try:
cwd = os.getcwd()
os.chdir(fullname)
z.extractall()
finally:
os.chdir(cwd)
State.unpacked = True
# main logic. do this under a cache lock, since we don't want multiple jobs to
# retrieve the same port at once
shared.Cache.acquire_cache_lock()
try:
if not os.path.exists(fullpath):
retrieve()
if not os.path.exists(fullname):
unpack()
if not check_tag():
logging.warning('local copy of port is not correct, retrieving from remote server')
shared.try_delete(fullname)
shared.try_delete(fullpath)
retrieve()
unpack()
if State.unpacked:
# we unpacked a new version, clear the build in the cache
Ports.clear_project_build(name)
finally:
shared.Cache.release_cache_lock()
@staticmethod
def build_project(name, subdir, configure, generated_libs, post_create=None):
def create():
logging.info('building port: ' + name + '...')
port_build_dir = Ports.get_build_dir()
shared.safe_ensure_dirs(port_build_dir)
libs = shared.Building.build_library(name, port_build_dir, None,
generated_libs,
source_dir=os.path.join(Ports.get_dir(), name, subdir),
copy_project=True,
configure=configure,
make=['make', '-j' + str(shared.Building.get_num_cores())])
assert len(libs) == 1
if post_create:
post_create()
return libs[0]
return shared.Cache.get(name, create)
@staticmethod
def clear_project_build(name):
shared.try_delete(os.path.join(Ports.get_build_dir(), name))
shared.try_delete(shared.Cache.get_path(name + '.bc'))
shared.try_delete(shared.Cache.get_path(name + '.a'))
@staticmethod
def build_native(subdir):
shared.Building.ensure_no_emmake('We cannot build the native system library in "%s" when under the influence of emmake/emconfigure. To avoid this, create system dirs beforehand, so they are not auto-built on demand. For example, for binaryen, do "python embuilder.py build binaryen"' % subdir)
old = os.getcwd()
try:
os.chdir(subdir)
cmake_build_type = 'Release'
# Configure
check_call(['cmake', '-DCMAKE_BUILD_TYPE=' + cmake_build_type, '.'])
# Check which CMake generator CMake used so we know which form to pass parameters to make/msbuild/etc. build tool.
generator = re.search('CMAKE_GENERATOR:INTERNAL=(.*)$', open('CMakeCache.txt', 'r').read(), re.MULTILINE).group(1)
# Make variants support '-jX' for number of cores to build, MSBuild does /maxcpucount:X
num_cores = str(shared.Building.get_num_cores())
make_args = []
if 'Makefiles' in generator and 'NMake' not in generator:
make_args = ['--', '-j', num_cores]
elif 'Visual Studio' in generator:
make_args = ['--config', cmake_build_type, '--', '/maxcpucount:' + num_cores]
# Kick off the build.
check_call(['cmake', '--build', '.'] + make_args)
finally:
os.chdir(old)
# get all ports
def get_ports(settings):
ret = []
try:
process_dependencies(settings)
for port in ports.ports:
# ports return their output files, which will be linked, or a txt file
ret += [f for f in port.get(Ports, settings, shared) if not f.endswith('.txt')]
except:
logging.error('a problem occurred when using an emscripten-ports library. try to run `emcc --clear-ports` and then run this command again')
raise
ret.reverse()
return ret
def process_dependencies(settings):
for port in reversed(ports.ports):
if hasattr(port, "process_dependencies"):
port.process_dependencies(settings)
def process_args(args, settings):
process_dependencies(settings)
for port in ports.ports:
args = port.process_args(Ports, args, settings, shared)
return args
# get a single port
def get_port(name, settings):
port = ports.ports_by_name[name]
if hasattr(port, "process_dependencies"):
port.process_dependencies(settings)
# ports return their output files, which will be linked, or a txt file
return [f for f in port.get(Ports, settings, shared) if not f.endswith('.txt')]
def show_ports():
print('Available ports:')
for port in ports.ports:
print(' ', port.show())
|
the-stack_106_17925
|
"""Remote vehicle services for Subaru integration."""
import logging
from subarulink.exceptions import SubaruException
from homeassistant.exceptions import HomeAssistantError
from .const import SERVICE_UNLOCK, VEHICLE_NAME, VEHICLE_VIN
_LOGGER = logging.getLogger(__name__)
async def async_call_remote_service(controller, cmd, vehicle_info, arg=None):
"""Execute subarulink remote command."""
car_name = vehicle_info[VEHICLE_NAME]
vin = vehicle_info[VEHICLE_VIN]
_LOGGER.debug("Sending %s command command to %s", cmd, car_name)
success = False
err_msg = ""
try:
if cmd == SERVICE_UNLOCK:
success = await getattr(controller, cmd)(vin, arg)
else:
success = await getattr(controller, cmd)(vin)
except SubaruException as err:
err_msg = err.message
if success:
_LOGGER.debug("%s command successfully completed for %s", cmd, car_name)
return
raise HomeAssistantError(f"Service {cmd} failed for {car_name}: {err_msg}")
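# Usage sketch (illustrative only - `controller` and `vehicle_info` are assumed to come from the
# integration's setup code, and the extra `arg` is whatever the unlock call expects):
#   await async_call_remote_service(
#       controller, SERVICE_UNLOCK,
#       {VEHICLE_NAME: "My Subaru", VEHICLE_VIN: "JF1XXXXXXXXXXXXXX"}, arg="1234")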
|
the-stack_106_17928
|
"""Module for concatenating netCDF files."""
from typing import Union, Optional
import numpy as np
import logging
import netCDF4
from cloudnetpy import utils
def update_nc(old_file: str, new_file: str) -> int:
"""Appends data to existing netCDF file.
Args:
old_file: Filename of a existing netCDF file.
new_file: Filename of a new file whose data will be appended to the end.
Returns:
1 = success, 0 = failed to add new data.
Notes:
Requires 'time' variable with unlimited dimension.
"""
success = 0
try:
nc_new = netCDF4.Dataset(new_file)
except OSError:
return 0
nc_old = netCDF4.Dataset(old_file, 'a')
valid_ind = _find_valid_time_indices(nc_old, nc_new)
if len(valid_ind) > 0:
_update_fields(nc_old, nc_new, valid_ind)
success = 1
nc_new.close()
nc_old.close()
return success
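# Usage sketch (file names are hypothetical): append any newer time steps from a fresh file
# onto an existing daily file; returns 1 on success, 0 if the new file could not be read.
#   ok = update_nc('radar_day.nc', 'radar_latest.nc')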
def concatenate_files(filenames: list,
output_file: str,
concat_dimension: Optional[str] = 'time',
variables: Optional[list] = None,
new_attributes: Optional[dict] = None) -> None:
"""Concatenate netCDF files in one dimension.
Args:
filenames: List of files to be concatenated.
output_file: Output file name.
concat_dimension: Dimension name for concatenation. Default is 'time'.
variables: List of variables with the 'concat_dimension' to be concatenated.
Default is None, in which case all variables with 'concat_dimension' will be saved.
new_attributes: Optional new global attributes as {'attribute_name': value}.
Notes:
Arrays without 'concat_dimension', scalars, and global attributes will be taken from
the first file. Groups, possibly present in a NETCDF4 formatted file, are ignored.
"""
concat = Concat(filenames, output_file, concat_dimension)
concat.get_constants()
concat.create_global_attributes(new_attributes)
concat.concat_data(variables)
concat.close()
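# Usage sketch (file names and attributes are hypothetical):
#   concatenate_files(['chunk_00.nc', 'chunk_01.nc'], 'day.nc',
#                     concat_dimension='time',
#                     new_attributes={'history': 'concatenated'})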
class Concat:
def __init__(self,
filenames: list,
output_file: str,
concat_dimension: Optional[str] = 'time'):
self.filenames = sorted(filenames)
self.concat_dimension = concat_dimension
self.first_file = netCDF4.Dataset(self.filenames[0])
self.concatenated_file = self._init_output_file(output_file)
self.constants = ()
def create_global_attributes(self, new_attributes: Union[dict, None]) -> None:
"""Copies global attributes from one of the source files."""
_copy_attributes(self.first_file, self.concatenated_file)
if new_attributes is not None:
for key, value in new_attributes.items():
setattr(self.concatenated_file, key, value)
def get_constants(self):
"""Finds constants, i.e. arrays that have no concat_dimension and are not concatenated."""
for key, value in self.first_file.variables.items():
try:
dims = self._get_dim(value[:])
except np.core._exceptions.UFuncTypeError:
logging.warning(f'Problem with reading {key} - skipping it')
continue
if self.concat_dimension not in dims:
self.constants += (key,)
def close(self):
"""Closes open files."""
self.first_file.close()
self.concatenated_file.close()
def concat_data(self, variables: Optional[list] = None):
"""Concatenates data arrays."""
self._write_initial_data(variables)
if len(self.filenames) > 1:
for filename in self.filenames[1:]:
self._append_data(filename)
def _write_initial_data(self, variables: Union[list, None]) -> None:
for key in self.first_file.variables.keys():
if (variables is not None and key not in variables
and key not in self.constants and key != self.concat_dimension):
continue
self.first_file[key].set_auto_scale(False)
array = self.first_file[key][:]
dimensions = self._get_dim(array)
fill_value = getattr(self.first_file[key], '_FillValue', None)
var = self.concatenated_file.createVariable(key, array.dtype, dimensions, zlib=True,
complevel=3, shuffle=False,
fill_value=fill_value)
var.set_auto_scale(False)
var[:] = array
_copy_attributes(self.first_file[key], var)
def _append_data(self, filename: str) -> None:
file = netCDF4.Dataset(filename)
file.set_auto_scale(False)
ind0 = len(self.concatenated_file.variables[self.concat_dimension])
ind1 = ind0 + len(file.variables[self.concat_dimension])
for key in self.concatenated_file.variables.keys():
array = file[key][:]
if array.ndim == 0 or key in self.constants:
continue
if array.ndim == 1:
self.concatenated_file.variables[key][ind0:ind1] = array
else:
self.concatenated_file.variables[key][ind0:ind1, :] = array
file.close()
def _get_dim(self, array: np.ndarray) -> tuple:
"""Returns tuple of dimension names, e.g., ('time', 'range') that match the array size."""
if utils.isscalar(array):
return ()
variable_size = ()
file_dims = self.concatenated_file.dimensions
for length in array.shape:
try:
dim = [key for key in file_dims.keys() if file_dims[key].size == length][0]
except IndexError:
dim = self.concat_dimension
variable_size += (dim,)
return variable_size
def _init_output_file(self, output_file: str) -> netCDF4.Dataset:
data_model = 'NETCDF4' if self.first_file.data_model == 'NETCDF4' else 'NETCDF4_CLASSIC'
nc = netCDF4.Dataset(output_file, 'w', format=data_model)
for dim in self.first_file.dimensions.keys():
dim_len = None if dim == self.concat_dimension else self.first_file.dimensions[dim].size
nc.createDimension(dim, dim_len)
return nc
def _copy_attributes(source: netCDF4.Dataset, target: netCDF4.Dataset) -> None:
for attr in source.ncattrs():
if attr != '_FillValue':
value = getattr(source, attr)
setattr(target, attr, value)
def _find_valid_time_indices(nc_old: netCDF4.Dataset, nc_new: netCDF4.Dataset):
return np.where(nc_new.variables['time'][:] > nc_old.variables['time'][-1])[0]
def _update_fields(nc_old: netCDF4.Dataset, nc_new: netCDF4.Dataset, valid_ind: list):
ind0 = len(nc_old.variables['time'])
idx = [ind0 + x for x in valid_ind]
concat_dimension = nc_old.variables['time'].dimensions[0]
for field in nc_new.variables:
if field not in nc_old.variables:
continue
dimensions = nc_new.variables[field].dimensions
if concat_dimension in dimensions:
concat_ind = dimensions.index(concat_dimension)
if len(dimensions) == 1:
nc_old.variables[field][idx] = nc_new.variables[field][valid_ind]
elif len(dimensions) == 2 and concat_ind == 0:
nc_old.variables[field][idx, :] = nc_new.variables[field][valid_ind, :]
elif len(dimensions) == 2 and concat_ind == 1:
nc_old.variables[field][:, idx] = nc_new.variables[field][:, valid_ind]
|
the-stack_106_17929
|
import codecs
from functools import wraps
import re
import textwrap
from typing import TYPE_CHECKING, Any, Callable, Dict, List
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_re,
is_scalar,
is_string_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
from pandas._typing import ArrayLike, Dtype
from pandas.core.algorithms import take_1d
from pandas.core.base import NoNewAttributesMixin
import pandas.core.common as com
from pandas.core.construction import extract_array
if TYPE_CHECKING:
from pandas.arrays import StringArray
_cpython_optimized_encoders = (
"utf-8",
"utf8",
"latin-1",
"latin1",
"iso-8859-1",
"mbcs",
"ascii",
)
_cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32")
_shared_docs: Dict[str, str] = dict()
def cat_core(list_of_columns: List, sep: str):
"""
Auxiliary function for :meth:`str.cat`
Parameters
----------
list_of_columns : list of numpy arrays
List of arrays to be concatenated with sep;
these arrays may not contain NaNs!
sep : string
The separator string for concatenating the columns.
Returns
-------
nd.array
The concatenation of list_of_columns with sep.
"""
if sep == "":
# no need to interleave sep if it is empty
return np.sum(list_of_columns, axis=0)
list_with_sep = [sep] * (2 * len(list_of_columns) - 1)
list_with_sep[::2] = list_of_columns
return np.sum(list_with_sep, axis=0)
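# Rough illustration (not a doctest): with object-dtype columns ['a', 'b'] and ['1', '2'] and
# sep='-', list_with_sep becomes [col0, '-', col1]; the np.sum reduction then concatenates
# element-wise, yielding ['a-1', 'b-2'].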
def cat_safe(list_of_columns: List, sep: str):
"""
Auxiliary function for :meth:`str.cat`.
Same signature as cat_core, but handles TypeErrors in concatenation, which
happen if the arrays in list_of_columns have the wrong dtypes or content.
Parameters
----------
list_of_columns : list of numpy arrays
List of arrays to be concatenated with sep;
these arrays may not contain NaNs!
sep : string
The separator string for concatenating the columns.
Returns
-------
nd.array
The concatenation of list_of_columns with sep.
"""
try:
result = cat_core(list_of_columns, sep)
except TypeError:
# if there are any non-string values (wrong dtype or hidden behind
# object dtype), np.sum will fail; catch and return with better message
for column in list_of_columns:
dtype = lib.infer_dtype(column, skipna=True)
if dtype not in ["string", "empty"]:
raise TypeError(
"Concatenation requires list-likes containing only "
"strings (or missing values). Offending values found in "
"column {}".format(dtype)
) from None
return result
def _na_map(f, arr, na_result=np.nan, dtype=object):
# should really _check_ for NA
if is_extension_array_dtype(arr.dtype):
# just StringDtype
arr = extract_array(arr)
return _map_stringarray(f, arr, na_value=na_result, dtype=dtype)
return _map_object(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map_stringarray(
func: Callable[[str], Any], arr: "StringArray", na_value: Any, dtype: Dtype
) -> ArrayLike:
"""
Map a callable over valid elements of a StringArrray.
Parameters
----------
func : Callable[[str], Any]
Apply to each valid element.
arr : StringArray
na_value : Any
The value to use for missing values. By default, this is
the original value (NA).
dtype : Dtype
The result dtype to use. Specifying this avoids an intermediate
object-dtype allocation.
Returns
-------
ArrayLike
An ExtensionArray for integer or string dtypes, otherwise
an ndarray.
"""
from pandas.arrays import IntegerArray, StringArray
mask = isna(arr)
assert isinstance(arr, StringArray)
arr = np.asarray(arr)
if is_integer_dtype(dtype):
na_value_is_na = isna(na_value)
if na_value_is_na:
na_value = 1
result = lib.map_infer_mask(
arr,
func,
mask.view("uint8"),
convert=False,
na_value=na_value,
dtype=np.dtype("int64"),
)
if not na_value_is_na:
mask[:] = False
return IntegerArray(result, mask)
elif is_string_dtype(dtype) and not is_object_dtype(dtype):
# i.e. StringDtype
result = lib.map_infer_mask(
arr, func, mask.view("uint8"), convert=False, na_value=na_value
)
return StringArray(result)
# TODO: BooleanArray
else:
# This is when the result type is object. We reach this when
# -> We know the result type is truly object (e.g. .encode returns bytes
# or .findall returns a list).
# -> We don't know the result type. E.g. `.get` can return anything.
return lib.map_infer_mask(arr, func, mask.view("uint8"))
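# Rough illustration (not a doctest): mapping str.upper over a StringArray of ['a', <NA>] with
# na_value=<NA> and dtype="string" yields a StringArray of ['A', <NA>]; with an integer dtype
# the masked result is wrapped in an IntegerArray instead.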
def _map_object(f, arr, na_mask=False, na_value=np.nan, dtype=object):
if not len(arr):
return np.ndarray(0, dtype=dtype)
if isinstance(arr, ABCSeries):
arr = arr.values
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr, dtype=object)
if na_mask:
mask = isna(arr)
convert = not np.all(mask)
try:
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
except (TypeError, AttributeError) as e:
# Reraise the exception if callable `f` got wrong number of args.
# The user may want to be warned by this, instead of getting NaN
p_err = (
r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "
r"(?(3)required )positional arguments?"
)
if len(e.args) >= 1 and re.search(p_err, e.args[0]):
# FIXME: this should be totally avoidable
raise e
def g(x):
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return _map_object(g, arr, dtype=dtype)
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
else:
return lib.map_infer(arr, f)
def str_count(arr, pat, flags=0):
"""
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the
:class:`~pandas.Series`.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0, meaning no flags
Flags for the `re` module. For a complete list, `see here
<https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
**kwargs
For compatibility with other string methods. Not used.
Returns
-------
Series or Index
Same type as the calling object containing the integer counts.
See Also
--------
re : Standard library module for regular expressions.
str.count : Standard library version, without regular expression support.
Notes
-----
Some characters need to be escaped when passing in `pat`.
e.g. ``'$'`` has a special meaning in regex and must be escaped when
finding this literal character.
Examples
--------
>>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
>>> s.str.count('a')
0 0.0
1 0.0
2 2.0
3 2.0
4 NaN
5 0.0
6 1.0
dtype: float64
Escape ``'$'`` to find the literal dollar sign.
>>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
>>> s.str.count('\\$')
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int64
This is also available on Index
>>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')
Int64Index([0, 0, 2, 1], dtype='int64')
"""
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return _na_map(f, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
"""
Test if pattern or regex is contained within a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern or regex is
contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series or Index of boolean values
A Series or Index of boolean values indicating whether the
given pattern is contained within the string of each element
of the Series or Index.
See Also
--------
match : Analogous, but stricter, relying on re.match instead of re.search.
Series.str.startswith : Test if the start of each string element matches a
pattern.
Series.str.endswith : Same as startswith, but tests the end of string.
Examples
--------
Returning a Series of booleans using only a literal pattern.
>>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 NaN
dtype: object
Returning an Index of booleans using only a literal pattern.
>>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])
>>> ind.str.contains('23', regex=False)
Index([False, False, False, True, nan], dtype='object')
Specifying case sensitivity using `case`.
>>> s1.str.contains('oG', case=True, regex=True)
0 False
1 False
2 False
3 False
4 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN` replaces NaN values
with `False`. If Series or Index does not contain NaN values
the resultant dtype will be `bool`, otherwise, an `object` dtype.
>>> s1.str.contains('og', na=False, regex=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 NaN
dtype: object
Ignoring case sensitivity using `flags` with regex.
>>> import re
>>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
3 False
4 NaN
dtype: object
Returning any digit using regular expression.
>>> s1.str.contains('\\d', regex=True)
0 False
1 False
2 False
3 True
4 NaN
dtype: object
Ensure `pat` is not a literal pattern when `regex` is set to True.
Note in the following example one might expect only `s2[1]` and `s2[3]` to
return `True`. However, '.0' as a regex matches any character
followed by a 0.
>>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0:
warnings.warn(
"This pattern has match groups. To actually get the"
" groups, use str.extract.",
UserWarning,
stacklevel=3,
)
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x
uppered = _na_map(lambda x: x.upper(), arr)
return _na_map(f, uppered, na, dtype=bool)
return _na_map(f, arr, na, dtype=bool)
def str_startswith(arr, pat, na=np.nan):
"""
Test if the start of each string element matches a pattern.
Equivalent to :meth:`str.startswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the start of each string element.
See Also
--------
str.startswith : Python standard library string method.
Series.str.endswith : Same as startswith, but tests the end of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 NaN
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.startswith('b', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
"""
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
r"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
:func:`re.sub`.
Parameters
----------
pat : str or compiled regex
String can be a character sequence or regular expression.
repl : str or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
n : int, default -1 (all)
Number of replacements to make from start.
case : bool, default None
Determines if replace is case sensitive:
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex.
flags : int, default 0 (no flags)
Regex module flags, e.g. re.IGNORECASE. Cannot be set if `pat` is a compiled
regex.
regex : bool, default True
Determines whether the passed-in pattern is a regular expression:
- If True, assumes the passed-in pattern is a regular expression.
- If False, treats the pattern as a literal string
- Cannot be set to False if `pat` is a compiled regex or `repl` is
a callable.
.. versionadded:: 0.23.0
Returns
-------
Series or Index of object
A copy of the object with all matching occurrences of `pat` replaced by
`repl`.
Raises
------
ValueError
* if `regex` is False and `repl` is a callable or `pat` is a compiled
regex
* if `pat` is a compiled regex and `case` or `flags` is set
Notes
-----
When `pat` is a compiled regex, all flags should be included in the
compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled
regex will raise an error.
Examples
--------
When `pat` is a string and `regex` is True (the default), the given `pat`
is compiled as a regex. When `repl` is a string, it replaces matching
regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are
left as is:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)
0 bao
1 baz
2 NaN
dtype: object
When `pat` is a string and `regex` is False, every `pat` is replaced with
`repl` as with :meth:`str.replace`:
>>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)
0 bao
1 fuz
2 NaN
dtype: object
When `repl` is a callable, it is called on every `pat` using
:func:`re.sub`. The callable should expect one positional argument
(a regex match object) and return a string.
To get the idea:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo
1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz
2 NaN
dtype: object
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
>>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
0 oof 123
1 rab zab
2 NaN
dtype: object
Using regex groups (extract second group and swap case):
>>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
>>> repl = lambda m: m.group('two').swapcase()
>>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
0 tWO
1 bAR
dtype: object
Using a compiled regex with flags
>>> import re
>>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
0 foo
1 bar
2 NaN
dtype: object
"""
# Check whether repl is valid (GH 13438, GH 15055)
if not (isinstance(repl, str) or callable(repl)):
raise TypeError("repl must be a string or callable")
is_compiled_re = is_re(pat)
if regex:
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError(
"case and flags cannot be set when pat is a compiled regex"
)
else:
# not a compiled regex
# set default case
if case is None:
case = True
# add case flag, if provided
if case is False:
flags |= re.IGNORECASE
if is_compiled_re or len(pat) > 1 or flags or callable(repl):
n = n if n >= 0 else 0
compiled = re.compile(pat, flags=flags)
f = lambda x: compiled.sub(repl=repl, string=x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
else:
if is_compiled_re:
raise ValueError(
"Cannot use a compiled regex as replacement pattern with regex=False"
)
if callable(repl):
raise ValueError("Cannot use a callable replacement when regex=False")
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr, dtype=str)
def str_repeat(arr, repeats):
"""
Duplicate each string in the Series or Index.
Parameters
----------
repeats : int or sequence of int
Same value for all (int) or different value per (sequence).
Returns
-------
Series or Index of object
Series or Index of repeated string objects specified by
input parameter repeats.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
Single int repeats string in Series
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
Sequence of int repeats corresponding string in Series
>>> s.str.repeat(repeats=[1, 2, 3])
0 a
1 bb
2 ccc
dtype: object
"""
if is_scalar(repeats):
def scalar_rep(x):
try:
return bytes.__mul__(x, repeats)
except TypeError:
return str.__mul__(x, repeats)
return _na_map(scalar_rep, arr, dtype=str)
else:
def rep(x, r):
try:
return bytes.__mul__(x, r)
except TypeError:
return str.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
result = libops.vec_binop(com.values_from_object(arr), repeats, rep)
return result
def str_match(arr, pat, case=True, flags=0, na=np.nan):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Regex module flags, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
Returns
-------
Series/array of boolean values
See Also
--------
contains : Analogous, but less strict, relying on re.search instead of
re.match.
extract : Extract matched groups.
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
dtype = bool
f = lambda x: bool(regex.match(x))
return _na_map(f, arr, na, dtype=dtype)
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, str):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
def _result_dtype(arr):
# workaround #27953
# ideally we just pass `dtype=arr.dtype` unconditionally, but this fails
# when the list of values is empty.
if arr.dtype.name == "string":
return "string"
else:
return object
def _str_extract_noexpand(arr, pat, flags=0):
"""
Find groups in each string in the Series using passed regular
expression. This function is called from
str_extract(expand=False), and can return Series, DataFrame, or
Index.
"""
from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
if regex.groups == 1:
result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
name = _get_single_group_name(regex)
else:
if isinstance(arr, ABCIndexClass):
raise ValueError("only one regex group is supported with Index")
name = None
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if arr.empty:
result = DataFrame(columns=columns, dtype=object)
else:
result = DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=arr.index,
dtype=object,
)
return result, name
def _str_extract_frame(arr, pat, flags=0):
"""
For each subject string in the Series, extract groups from the
first match of regular expression pat. This function is called from
str_extract(expand=True), and always returns a DataFrame.
"""
from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if len(arr) == 0:
return DataFrame(columns=columns, dtype=object)
try:
result_index = arr.index
except AttributeError:
result_index = None
dtype = _result_dtype(arr)
return DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=result_index,
dtype=dtype,
)
def str_extract(arr, pat, flags=0, expand=True):
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r'([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
return _str_extract_frame(arr._orig, pat, flags=flags)
else:
result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
r"""
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
A ``re`` module flag, for example ``re.IGNORECASE``. These allow
to modify regular expression matching for things like case, spaces,
etc. Multiple flags can be combined with the bitwise OR operator,
for example ``re.IGNORECASE | re.MULTILINE``.
Returns
-------
DataFrame
A ``DataFrame`` with one row for each match, and one column for each
group. Its rows have a ``MultiIndex`` with first levels that come from
the subject ``Series``. The last level is named 'match' and indexes the
matches in each item of the ``Series``. Any capture group names in
regular expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : Returns first match only (not all matches).
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall(r"[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall(r"[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
regex = re.compile(pat, flags=flags)
# the regex must contain capture groups.
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
if isinstance(arr, ABCIndexClass):
arr = arr.to_series().reset_index(drop=True)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
match_list = []
index_list = []
is_mi = arr.index.nlevels > 1
for subject_key, subject in arr.items():
if isinstance(subject, str):
if not is_mi:
subject_key = (subject_key,)
for match_i, match_tuple in enumerate(regex.findall(subject)):
if isinstance(match_tuple, str):
match_tuple = (match_tuple,)
na_tuple = [np.NaN if group == "" else group for group in match_tuple]
match_list.append(na_tuple)
result_key = tuple(subject_key + (match_i,))
index_list.append(result_key)
from pandas import MultiIndex
index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
dtype = _result_dtype(arr)
result = arr._constructor_expanddim(
match_list, index=index, columns=columns, dtype=dtype
)
return result
def str_get_dummies(arr, sep="|"):
"""
Split each string in the Series by sep and return a DataFrame
of dummy/indicator variables.
Parameters
----------
sep : str, default "|"
String to split on.
Returns
-------
DataFrame
Dummy variables corresponding to values of the Series.
See Also
--------
get_dummies : Convert categorical variable into dummy/indicator
variables.
Examples
--------
>>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
>>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
2 1 0 1
"""
arr = arr.fillna("")
try:
arr = sep + arr + sep
except TypeError:
arr = sep + arr.astype(str) + sep
tags = set()
for ts in arr.str.split(sep):
tags.update(ts)
tags = sorted(tags - {""})
dummies = np.empty((len(arr), len(tags)), dtype=np.int64)
for i, t in enumerate(tags):
pat = sep + t + sep
dummies[:, i] = lib.map_infer(arr.to_numpy(), lambda x: pat in x)
return dummies, tags
def str_join(arr, sep):
"""
Join lists contained as elements in the Series/Index with passed delimiter.
If the elements of a Series are lists themselves, join the content of these
lists using the delimiter passed to the function.
This function is an equivalent to :meth:`str.join`.
Parameters
----------
sep : str
Delimiter to use between list entries.
Returns
-------
Series/Index: object
The list entries concatenated by intervening occurrences of the
delimiter.
Raises
------
AttributeError
If the supplied Series contains neither strings nor lists.
See Also
--------
str.join : Standard library version of this method.
Series.str.split : Split strings around given separator/delimiter.
Notes
-----
If any of the list items is not a string object, the result of the join
will be `NaN`.
Examples
--------
Example with a list that contains non-string elements.
>>> s = pd.Series([['lion', 'elephant', 'zebra'],
... [1.1, 2.2, 3.3],
... ['cat', np.nan, 'dog'],
... ['cow', 4.5, 'goat'],
... ['duck', ['swan', 'fish'], 'guppy']])
>>> s
0 [lion, elephant, zebra]
1 [1.1, 2.2, 3.3]
2 [cat, nan, dog]
3 [cow, 4.5, goat]
4 [duck, [swan, fish], guppy]
dtype: object
Join all lists using a '-'. The lists containing object(s) of types other
than str will produce a NaN.
>>> s.str.join('-')
0 lion-elephant-zebra
1 NaN
2 NaN
3 NaN
4 NaN
dtype: object
"""
return _na_map(sep.join, arr, dtype=str)
def str_findall(arr, pat, flags=0):
"""
Find all occurrences of pattern or regular expression in the Series/Index.
Equivalent to applying :func:`re.findall` to all the elements in the
Series/Index.
Parameters
----------
pat : str
Pattern or regular expression.
flags : int, default 0
Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which
means no flags).
Returns
-------
Series/Index of lists of strings
All non-overlapping matches of pattern or regular expression in each
string of this Series/Index.
See Also
--------
count : Count occurrences of pattern or regular expression in each string
of the Series/Index.
extractall : For each string in the Series, extract groups from all matches
of regular expression and return a DataFrame with one row for each
match and one column for each group.
re.findall : The equivalent ``re`` function to all non-overlapping matches
of pattern or regular expression in string, as a list of strings.
Examples
--------
>>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])
The search for the pattern 'Monkey' returns one match:
>>> s.str.findall('Monkey')
0 []
1 [Monkey]
2 []
dtype: object
On the other hand, the search for the pattern 'MONKEY' doesn't return any
match:
>>> s.str.findall('MONKEY')
0 []
1 []
2 []
dtype: object
Flags can be added to the pattern or regular expression. For instance,
to find the pattern 'MONKEY' ignoring the case:
>>> import re
>>> s.str.findall('MONKEY', flags=re.IGNORECASE)
0 []
1 [Monkey]
2 []
dtype: object
When the pattern matches more than one string in the Series, all matches
are returned:
>>> s.str.findall('on')
0 [on]
1 [on]
2 []
dtype: object
Regular expressions are supported too. For instance, the search for all the
strings ending with the word 'on' is shown next:
>>> s.str.findall('on$')
0 [on]
1 []
2 []
dtype: object
If the pattern is found more than once in the same string, then a list of
multiple strings is returned:
>>> s.str.findall('b')
0 []
1 []
2 [b, b]
dtype: object
"""
regex = re.compile(pat, flags=flags)
return _na_map(regex.findall, arr)
def str_find(arr, sub, start=0, end=None, side="left"):
"""
Return indexes in each strings in the Series/Index where the
substring is fully contained between [start:end]. Return -1 on failure.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
side : {'left', 'right'}, default 'left'
Specifies a starting side, equivalent to ``find`` or ``rfind``.
Returns
-------
Series or Index
Indexes where substring is found.
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if side == "left":
method = "find"
elif side == "right":
method = "rfind"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
def str_index(arr, sub, start=0, end=None, side="left"):
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if side == "left":
method = "index"
elif side == "right":
method = "rindex"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
def str_pad(arr, width, side="left", fillchar=" "):
"""
Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with character defined in `fillchar`.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
Series or Index of object
Returns Series or Index with strings padded to at least `width` characters.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='left')``.
Series.str.ljust : Fills the right side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='right')``.
Series.str.center : Fills both sides of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='both')``.
Series.str.zfill : Pad strings in the Series/Index by prepending '0'
character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> s = pd.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
if not isinstance(fillchar, str):
msg = "fillchar must be a character, not {0}"
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = "width must be of integer type, not {0}"
raise TypeError(msg.format(type(width).__name__))
if side == "left":
f = lambda x: x.rjust(width, fillchar)
elif side == "right":
f = lambda x: x.ljust(width, fillchar)
elif side == "both":
f = lambda x: x.center(width, fillchar)
else: # pragma: no cover
raise ValueError("Invalid side")
return _na_map(f, arr, dtype=str)
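# Note: str_split dispatches on the pattern - a missing or single-character `pat` uses
# str.split directly, while a longer `pat` is compiled and handed to re.split (with `n`
# mapped to maxsplit); str_rsplit always uses str.rsplit.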
def str_split(arr, pat=None, n=None):
if pat is None:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if len(pat) == 1:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if n is None or n == -1:
n = 0
regex = re.compile(pat)
f = lambda x: regex.split(x, maxsplit=n)
res = _na_map(f, arr)
return res
def str_rsplit(arr, pat=None, n=None):
if n is None or n == 0:
n = -1
f = lambda x: x.rsplit(pat, n)
res = _na_map(f, arr)
return res
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series or Index of object
Series or Index from sliced substring from original string object.
See Also
--------
Series.str.slice_replace : Replace a slice with a string.
Series.str.get : Return element at position.
Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
being the position.
Examples
--------
>>> s = pd.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(start=-1)
0 a
1 x
2 n
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
Equivalent behaviour to:
>>> s.str[0:5:3]
0 kl
1 f
2 cm
dtype: object
"""
obj = slice(start, stop, step)
f = lambda x: x[obj]
return _na_map(f, arr, dtype=str)
def str_slice_replace(arr, start=None, stop=None, repl=None):
"""
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series or Index
Same type as the original object.
See Also
--------
Series.str.slice : Just slicing without replacement.
Examples
--------
>>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start` to `stop` is
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
if repl is None:
repl = ""
def f(x):
if x[start:stop] == "":
local_stop = start
else:
local_stop = stop
y = ""
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return _na_map(f, arr, dtype=str)
def str_strip(arr, to_strip=None, side="both"):
"""
Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
Series or Index
"""
if side == "both":
f = lambda x: x.strip(to_strip)
elif side == "left":
f = lambda x: x.lstrip(to_strip)
elif side == "right":
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError("Invalid side")
return _na_map(f, arr, dtype=str)
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
Returns
-------
Series or Index
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object
"""
kwargs["width"] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: "\n".join(tw.wrap(s)), arr, dtype=str)
def str_translate(arr, table):
"""
Map all characters in the string through the given mapping table.
Equivalent to standard :meth:`str.translate`.
Parameters
----------
table : dict
Table is a mapping of Unicode ordinals to Unicode ordinals, strings, or
None. Unmapped characters are left untouched.
Characters mapped to None are deleted. :meth:`str.maketrans` is a
helper function for making translation tables.
Returns
-------
Series or Index
"""
return _na_map(lambda x: x.translate(table), arr, dtype=str)
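# Illustrative sketch (not part of the original source): the table argument is usually built
# with the standard-library helper str.maketrans and then passed through the accessor.
# >>> table = str.maketrans({"a": "b", "c": None})  # map 'a' -> 'b', delete 'c'
# >>> pd.Series(["abc", "cab"]).str.translate(table)
# 0    bb
# 1    bb
# dtype: object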
def str_get(arr, i):
"""
Extract element from each component at specified position.
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series or Index
Examples
--------
>>> s = pd.Series(["String",
... (1, 2, 3),
... ["a", "b", "c"],
... 123,
... -456,
... {1: "Hello", "2": "World"}])
>>> s
0 String
1 (1, 2, 3)
2 [a, b, c]
3 123
4 -456
5 {1: 'Hello', '2': 'World'}
dtype: object
>>> s.str.get(1)
0 t
1 2
2 b
3 NaN
4 NaN
5 Hello
dtype: object
>>> s.str.get(-1)
0 g
1 3
2 c
3 NaN
4 NaN
5 None
dtype: object
"""
def f(x):
if isinstance(x, dict):
return x.get(i)
elif len(x) > i >= -len(x):
return x[i]
return np.nan
return _na_map(f, arr)
def str_decode(arr, encoding, errors="strict"):
"""
Decode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
python3.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
Series or Index
"""
if encoding in _cpython_optimized_decoders:
# CPython optimized implementation
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
return _na_map(f, arr)
def str_encode(arr, encoding, errors="strict"):
"""
Encode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.encode`.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
encoded : Series/Index of objects
"""
if encoding in _cpython_optimized_encoders:
# CPython optimized implementation
f = lambda x: x.encode(encoding, errors)
else:
encoder = codecs.getencoder(encoding)
f = lambda x: encoder(x, errors)[0]
return _na_map(f, arr)
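# Illustrative sketch (not part of the original source): a round trip through the two helpers
# above via the accessor; "utf-8" takes the CPython-optimized branch, while other codecs fall
# back to codecs.getencoder/getdecoder.
# >>> s = pd.Series(["naïve", "café"])
# >>> encoded = s.str.encode("utf-8")  # Series of bytes objects
# >>> encoded.str.decode("utf-8").equals(s)
# True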
def forbid_nonstring_types(forbidden, name=None):
"""
Decorator to forbid specific types for a method of StringMethods.
For calling `.str.{method}` on a Series or Index, it is necessary to first
initialize the :class:`StringMethods` object, and then call the method.
However, different methods allow different input types, and so this can not
be checked during :meth:`StringMethods.__init__`, but must be done on a
per-method basis. This decorator exists to facilitate this process, and
make it explicit which (inferred) types are disallowed by the method.
:meth:`StringMethods.__init__` allows the *union* of types its different
methods allow (after skipping NaNs; see :meth:`StringMethods._validate`),
namely: ['string', 'empty', 'bytes', 'mixed', 'mixed-integer'].
The default string types ['string', 'empty'] are allowed for all methods.
For the additional types ['bytes', 'mixed', 'mixed-integer'], each method
then needs to forbid the types it is not intended for.
Parameters
----------
forbidden : list-of-str or None
List of forbidden non-string types, may be one or more of
`['bytes', 'mixed', 'mixed-integer']`.
name : str, default None
Name of the method to use in the error message. By default, this is
None, in which case the name from the method being wrapped will be
copied. However, for working with further wrappers (like _pat_wrapper
and _noarg_wrapper), it is necessary to specify the name.
Returns
-------
func : wrapper
The method to which the decorator is applied, with an added check that
enforces the inferred type to not be in the list of forbidden types.
Raises
------
TypeError
If the inferred type of the underlying data is in `forbidden`.
"""
# deal with None
forbidden = [] if forbidden is None else forbidden
allowed_types = {"string", "empty", "bytes", "mixed", "mixed-integer"} - set(
forbidden
)
def _forbid_nonstring_types(func):
func_name = func.__name__ if name is None else name
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._inferred_dtype not in allowed_types:
msg = (
f"Cannot use .str.{func_name} with values of inferred dtype "
f"{repr(self._inferred_dtype)}."
)
raise TypeError(msg)
return func(self, *args, **kwargs)
wrapper.__name__ = func_name
return wrapper
return _forbid_nonstring_types
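# Illustrative sketch (not part of the original source): the decorator is applied per accessor
# method, exactly as done further below, e.g.
#
#     @forbid_nonstring_types(["bytes"])
#     def wrap(self, width, **kwargs):
#         ...
#
# Calling such a method on data whose inferred dtype is "bytes" then raises a TypeError up
# front instead of failing somewhere inside the element-wise string operation.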
def _noarg_wrapper(
f,
name=None,
docstring=None,
forbidden_types=["bytes"],
returns_string=True,
**kargs,
):
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper(self):
result = _na_map(f, self._parent, **kargs)
return self._wrap_result(result, returns_string=returns_string)
wrapper.__name__ = f.__name__ if name is None else name
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError("Provide docstring")
return wrapper
def _pat_wrapper(
f,
flags=False,
na=False,
name=None,
forbidden_types=["bytes"],
returns_string=True,
**kwargs,
):
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper1(self, pat):
result = f(self._parent, pat)
return self._wrap_result(result, returns_string=returns_string)
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._parent, pat, flags=flags, **kwargs)
return self._wrap_result(result, returns_string=returns_string)
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper3(self, pat, na=np.nan):
result = f(self._parent, pat, na=na)
return self._wrap_result(result, returns_string=returns_string)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__ if name is None else name
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
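# Illustrative sketch (not part of the original source): these factories build the thin
# accessor methods defined later in this module, e.g.
#
#     count = _pat_wrapper(str_count, flags=True, name="count", returns_string=False)
#     lower = _noarg_wrapper(lambda x: x.lower(), name="lower", docstring=..., dtype=str)
#
# For _pat_wrapper, the `flags`/`na` switches select which of the three wrapper signatures is
# exposed; `returns_string` tells _wrap_result whether to keep the caller's "string" dtype.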
def copy(source):
"Copy a docstring from another source function (if present)"
def do_copy(target):
if source.__doc__:
target.__doc__ = source.__doc__
return target
return do_copy
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index. NAs stay NA unless
handled otherwise by a particular method. Patterned after Python's string
methods, with some inspiration from R's stringr package.
Examples
--------
>>> s.str.split('_')
>>> s.str.replace('_', '')
"""
def __init__(self, data):
self._inferred_dtype = self._validate(data)
self._is_categorical = is_categorical_dtype(data)
self._is_string = data.dtype.name == "string"
# .values.categories works for both Series/Index
self._parent = data.values.categories if self._is_categorical else data
# save orig to blow up categoricals to the right type
self._orig = data
self._freeze()
@staticmethod
def _validate(data):
"""
Auxiliary function for StringMethods, infers and checks dtype of data.
This is a "first line of defence" at the creation of the StringMethods-
object (see _make_accessor), and just checks that the dtype is in the
*union* of the allowed types over all string methods below; this
restriction is then refined on a per-method basis using the decorator
@forbid_nonstring_types (more info in the corresponding docstring).
This really should exclude all series/index with any non-string values,
but that isn't practical for performance reasons until we have a str
dtype (GH 9343 / 13877)
Parameters
----------
data : The content of the Series
Returns
-------
dtype : inferred dtype of data
"""
from pandas import StringDtype
if isinstance(data, ABCMultiIndex):
raise AttributeError(
"Can only use .str accessor with Index, not MultiIndex"
)
# see _libs/lib.pyx for list of inferred types
allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"]
values = getattr(data, "values", data) # Series / Index
values = getattr(values, "categories", values) # categorical / normal
# explicitly allow StringDtype
if isinstance(values.dtype, StringDtype):
return "string"
try:
inferred_dtype = lib.infer_dtype(values, skipna=True)
except ValueError:
# GH#27571 mostly occurs with ExtensionArray
inferred_dtype = None
if inferred_dtype not in allowed_types:
raise AttributeError("Can only use .str accessor with string values!")
return inferred_dtype
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def __iter__(self):
i = 0
g = self.get(i)
while g.notna().any():
yield g
i += 1
g = self.get(i)
def _wrap_result(
self,
result,
use_codes=True,
name=None,
expand=None,
fill_value=np.nan,
returns_string=True,
):
from pandas import Index, Series, MultiIndex
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
# if self._orig is a CategoricalIndex, there is no .cat-accessor
result = take_1d(
result, Series(self._orig, copy=False).cat.codes, fill_value=fill_value
)
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
return result
assert result.ndim < 3
# We can be wrapping a string / object / categorical result, in which
# case we'll want to return the same dtype as the input.
# Or we can be wrapping a numeric output, in which case we don't want
# to return a StringArray.
if self._is_string and returns_string:
dtype = "string"
else:
dtype = None
if expand is None:
# infer from ndim if expand is not specified
expand = result.ndim != 1
elif expand is True and not isinstance(self._orig, ABCIndexClass):
# required when expand=True is explicitly specified
# not needed when inferred
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if result:
# propagate nan values to match longest sequence (GH 18450)
max_len = max(len(x) for x in result)
result = [
x * max_len if len(x) == 0 or x[0] is np.nan else x for x in result
]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should have the same name
# as the original otherwise specified
if name is None:
name = getattr(result, "name", None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, ABCIndexClass):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
out = MultiIndex.from_tuples(result, names=name)
if out.nlevels == 1:
# We had all tuples of length-one, which are
# better represented as a regular Index.
out = out.get_level_values(0)
return out
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
result = cons(result, columns=name, index=index, dtype=dtype)
else:
# Must be a Series
cons = self._orig._constructor
result = cons(result, name=name, index=index, dtype=dtype)
return result
def _get_series_list(self, others):
"""
Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
into a list of Series (elements without an index must match the length
of the calling Series/Index).
Parameters
----------
others : Series, DataFrame, np.ndarray, list-like or list-like of
Objects that are either Series, Index or np.ndarray (1-dim).
Returns
-------
list of Series
Others transformed into list of Series.
"""
from pandas import Series, DataFrame
# self._orig is either Series or Index
idx = self._orig if isinstance(self._orig, ABCIndexClass) else self._orig.index
# Generally speaking, all objects without an index inherit the index
# `idx` of the calling Series/Index - i.e. must have matching length.
# Objects with an index (i.e. Series/Index/DataFrame) keep their own.
if isinstance(others, ABCSeries):
return [others]
elif isinstance(others, ABCIndexClass):
return [Series(others.values, index=others)]
elif isinstance(others, ABCDataFrame):
return [others[x] for x in others]
elif isinstance(others, np.ndarray) and others.ndim == 2:
others = DataFrame(others, index=idx)
return [others[x] for x in others]
elif is_list_like(others, allow_sets=False):
others = list(others) # ensure iterators do not get read twice etc
# in case of list-like `others`, all elements must be
# either Series/Index/np.ndarray (1-dim)...
if all(
isinstance(x, (ABCSeries, ABCIndexClass))
or (isinstance(x, np.ndarray) and x.ndim == 1)
for x in others
):
los = []
while others: # iterate through list and append each element
los = los + self._get_series_list(others.pop(0))
return los
# ... or just strings
elif all(not is_list_like(x) for x in others):
return [Series(others, index=idx)]
raise TypeError(
"others must be Series, Index, DataFrame, np.ndarrary "
"or list-like (either containing only strings or "
"containing only objects of type Series/Index/"
"np.ndarray[1-dim])"
)
@forbid_nonstring_types(["bytes", "mixed", "mixed-integer"])
def cat(self, others=None, sep=None, na_rep=None, join="left"):
"""
Concatenate strings in the Series/Index with given separator.
If `others` is specified, this function concatenates the Series/Index
and elements of `others` element-wise.
If `others` is not passed, then all values in the Series/Index are
concatenated into a single string with a given `sep`.
Parameters
----------
others : Series, Index, DataFrame, np.ndarray or list-like
Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and
other list-likes of strings must have the same length as the
calling Series/Index, with the exception of indexed objects (i.e.
Series/Index/DataFrame) if `join` is not None.
If others is a list-like that contains a combination of Series,
Index or np.ndarray (1-dim), then all elements will be unpacked and
must satisfy the above criteria individually.
If others is None, the method returns the concatenation of all
strings in the calling Series/Index.
sep : str, default ''
The separator between the different elements/columns. By default
the empty string `''` is used.
na_rep : str or None, default None
Representation that is inserted for all missing values:
- If `na_rep` is None, and `others` is None, missing values in the
Series/Index are omitted from the result.
- If `na_rep` is None, and `others` is not None, a row containing a
missing value in any of the columns (before concatenation) will
have a missing value in the result.
join : {'left', 'right', 'outer', 'inner'}, default 'left'
Determines the join-style between the calling Series/Index and any
Series/Index/DataFrame in `others` (objects without an index need
to match the length of the calling Series/Index). To disable
alignment, use `.values` on any Series/Index/DataFrame in `others`.
.. versionadded:: 0.23.0
.. versionchanged:: 1.0.0
Changed default of `join` from None to `'left'`.
Returns
-------
str, Series or Index
If `others` is None, `str` is returned, otherwise a `Series/Index`
(same type as caller) of objects is returned.
See Also
--------
split : Split each string in the Series/Index.
join : Join lists contained as elements in the Series/Index.
Examples
--------
When not passing `others`, all values are concatenated into a single
string:
>>> s = pd.Series(['a', 'b', np.nan, 'd'])
>>> s.str.cat(sep=' ')
'a b d'
By default, NA values in the Series are ignored. Using `na_rep`, they
can be given a representation:
>>> s.str.cat(sep=' ', na_rep='?')
'a b ? d'
If `others` is specified, corresponding values are concatenated with
the separator. Result will be a Series of strings.
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
0 a,A
1 b,B
2 NaN
3 d,D
dtype: object
Missing values will remain missing in the result, but can again be
represented using `na_rep`
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
0 a,A
1 b,B
2 -,C
3 d,D
dtype: object
If `sep` is not specified, the values are concatenated without
separation.
>>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
0 aA
1 bB
2 -C
3 dD
dtype: object
Series with different indexes can be aligned before concatenation. The
`join`-keyword works as in other methods.
>>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])
>>> s.str.cat(t, join='left', na_rep='-')
0 aa
1 b-
2 -c
3 dd
dtype: object
>>>
>>> s.str.cat(t, join='outer', na_rep='-')
0 aa
1 b-
2 -c
3 dd
4 -e
dtype: object
>>>
>>> s.str.cat(t, join='inner', na_rep='-')
0 aa
2 -c
3 dd
dtype: object
>>>
>>> s.str.cat(t, join='right', na_rep='-')
3 dd
0 aa
4 -e
2 -c
dtype: object
For more examples, see :ref:`here <text.concatenate>`.
"""
from pandas import Index, Series, concat
if isinstance(others, str):
raise ValueError("Did you mean to supply a `sep` keyword?")
if sep is None:
sep = ""
if isinstance(self._orig, ABCIndexClass):
data = Series(self._orig, index=self._orig)
else: # Series
data = self._orig
# concatenate Series/Index with itself if no "others"
if others is None:
data = ensure_object(data)
na_mask = isna(data)
if na_rep is None and na_mask.any():
data = data[~na_mask]
elif na_rep is not None and na_mask.any():
data = np.where(na_mask, na_rep, data)
return sep.join(data)
try:
# turn anything in "others" into lists of Series
others = self._get_series_list(others)
except ValueError: # do not catch TypeError raised by _get_series_list
raise ValueError(
"If `others` contains arrays or lists (or other "
"list-likes without an index), these must all be "
"of the same length as the calling Series/Index."
)
# align if required
if any(not data.index.equals(x.index) for x in others):
# Need to add keys for uniqueness in case of duplicate columns
others = concat(
others,
axis=1,
join=(join if join == "inner" else "outer"),
keys=range(len(others)),
sort=False,
copy=False,
)
data, others = data.align(others, join=join)
others = [others[x] for x in others] # again list of Series
all_cols = [ensure_object(x) for x in [data] + others]
na_masks = np.array([isna(x) for x in all_cols])
union_mask = np.logical_or.reduce(na_masks, axis=0)
if na_rep is None and union_mask.any():
# no na_rep means NaNs for all rows where any column has a NaN
# only necessary if there are actually any NaNs
result = np.empty(len(data), dtype=object)
np.putmask(result, union_mask, np.nan)
not_masked = ~union_mask
result[not_masked] = cat_safe([x[not_masked] for x in all_cols], sep)
elif na_rep is not None and union_mask.any():
# fill NaNs with na_rep in case there are actually any NaNs
all_cols = [
np.where(nm, na_rep, col) for nm, col in zip(na_masks, all_cols)
]
result = cat_safe(all_cols, sep)
else:
# no NaNs - can just concatenate
result = cat_safe(all_cols, sep)
if isinstance(self._orig, ABCIndexClass):
# add dtype for case that result is all-NA
result = Index(result, dtype=object, name=self._orig.name)
else: # Series
if is_categorical_dtype(self._orig.dtype):
# We need to infer the new categories.
dtype = None
else:
dtype = self._orig.dtype
result = Series(result, dtype=dtype, index=data.index, name=self._orig.name)
return result
_shared_docs[
"str_split"
] = r"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
at the specified delimiter string. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
pat : str, optional
String or regular expression to split on.
If not specified, split on whitespace.
n : int, default -1 (all)
Limit number of splits in output.
``None``, 0 and -1 will be interpreted as return all splits.
expand : bool, default False
        Expand the split strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding dimensionality.
* If ``False``, return Series/Index, containing lists of strings.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
Series.str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Series.str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
str.split : Standard library version for split.
str.rsplit : Standard library version for rsplit.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
Examples
--------
>>> s = pd.Series(["this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
... np.nan])
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html
2 NaN
dtype: object
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
Without the `n` parameter, the outputs of `rsplit` and `split`
are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `n` parameter can be used to limit the number of splits on the
delimiter. The outputs of `split` and `rsplit` are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat = "/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 NaN
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(expand=True)
0 1 2 3
0 this is a regular
1 https://docs.python.org/3/tutorial/index.html None None None
2 NaN NaN NaN NaN \
4
0 sentence
1 None
2 NaN
For slightly more complex use cases like splitting the html document name
from a url, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
Remember to escape special characters when explicitly using regular
expressions.
>>> s = pd.Series(["1+1=2"])
>>> s.str.split(r"\+|=", expand=True)
0 1 2
0 1 1 2
"""
@Appender(_shared_docs["str_split"] % {"side": "beginning", "method": "split"})
@forbid_nonstring_types(["bytes"])
def split(self, pat=None, n=-1, expand=False):
result = str_split(self._parent, pat, n=n)
return self._wrap_result(result, expand=expand, returns_string=expand)
@Appender(_shared_docs["str_split"] % {"side": "end", "method": "rsplit"})
@forbid_nonstring_types(["bytes"])
def rsplit(self, pat=None, n=-1, expand=False):
result = str_rsplit(self._parent, pat, n=n)
return self._wrap_result(result, expand=expand, returns_string=expand)
_shared_docs[
"str_partition"
] = """
Split the string at the %(side)s occurrence of `sep`.
This method splits the string at the %(side)s occurrence of `sep`,
and returns 3 elements containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
sep : str, default whitespace
String to split on.
pat : str, default whitespace
.. deprecated:: 0.24.0
Use ``sep`` instead.
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
Returns
-------
DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Series.str.split : Split strings around given separators.
str.partition : Standard library version.
Examples
--------
>>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by the last space instead of the first one:
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
To return a Series containing tuples instead of a DataFrame:
>>> s.str.partition('-', expand=False)
0 (Linda van der Berg, , )
1 (George Pitt, -, Rivers)
dtype: object
Also available on indices:
>>> idx = pd.Index(['X 123', 'Y 999'])
>>> idx
Index(['X 123', 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex([('X', ' ', '123'),
('Y', ' ', '999')],
dtype='object')
Or an index with tuples with ``expand=False``:
>>> idx.str.partition(expand=False)
Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')
"""
@Appender(
_shared_docs["str_partition"]
% {
"side": "first",
"return": "3 elements containing the string itself, followed by two "
"empty strings",
"also": "rpartition : Split the string at the last occurrence of `sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def partition(self, sep=" ", expand=True):
f = lambda x: x.partition(sep)
result = _na_map(f, self._parent)
return self._wrap_result(result, expand=expand, returns_string=expand)
@Appender(
_shared_docs["str_partition"]
% {
"side": "last",
"return": "3 elements containing two empty strings, followed by the "
"string itself",
"also": "partition : Split the string at the first occurrence of `sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def rpartition(self, sep=" ", expand=True):
f = lambda x: x.rpartition(sep)
result = _na_map(f, self._parent)
return self._wrap_result(result, expand=expand, returns_string=expand)
@copy(str_get)
def get(self, i):
result = str_get(self._parent, i)
return self._wrap_result(result)
@copy(str_join)
@forbid_nonstring_types(["bytes"])
def join(self, sep):
result = str_join(self._parent, sep)
return self._wrap_result(result)
@copy(str_contains)
@forbid_nonstring_types(["bytes"])
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
result = str_contains(
self._parent, pat, case=case, flags=flags, na=na, regex=regex
)
return self._wrap_result(result, fill_value=na, returns_string=False)
@copy(str_match)
@forbid_nonstring_types(["bytes"])
def match(self, pat, case=True, flags=0, na=np.nan):
result = str_match(self._parent, pat, case=case, flags=flags, na=na)
return self._wrap_result(result, fill_value=na, returns_string=False)
@copy(str_replace)
@forbid_nonstring_types(["bytes"])
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
result = str_replace(
self._parent, pat, repl, n=n, case=case, flags=flags, regex=regex
)
return self._wrap_result(result)
@copy(str_repeat)
@forbid_nonstring_types(["bytes"])
def repeat(self, repeats):
result = str_repeat(self._parent, repeats)
return self._wrap_result(result)
@copy(str_pad)
@forbid_nonstring_types(["bytes"])
def pad(self, width, side="left", fillchar=" "):
result = str_pad(self._parent, width, side=side, fillchar=fillchar)
return self._wrap_result(result)
_shared_docs[
"str_pad"
] = """
Filling %(side)s side of strings in the Series/Index with an
additional character. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``.
fillchar : str
Additional character for filling, default is whitespace.
Returns
-------
filled : Series/Index of objects.
"""
@Appender(_shared_docs["str_pad"] % dict(side="left and right", method="center"))
@forbid_nonstring_types(["bytes"])
def center(self, width, fillchar=" "):
return self.pad(width, side="both", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % dict(side="right", method="ljust"))
@forbid_nonstring_types(["bytes"])
def ljust(self, width, fillchar=" "):
return self.pad(width, side="right", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % dict(side="left", method="rjust"))
@forbid_nonstring_types(["bytes"])
def rjust(self, width, fillchar=" "):
return self.pad(width, side="left", fillchar=fillchar)
@forbid_nonstring_types(["bytes"])
def zfill(self, width):
"""
Pad strings in the Series/Index by prepending '0' characters.
Strings in the Series/Index are padded with '0' characters on the
left of the string to reach a total string length `width`. Strings
in the Series/Index with length greater or equal to `width` are
unchanged.
Parameters
----------
width : int
            Minimum length of resulting string; strings with length less
            than `width` will be prepended with '0' characters.
Returns
-------
Series/Index of objects.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character.
Series.str.ljust : Fills the right side of strings with an arbitrary
character.
Series.str.pad : Fills the specified sides of strings with an arbitrary
character.
        Series.str.center : Fills both sides of strings with an arbitrary
character.
Notes
-----
Differs from :meth:`str.zfill` which has special handling
for '+'/'-' in the string.
Examples
--------
>>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
>>> s
0 -1
1 1
2 1000
3 10
4 NaN
dtype: object
Note that ``10`` and ``NaN`` are not strings, therefore they are
converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
regular character and the zero is added to the left of it
(:meth:`str.zfill` would have moved it to the left). ``1000``
remains unchanged as it is longer than `width`.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 NaN
4 NaN
dtype: object
"""
result = str_pad(self._parent, width, side="left", fillchar="0")
return self._wrap_result(result)
@copy(str_slice)
def slice(self, start=None, stop=None, step=None):
result = str_slice(self._parent, start, stop, step)
return self._wrap_result(result)
@copy(str_slice_replace)
@forbid_nonstring_types(["bytes"])
def slice_replace(self, start=None, stop=None, repl=None):
result = str_slice_replace(self._parent, start, stop, repl)
return self._wrap_result(result)
@copy(str_decode)
def decode(self, encoding, errors="strict"):
# need to allow bytes here
result = str_decode(self._parent, encoding, errors)
# TODO: Not sure how to handle this.
return self._wrap_result(result, returns_string=False)
@copy(str_encode)
@forbid_nonstring_types(["bytes"])
def encode(self, encoding, errors="strict"):
result = str_encode(self._parent, encoding, errors)
return self._wrap_result(result, returns_string=False)
_shared_docs[
"str_strip"
] = r"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the Series/Index from %(side)s.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series or Index of object
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index.
Series.str.lstrip : Remove leading characters in Series/Index.
Series.str.rstrip : Remove trailing characters in Series/Index.
Examples
--------
>>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 NaN
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 NaN
dtype: object
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 NaN
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 NaN
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 NaN
dtype: object
"""
@Appender(
_shared_docs["str_strip"] % dict(side="left and right sides", method="strip")
)
@forbid_nonstring_types(["bytes"])
def strip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="both")
return self._wrap_result(result)
@Appender(_shared_docs["str_strip"] % dict(side="left side", method="lstrip"))
@forbid_nonstring_types(["bytes"])
def lstrip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="left")
return self._wrap_result(result)
@Appender(_shared_docs["str_strip"] % dict(side="right side", method="rstrip"))
@forbid_nonstring_types(["bytes"])
def rstrip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="right")
return self._wrap_result(result)
@copy(str_wrap)
@forbid_nonstring_types(["bytes"])
def wrap(self, width, **kwargs):
result = str_wrap(self._parent, width, **kwargs)
return self._wrap_result(result)
@copy(str_get_dummies)
@forbid_nonstring_types(["bytes"])
def get_dummies(self, sep="|"):
# we need to cast to Series of strings as only that has all
# methods available for making the dummies...
data = self._orig.astype(str) if self._is_categorical else self._parent
result, name = str_get_dummies(data, sep)
return self._wrap_result(
result,
use_codes=(not self._is_categorical),
name=name,
expand=True,
returns_string=False,
)
@copy(str_translate)
@forbid_nonstring_types(["bytes"])
def translate(self, table):
result = str_translate(self._parent, table)
return self._wrap_result(result)
count = _pat_wrapper(str_count, flags=True, name="count", returns_string=False)
startswith = _pat_wrapper(
str_startswith, na=True, name="startswith", returns_string=False
)
endswith = _pat_wrapper(
str_endswith, na=True, name="endswith", returns_string=False
)
findall = _pat_wrapper(
str_findall, flags=True, name="findall", returns_string=False
)
@copy(str_extract)
@forbid_nonstring_types(["bytes"])
def extract(self, pat, flags=0, expand=True):
return str_extract(self, pat, flags=flags, expand=expand)
@copy(str_extractall)
@forbid_nonstring_types(["bytes"])
def extractall(self, pat, flags=0):
return str_extractall(self._orig, pat, flags=flags)
_shared_docs[
"find"
] = """
    Return %(side)s indexes in each string in the Series/Index
where the substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int.
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["find"]
% dict(
side="lowest",
method="find",
also="rfind : Return highest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def find(self, sub, start=0, end=None):
result = str_find(self._parent, sub, start=start, end=end, side="left")
return self._wrap_result(result, returns_string=False)
@Appender(
_shared_docs["find"]
% dict(
side="highest",
method="rfind",
also="find : Return lowest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def rfind(self, sub, start=0, end=None):
result = str_find(self._parent, sub, start=start, end=end, side="right")
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def normalize(self, form):
"""
Return the Unicode normal form for the strings in the Series/Index.
For more information on the forms, see the
:func:`unicodedata.normalize`.
Parameters
----------
form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
Unicode form.
Returns
-------
normalized : Series/Index of objects
"""
import unicodedata
f = lambda x: unicodedata.normalize(form, x)
result = _na_map(f, self._parent, dtype=str)
return self._wrap_result(result)
_shared_docs[
"index"
] = """
    Return %(side)s indexes in each string where the substring is
fully contained between [start:end]. This is the same as
``str.%(similar)s`` except instead of returning -1, it raises a ValueError
when the substring is not found. Equivalent to standard ``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["index"]
% dict(
side="lowest",
similar="find",
method="index",
also="rindex : Return highest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def index(self, sub, start=0, end=None):
result = str_index(self._parent, sub, start=start, end=end, side="left")
return self._wrap_result(result, returns_string=False)
@Appender(
_shared_docs["index"]
% dict(
side="highest",
similar="rfind",
method="rindex",
also="index : Return lowest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def rindex(self, sub, start=0, end=None):
result = str_index(self._parent, sub, start=start, end=end, side="right")
return self._wrap_result(result, returns_string=False)
_shared_docs[
"len"
] = """
Compute the length of each element in the Series/Index. The element may be
a sequence (such as a string, tuple or list) or a collection
(such as a dictionary).
Returns
-------
Series or Index of int
A Series or Index of integer values indicating the length of each
element in the Series or Index.
See Also
--------
str.len : Python built-in function returning the length of an object.
Series.size : Returns the length of the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(['dog',
... '',
... 5,
... {'foo' : 'bar'},
... [2, 3, 5, 7],
... ('one', 'two', 'three')])
>>> s
0 dog
1
2 5
3 {'foo': 'bar'}
4 [2, 3, 5, 7]
5 (one, two, three)
dtype: object
>>> s.str.len()
0 3.0
1 0.0
2 NaN
3 1.0
4 4.0
5 3.0
dtype: float64
"""
len = _noarg_wrapper(
len,
docstring=_shared_docs["len"],
forbidden_types=None,
dtype=int,
returns_string=False,
)
_shared_docs[
"casemethods"
] = """
Convert strings in the Series/Index to %(type)s.
%(version)s
Equivalent to :meth:`str.%(method)s`.
Returns
-------
Series or Index of object
See Also
--------
Series.str.lower : Converts all characters to lowercase.
Series.str.upper : Converts all characters to uppercase.
Series.str.title : Converts first character of each word to uppercase and
remaining to lowercase.
Series.str.capitalize : Converts first character to uppercase and
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
Series.str.casefold: Removes all case distinctions in the string.
Examples
--------
>>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
# _doc_args holds dict of strings to use in substituting casemethod docs
_doc_args: Dict[str, Dict[str, str]] = {}
_doc_args["lower"] = dict(type="lowercase", method="lower", version="")
_doc_args["upper"] = dict(type="uppercase", method="upper", version="")
_doc_args["title"] = dict(type="titlecase", method="title", version="")
_doc_args["capitalize"] = dict(
type="be capitalized", method="capitalize", version=""
)
_doc_args["swapcase"] = dict(type="be swapcased", method="swapcase", version="")
_doc_args["casefold"] = dict(
type="be casefolded",
method="casefold",
version="\n .. versionadded:: 0.25.0\n",
)
lower = _noarg_wrapper(
lambda x: x.lower(),
name="lower",
docstring=_shared_docs["casemethods"] % _doc_args["lower"],
dtype=str,
)
upper = _noarg_wrapper(
lambda x: x.upper(),
name="upper",
docstring=_shared_docs["casemethods"] % _doc_args["upper"],
dtype=str,
)
title = _noarg_wrapper(
lambda x: x.title(),
name="title",
docstring=_shared_docs["casemethods"] % _doc_args["title"],
dtype=str,
)
capitalize = _noarg_wrapper(
lambda x: x.capitalize(),
name="capitalize",
docstring=_shared_docs["casemethods"] % _doc_args["capitalize"],
dtype=str,
)
swapcase = _noarg_wrapper(
lambda x: x.swapcase(),
name="swapcase",
docstring=_shared_docs["casemethods"] % _doc_args["swapcase"],
dtype=str,
)
casefold = _noarg_wrapper(
lambda x: x.casefold(),
name="casefold",
docstring=_shared_docs["casemethods"] % _doc_args["casefold"],
dtype=str,
)
_shared_docs[
"ismethods"
] = """
Check whether all characters in each string are %(type)s.
This is equivalent to running the Python string method
:meth:`str.%(method)s` for each element of the Series/Index. If a string
has zero characters, ``False`` is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as the original
Series/Index.
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
**Checks for Alphabetic and Numeric Characters**
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional punctuation
or whitespace will evaluate to false for an alphanumeric check.
>>> s2 = pd.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
**More Detailed Checks for Numeric Characters**
There are several different but overlapping sets of numeric characters that
can be checked for.
>>> s3 = pd.Series(['23', '³', '⅕', ''])
The ``s3.str.isdecimal`` method checks for characters used to form numbers
in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
The ``s.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s3.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
The ``s.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s3.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
**Checks for Whitespace**
>>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
>>> s4.str.isspace()
0 True
1 True
2 False
dtype: bool
**Checks for Character Case**
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
>>> s5.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
The ``s5.str.istitle`` method checks for whether all words are in title
case (whether only the first letter of each word is capitalized). Words are
    assumed to be any sequence of non-numeric characters separated by
whitespace characters.
>>> s5.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
"""
_doc_args["isalnum"] = dict(type="alphanumeric", method="isalnum")
_doc_args["isalpha"] = dict(type="alphabetic", method="isalpha")
_doc_args["isdigit"] = dict(type="digits", method="isdigit")
_doc_args["isspace"] = dict(type="whitespace", method="isspace")
_doc_args["islower"] = dict(type="lowercase", method="islower")
_doc_args["isupper"] = dict(type="uppercase", method="isupper")
_doc_args["istitle"] = dict(type="titlecase", method="istitle")
_doc_args["isnumeric"] = dict(type="numeric", method="isnumeric")
_doc_args["isdecimal"] = dict(type="decimal", method="isdecimal")
# force _noarg_wrapper return type with dtype=bool (GH 29624)
isalnum = _noarg_wrapper(
lambda x: x.isalnum(),
name="isalnum",
docstring=_shared_docs["ismethods"] % _doc_args["isalnum"],
returns_string=False,
dtype=bool,
)
isalpha = _noarg_wrapper(
lambda x: x.isalpha(),
name="isalpha",
docstring=_shared_docs["ismethods"] % _doc_args["isalpha"],
returns_string=False,
dtype=bool,
)
isdigit = _noarg_wrapper(
lambda x: x.isdigit(),
name="isdigit",
docstring=_shared_docs["ismethods"] % _doc_args["isdigit"],
returns_string=False,
dtype=bool,
)
isspace = _noarg_wrapper(
lambda x: x.isspace(),
name="isspace",
docstring=_shared_docs["ismethods"] % _doc_args["isspace"],
returns_string=False,
dtype=bool,
)
islower = _noarg_wrapper(
lambda x: x.islower(),
name="islower",
docstring=_shared_docs["ismethods"] % _doc_args["islower"],
returns_string=False,
dtype=bool,
)
isupper = _noarg_wrapper(
lambda x: x.isupper(),
name="isupper",
docstring=_shared_docs["ismethods"] % _doc_args["isupper"],
returns_string=False,
dtype=bool,
)
istitle = _noarg_wrapper(
lambda x: x.istitle(),
name="istitle",
docstring=_shared_docs["ismethods"] % _doc_args["istitle"],
returns_string=False,
dtype=bool,
)
isnumeric = _noarg_wrapper(
lambda x: x.isnumeric(),
name="isnumeric",
docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"],
returns_string=False,
dtype=bool,
)
isdecimal = _noarg_wrapper(
lambda x: x.isdecimal(),
name="isdecimal",
docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"],
returns_string=False,
dtype=bool,
)
@classmethod
def _make_accessor(cls, data):
cls._validate(data)
return cls(data)
|
the-stack_106_17930
|
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "purple-surf-29179.botics.co"
site_params = {
"name": "Purple Surf",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
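# Illustrative note (not part of the original migration): this function runs at migrate time
# through the RunPython operation below, pinning the default django.contrib.sites record
# (id=1) to the project name and, when set, the custom domain.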
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
the-stack_106_17932
|
# -*- coding: utf-8 -*-
import re
import requests
from bs4 import BeautifulSoup
import jieba.posseg as pseg
import matplotlib.pyplot as plt
from wordcloud import WordCloud,ImageColorGenerator
from PIL import Image
import numpy as np
HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
}
def bv2av(url):
"""
    Convert a bilibili video URL that uses the newer BV id to its original av number.
    :param url: bilibili video url <str>
    :return: av_id <str>
"""
r = requests.get(url, headers=HEADERS)
try:
av = re.findall(r"video/av(\d+)/", r.text)[0]
return av
except Exception as e:
print(e)
raise
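# Illustrative usage sketch (not part of the original script): the helper fetches the rendered
# page and scrapes the numeric av id as a string, e.g.
# >>> av_id = bv2av("https://www.bilibili.com/video/BV1cC4y1a7xA")  # network call
# >>> av_id.isdigit()
# True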
def parse_words(all_texts):
# res = pseg.cut("我爱北京天安门")
# for word, flag in res:
# print(str(word))
words = []
for entry in all_texts:
parse_result = pseg.cut(entry)
for word, _ in parse_result:
words.append(str(word))
return words
def generate_word_cloud(words):
text = "/".join(words)
print(text)
wordcloud = WordCloud(max_font_size=50, max_words=100, background_color="white", font_path='fonts/MSYH.TTC').generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
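# Illustrative note (not part of the original script): the words are joined with '/' purely as
# a delimiter before handing the text to WordCloud; the PIL/numpy/ImageColorGenerator imports
# above would only be needed for a masked, image-colored cloud and are unused in this version.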
def main():
av_id = bv2av('https://www.bilibili.com/video/BV1cC4y1a7xA')
resp = requests.get('https://www.bilibili.com/video/av' + av_id, headers=HEADERS)
match_rule = r'cid=(.*?)&aid'
oid = re.search(match_rule, resp.text).group().replace('cid=', '').replace('&aid', '')
print('oid=' + oid)
# xml_url = 'https://api.bilibili.com/x/v1/dm/list.so?oid=' + oid
xml_url = 'https://comment.bilibili.com/{}.xml'.format(oid)
resp = requests.get(xml_url, headers=HEADERS)
content = resp.content
content_text = content.decode('utf-8')
xml_soup = BeautifulSoup(content_text, 'xml')
all_ds = xml_soup.find_all('d')
all_live_comments = [item.text for item in all_ds]
parsed_words = parse_words(all_live_comments)
generate_word_cloud(parsed_words)
if __name__ == "__main__":
main()
|
the-stack_106_17934
|
import pyclassifiers.values
import config.general
import config.helpers
project_github_username = "veltzer"
project_name = "pyapt"
github_repo_name = project_name
project_website = f"https://{project_github_username}.github.io/{project_name}"
project_website_source = f"https://github.com/{project_github_username}/{project_name}"
project_website_git = f"git://github.com/{project_github_username}/{project_name}.git"
project_website_download_ppa = "https://launchpanet/~mark-veltzer/+archive/ubuntu/ppa"
project_website_download_src = project_website_source
# noinspection SpellCheckingInspection
project_paypal_donate_button_id = "ASPRXR59H2NTQ"
project_google_analytics_tracking_id = "UA-56436979-1"
project_short_description = "module to help you maintain third party apt repos in a sane way"
project_long_description = project_short_description
# keywords to put on html pages or for search, don't put the name of the project or my details
# as they will be added automatically...
project_keywords = [
"pyapt",
"apt",
"apt-key",
]
project_license = "MIT"
project_year_started = "2017"
project_description = project_short_description
project_platforms = [
"python3",
]
project_classifiers = [
pyclassifiers.values.DevelopmentStatus__4_Beta,
pyclassifiers.values.Environment__Console,
pyclassifiers.values.OperatingSystem__OSIndependent,
pyclassifiers.values.ProgrammingLanguage__Python,
pyclassifiers.values.ProgrammingLanguage__Python__3,
pyclassifiers.values.ProgrammingLanguage__Python__3__Only,
pyclassifiers.values.ProgrammingLanguage__Python__36,
pyclassifiers.values.ProgrammingLanguage__Python__37,
pyclassifiers.values.ProgrammingLanguage__Python__38,
pyclassifiers.values.Topic__Utilities,
pyclassifiers.values.License__OSIApproved__MITLicense,
]
project_data_files = []
codacy_id = None
project_google_analytics_tracking_id = None
project_paypal_donate_button_id = None
project_copyright_years = config.helpers.get_copyright_years(project_year_started)
project_google_analytics_snipplet = config.helpers.get_google_analytics(project_google_analytics_tracking_id)
project_paypal_donate_button_snipplet = config.helpers.get_paypal(project_paypal_donate_button_id)
|
the-stack_106_17935
|
import heapq
import itertools
from abc import ABC, abstractmethod
from collections import defaultdict
from operator import itemgetter
from typing import List, Dict, Tuple
from typing import Sequence
import numpy as np
import torch
from bert_score import BERTScorer
from nltk import PorterStemmer
from spacy.tokens import Doc, Span
from toolz import itertoolz
from transformers import AutoTokenizer
from transformers.tokenization_utils_base import PaddingStrategy
class EmbeddingModel(ABC):
@abstractmethod
def embed(
self,
sents: List[Span]
):
pass
class ContextualEmbedding(EmbeddingModel):
def __init__(self, model, tokenizer_name, max_length):
self.model = model
self.tokenizer = SpacyHuggingfaceTokenizer(tokenizer_name, max_length)
self._device = model.device
def embed(
self,
sents: List[Span]
):
encoded_input, special_tokens_masks, token_alignments = self.tokenizer.batch_encode(sents)
encoded_input = {k: v.to(self._device) for k, v in encoded_input.items()}
with torch.no_grad():
model_output = self.model(**encoded_input)
embeddings = model_output[0].cpu()
spacy_embs_list = []
for embs, mask, token_alignment \
in zip(embeddings, special_tokens_masks, token_alignments):
mask = torch.tensor(mask)
embs = embs[mask == 0] # Filter embeddings at special token positions
spacy_embs = []
for hf_idxs in token_alignment:
if hf_idxs is None:
pooled_embs = torch.zeros_like(embs[0])
else:
pooled_embs = embs[hf_idxs].mean(dim=0) # Pool embeddings that map to the same spacy token
spacy_embs.append(pooled_embs.numpy())
spacy_embs = np.stack(spacy_embs)
spacy_embs = spacy_embs / np.linalg.norm(spacy_embs, axis=-1, keepdims=True) # Normalize
spacy_embs_list.append(spacy_embs)
for embs, sent in zip(spacy_embs_list, sents):
assert len(embs) == len(sent)
return spacy_embs_list
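    # Illustrative note (not part of the original module): embed() returns one unit-normalized
    # (n_spacy_tokens x hidden_dim) array per input sentence; transformer subword embeddings
    # are mean-pooled back onto spaCy tokens via the tokenizer's alignment, and tokens with no
    # subword mapping fall back to a zero vector before normalization.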
class StaticEmbedding(EmbeddingModel):
def embed(
self,
sents: List[Span]
):
return [
np.stack([t.vector / (t.vector_norm or 1) for t in sent])
for sent in sents
]
class EmbeddingAligner():
def __init__(
self,
embedding: EmbeddingModel,
threshold: float,
top_k: int,
baseline_val=0
):
self.threshold = threshold
self.top_k = top_k
self.embedding = embedding
self.baseline_val = baseline_val
def align(
self,
source: Doc,
targets: Sequence[Doc]
) -> List[Dict]:
"""Compute alignment from summary tokens to doc tokens with greatest semantic similarity
Args:
source: Source spaCy document
targets: Target spaCy documents
Returns: List of alignments, one for each target document
"""
if len(source) == 0:
return [{} for _ in targets]
all_sents = list(source.sents) + list(itertools.chain.from_iterable(target.sents for target in targets))
chunk_sizes = [_iter_len(source.sents)] + \
[_iter_len(target.sents) for target in targets]
all_sents_token_embeddings = self.embedding.embed(all_sents)
chunked_sents_token_embeddings = _split(all_sents_token_embeddings, chunk_sizes)
source_sent_token_embeddings = chunked_sents_token_embeddings[0]
source_token_embeddings = np.concatenate(source_sent_token_embeddings)
for token_idx, token in enumerate(source):
if token.is_stop or token.is_punct:
source_token_embeddings[token_idx] = 0
alignments = []
for i, target in enumerate(targets):
target_sent_token_embeddings = chunked_sents_token_embeddings[i + 1]
target_token_embeddings = np.concatenate(target_sent_token_embeddings)
for token_idx, token in enumerate(target):
if token.is_stop or token.is_punct:
target_token_embeddings[token_idx] = 0
alignment = defaultdict(list)
for score, target_idx, source_idx in self._emb_sim_sparse(
target_token_embeddings,
source_token_embeddings,
):
alignment[target_idx].append((source_idx, score))
            # TODO: use argpartition to get nlargest
for j in list(alignment):
alignment[j] = heapq.nlargest(self.top_k, alignment[j], itemgetter(1))
alignments.append(alignment)
return alignments
def _emb_sim_sparse(self, embs_1, embs_2):
sim = embs_1 @ embs_2.T
sim = (sim - self.baseline_val) / (1 - self.baseline_val)
keep = sim > self.threshold
keep_idxs_1, keep_idxs_2 = np.where(keep)
keep_scores = sim[keep]
return list(zip(keep_scores, keep_idxs_1, keep_idxs_2))
class BertscoreAligner(EmbeddingAligner):
def __init__(
self,
threshold,
top_k
):
scorer = BERTScorer(lang="en", rescale_with_baseline=True)
model = scorer._model
embedding = ContextualEmbedding(model, "roberta-large", 510)
baseline_val = scorer.baseline_vals[2].item()
super(BertscoreAligner, self).__init__(
embedding, threshold, top_k, baseline_val
)
class StaticEmbeddingAligner(EmbeddingAligner):
def __init__(
self,
threshold,
top_k
):
embedding = StaticEmbedding()
super(StaticEmbeddingAligner, self).__init__(
embedding, threshold, top_k
)
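# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of driving StaticEmbeddingAligner. The spaCy model name below
# ("en_core_web_md") is an assumption for illustration only; any pipeline that
# provides word vectors and sentence boundaries should work.
def _example_static_alignment():
    import spacy  # assumed to be installed alongside this module
    nlp = spacy.load("en_core_web_md")
    source = nlp("The quick brown fox jumps over the lazy dog.")
    target = nlp("A fast fox leapt over a sleepy dog.")
    aligner = StaticEmbeddingAligner(threshold=0.5, top_k=3)
    alignment, = aligner.align(source, [target])
    # Maps each target token index to a list of (source token index, score) pairs.
    return alignment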
class NGramAligner():
def __init__(self):
self.stemmer = PorterStemmer()
def align(
self,
source: Doc,
targets: List[Doc],
) -> List[Dict]:
alignments = []
source_ngram_spans = self._get_ngram_spans(source)
for target in targets:
target_ngram_spans = self._get_ngram_spans(target)
alignments.append(
self._align_ngrams(target_ngram_spans, source_ngram_spans)
)
return alignments
def _get_ngram_spans(
self,
doc: Doc,
):
ngrams = []
for sent in doc.sents:
for n in range(1, len(list(sent))):
tokens = [t for t in sent if not (t.is_stop or t.is_punct)]
ngrams.extend(_ngrams(tokens, n))
def ngram_key(ngram):
return tuple(self.stemmer.stem(token.text).lower() for token in ngram)
key_to_ngrams = itertoolz.groupby(ngram_key, ngrams)
key_to_spans = {}
for k, grouped_ngrams in key_to_ngrams.items():
key_to_spans[k] = [
(ngram[0].i, ngram[-1].i + 1)
for ngram in grouped_ngrams
]
return key_to_spans
def _align_ngrams(
self,
ngram_spans_1: Dict[Tuple[str], List[Tuple[int, int]]],
ngram_spans_2: Dict[Tuple[str], List[Tuple[int, int]]]
) -> Dict[Tuple[int, int], List[Tuple[int, int]]]:
"""Align ngram spans between two documents
Args:
ngram_spans_1: Map from (normalized_token1, normalized_token2, ...) n-gram tuple to a list of token spans
of format (start_pos, end_pos)
ngram_spans_2: Same format as above, but for second text
Returns: map from each (start, end) span in text 1 to list of aligned (start, end) spans in text 2
"""
if not ngram_spans_1 or not ngram_spans_2:
return {}
max_span_end_1 = max(span[1] for span in itertools.chain.from_iterable(ngram_spans_1.values()))
max_span_end_2 = max(span[1] for span in itertools.chain.from_iterable(ngram_spans_2.values()))
        token_is_available_1 = [True] * max_span_end_1  # Tracks which token positions in text 1 are still unmatched
        token_is_available_2 = [True] * max_span_end_2
        matched_keys = list(set(ngram_spans_1.keys()) & set(ngram_spans_2.keys()))  # Normalized ngrams shared between the two texts
matched_keys.sort(key=len, reverse=True) # Process n-grams from longest to shortest
alignment = defaultdict(list) # Map from each matched span in text 1 to list of aligned spans in text 2
for key in matched_keys:
spans_1 = ngram_spans_1[key]
spans_2 = ngram_spans_2[key]
available_spans_1 = [span for span in spans_1 if all(token_is_available_1[slice(*span)])]
available_spans_2 = [span for span in spans_2 if all(token_is_available_2[slice(*span)])]
            matched_spans_1 = []
            matched_spans_2 = []
            if available_spans_1 and available_spans_2:
                # The ngram can be matched to available spans in both sequences
                for span in available_spans_1:
                    # It's possible that these newly matched spans may be overlapping with one another, so
                    # check that token positions are still available (only one span allowed per token in text 1):
                    if all(token_is_available_1[slice(*span)]):
                        matched_spans_1.append(span)
                        token_is_available_1[slice(*span)] = [False] * (span[1] - span[0])
                # Record the aligned spans in text 2; without this assignment every
                # alignment entry would remain an empty list.
                matched_spans_2 = available_spans_2
            for span1 in matched_spans_1:
                alignment[span1] = matched_spans_2
return alignment
class SpacyHuggingfaceTokenizer:
def __init__(
self,
model_name,
max_length
):
self.tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
self.max_length = max_length
def batch_encode(
self,
sents: List[Span]
):
token_alignments = []
token_ids_list = []
# Tokenize each sentence and special tokens.
for sent in sents:
hf_tokens, token_alignment = self.tokenize(sent)
token_alignments.append(token_alignment)
token_ids = self.tokenizer.convert_tokens_to_ids(hf_tokens)
encoding = self.tokenizer.prepare_for_model(
token_ids,
add_special_tokens=True,
padding=False,
)
token_ids_list.append(encoding['input_ids'])
# Add padding
max_length = max(map(len, token_ids_list))
attention_mask = []
input_ids = []
special_tokens_masks = []
for token_ids in token_ids_list:
encoding = self.tokenizer.prepare_for_model(
token_ids,
padding=PaddingStrategy.MAX_LENGTH,
max_length=max_length,
add_special_tokens=False
)
input_ids.append(encoding['input_ids'])
attention_mask.append(encoding['attention_mask'])
special_tokens_masks.append(
self.tokenizer.get_special_tokens_mask(
encoding['input_ids'],
already_has_special_tokens=True
)
)
encoded = {
'input_ids': torch.tensor(input_ids),
'attention_mask': torch.tensor(attention_mask)
}
return encoded, special_tokens_masks, token_alignments
def tokenize(
self,
sent
):
"""Convert spacy sentence to huggingface tokens and compute the alignment"""
hf_tokens = []
token_alignment = []
for i, token in enumerate(sent):
# "Tokenize" each word individually, so as to track the alignment between spaCy/HF tokens
# Prefix all tokens with a space except the first one in the sentence
if i == 0:
token_text = token.text
else:
token_text = ' ' + token.text
start_hf_idx = len(hf_tokens)
word_tokens = self.tokenizer.tokenize(token_text)
end_hf_idx = len(hf_tokens) + len(word_tokens)
if end_hf_idx < self.max_length:
hf_tokens.extend(word_tokens)
hf_idxs = list(range(start_hf_idx, end_hf_idx))
else:
hf_idxs = None
token_alignment.append(hf_idxs)
return hf_tokens, token_alignment
def _split(data, sizes):
it = iter(data)
return [[next(it) for _ in range(size)] for size in sizes]
def _iter_len(it):
return sum(1 for _ in it)
# TODO set up batching
# To get top K axis and value per row: https://stackoverflow.com/questions/42832711/using-np-argpartition-to-index-values-in-a-multidimensional-array
def _ngrams(tokens, n):
for i in range(len(tokens) - n + 1):
yield tokens[i:i + n]
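# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of NGramAligner, which needs only tokenization and sentence
# boundaries (no word vectors). "en_core_web_sm" is an assumed model name.
def _example_ngram_alignment():
    import spacy  # assumed to be installed alongside this module
    nlp = spacy.load("en_core_web_sm")
    source = nlp("The committee approved the budget on Tuesday.")
    target = nlp("On Tuesday the budget was approved.")
    alignment, = NGramAligner().align(source, [target])
    # Maps each matched (start, end) token span in the target to the list of
    # aligned (start, end) token spans in the source.
    return alignment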
|
the-stack_106_17936
|
def extractHomescribbleMybluemixNet(item):
'''
Parser for 'homescribble.mybluemix.net'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Great Tang Idyll Translations', 'Great Tang Idyll', 'translated'),
('Great Tang Idyll Chapters', 'Great Tang Idyll', 'translated'),
('GTI', 'Great Tang Idyll', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
the-stack_106_17937
|
#!/usr/bin/env python
#
# Electrum - lightweight Futurocoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from .util import print_error, profiler
from . import bitcoin
from .bitcoin import *
import struct
import traceback
import sys
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class UnknownTxinType(Exception):
pass
class NotRecognizedRedeemScript(Exception):
pass
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, _bytes): # Initialize with string of _bytes
if self.input is None:
self.input = bytearray(_bytes)
else:
self.input += bytearray(_bytes)
def read_string(self, encoding='ascii'):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Futurocoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
length = self.read_compact_size()
return self.read_bytes(length).decode(encoding)
def write_string(self, string, encoding='ascii'):
string = to_bytes(string, encoding)
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
    def read_bytes(self, length):
        # Slicing a bytearray never raises IndexError, so check the length explicitly
        # rather than relying on an exception path that cannot trigger.
        result = self.input[self.read_cursor:self.read_cursor+length]
        if len(result) < length:
            raise SerializationError("attempt to read past end of buffer")
        self.read_cursor += length
        return result
    def read_boolean(self): return self.read_bytes(1)[0] != 0
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
    def write_boolean(self, val): return self.write(b'\x01' if val else b'\x00')
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
try:
size = self.input[self.read_cursor]
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
except IndexError:
raise SerializationError("attempt to read past end of buffer")
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(bytes([size]))
elif size < 2**16:
self.write(b'\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write(b'\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write(b'\xff')
self._write_num('<Q', size)
def _read_num(self, format):
try:
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
except Exception as e:
raise SerializationError(e)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
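# --- Illustrative usage (not part of the original module) ---
# A small round-trip sketch of the compact-size ("varint") encoding implemented by
# BCDataStream above; the values are chosen purely for illustration.
def _example_compact_size_roundtrip():
    ds = BCDataStream()
    ds.write_compact_size(252)      # single length byte
    ds.write_compact_size(253)      # 0xfd prefix + 2-byte little-endian value
    ds.write_compact_size(70000)    # 0xfe prefix + 4-byte little-endian value
    ds.write_string("hello")        # compact-size length prefix + ASCII bytes
    assert ds.read_compact_size() == 252
    assert ds.read_compact_size() == 253
    assert ds.read_compact_size() == 70000
    assert ds.read_string() == "hello"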
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
class EnumException(Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if isinstance(x, tuple):
x, i = x
if not isinstance(x, str):
raise EnumException("enum name is not a string: " + x)
if not isinstance(i, int):
raise EnumException("enum value is not an integer: " + i)
if x in uniqueNames:
raise EnumException("enum name is not unique: " + x)
if i in uniqueValues:
raise EnumException("enum value is not unique for " + x)
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if attr not in self.lookup:
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
    return bh2u(bytes)  # bytes objects have no .encode() in Python 3; reuse the module's hex helper
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
    t = bh2u(bytes)  # see long_hex: .encode('hex_codec') is Python 2 only
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
("OP_NOP1", 0xB0),
("OP_CHECKLOCKTIMEVERIFY", 0xB1), ("OP_CHECKSEQUENCEVERIFY", 0xB2),
"OP_NOP4", "OP_NOP5", "OP_NOP6", "OP_NOP7", "OP_NOP8", "OP_NOP9", "OP_NOP10",
("OP_INVALIDOPCODE", 0xFF),
])
def script_GetOp(_bytes):
i = 0
while i < len(_bytes):
vch = None
opcode = _bytes[i]
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = _bytes[i]
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', _bytes, i)
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', _bytes, i)
i += 4
vch = _bytes[i:i + nSize]
i += nSize
yield opcode, vch, i
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
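# --- Illustrative usage (not part of the original module) ---
# A sketch of decode_script() applied to a standard pay-to-pubkey-hash output
# script; the 20-byte hash is all zeros purely for illustration.
def _example_decode_p2pkh_script():
    script_hex = '76a914' + '00' * 20 + '88ac'  # OP_DUP OP_HASH160 <20 bytes> OP_EQUALVERIFY OP_CHECKSIG
    # Returns something like "DUP HASH160 20:0000...0000 EQUALVERIFY CHECKSIG"
    return decode_script(bfh(script_hex))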
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
        return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def parse_sig(x_sig):
return [None if x == NO_SIGNATURE else x for x in x_sig]
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
except:
return x
def parse_scriptSig(d, _bytes):
try:
decoded = [ x for x in script_GetOp(_bytes) ]
except Exception as e:
# coinbase transactions raise an exception
print_error("parse_scriptSig: cannot find address in input script (coinbase?)",
bh2u(_bytes))
return
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
item = decoded[0][1]
if item[0] == 0:
# segwit embedded into p2sh
# witness version 0
d['address'] = bitcoin.hash160_to_p2sh(bitcoin.hash_160(item))
if len(item) == 22:
d['type'] = 'p2wpkh-p2sh'
elif len(item) == 34:
d['type'] = 'p2wsh-p2sh'
else:
print_error("unrecognized txin type", bh2u(item))
elif opcodes.OP_1 <= item[0] <= opcodes.OP_16:
# segwit embedded into p2sh
# witness version 1-16
pass
else:
# assert item[0] == 0x30
# pay-to-pubkey
d['type'] = 'p2pk'
d['address'] = "(pubkey)"
d['signatures'] = [bh2u(item)]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# p2pkh TxIn transactions push a signature
# (71-73 bytes) and then their public key
# (33 or 65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = bh2u(decoded[0][1])
x_pubkey = bh2u(decoded[1][1])
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
except:
print_error("parse_scriptSig: cannot find address in input script (p2pkh?)",
bh2u(_bytes))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if match_decoded(decoded, match):
x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
try:
m, n, x_pubkeys, pubkeys, redeemScript = parse_redeemScript(decoded[-1][1])
except NotRecognizedRedeemScript:
print_error("parse_scriptSig: cannot find address in input script (p2sh?)",
bh2u(_bytes))
# we could still guess:
# d['address'] = hash160_to_p2sh(hash_160(decoded[-1][1]))
return
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = hash160_to_p2sh(hash_160(bfh(redeemScript)))
return
print_error("parse_scriptSig: cannot find address in input script (unknown)",
bh2u(_bytes))
def parse_redeemScript(s):
dec2 = [ x for x in script_GetOp(s) ]
try:
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
except IndexError:
raise NotRecognizedRedeemScript()
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
raise NotRecognizedRedeemScript()
x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeemScript = multisig_script(pubkeys, m)
return m, n, x_pubkeys, pubkeys, redeemScript
def get_address_from_output_script(_bytes, *, net=None):
decoded = [x for x in script_GetOp(_bytes)]
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_PUBKEY, bh2u(decoded[0][1])
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2pkh(decoded[2][1], net=net)
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2sh(decoded[1][1], net=net)
# segwit address
possible_witness_versions = [opcodes.OP_0] + list(range(opcodes.OP_1, opcodes.OP_16 + 1))
for witver, opcode in enumerate(possible_witness_versions):
match = [ opcode, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash_to_segwit_addr(decoded[1][1], witver=witver, net=net)
return TYPE_SCRIPT, bh2u(_bytes)
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['x_pubkeys'] = []
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
d['num_sig'] = 0
if prevout_hash == '00'*32:
d['type'] = 'coinbase'
d['scriptSig'] = bh2u(scriptSig)
else:
d['type'] = 'unknown'
if scriptSig:
d['scriptSig'] = bh2u(scriptSig)
try:
parse_scriptSig(d, scriptSig)
except BaseException:
traceback.print_exc(file=sys.stderr)
print_error('failed to parse scriptSig', bh2u(scriptSig))
else:
d['scriptSig'] = ''
return d
def parse_witness(vds, txin):
n = vds.read_compact_size()
if n == 0:
return
if n == 0xffffffff:
txin['value'] = vds.read_uint64()
n = vds.read_compact_size()
# now 'n' is the number of items in the witness
w = list(bh2u(vds.read_bytes(vds.read_compact_size())) for i in range(n))
add_w = lambda x: var_int(len(x) // 2) + x
txin['witness'] = var_int(n) + ''.join(add_w(i) for i in w)
# FIXME: witness version > 0 will probably fail here.
# For native segwit, we would need the scriptPubKey of the parent txn
# to determine witness program version, and properly parse the witness.
# In case of p2sh-segwit, we can tell based on the scriptSig in this txn.
# The code below assumes witness version 0.
# p2sh-segwit should work in that case; for native segwit we need to tell
# between p2wpkh and p2wsh; we do this based on number of witness items,
# hence (FIXME) p2wsh with n==2 (maybe n==1 ?) will probably fail.
# If v==0 and n==2, we need parent scriptPubKey to distinguish between p2wpkh and p2wsh.
try:
if txin['type'] == 'coinbase':
pass
elif txin['type'] == 'p2wsh-p2sh' or n > 2:
try:
m, n, x_pubkeys, pubkeys, witnessScript = parse_redeemScript(bfh(w[-1]))
except NotRecognizedRedeemScript:
raise UnknownTxinType()
txin['signatures'] = parse_sig(w[1:-1])
txin['num_sig'] = m
txin['x_pubkeys'] = x_pubkeys
txin['pubkeys'] = pubkeys
txin['witnessScript'] = witnessScript
if not txin.get('scriptSig'): # native segwit script
txin['type'] = 'p2wsh'
txin['address'] = bitcoin.script_to_p2wsh(txin['witnessScript'])
elif txin['type'] == 'p2wpkh-p2sh' or n == 2:
txin['num_sig'] = 1
txin['x_pubkeys'] = [w[1]]
txin['pubkeys'] = [safe_parse_pubkey(w[1])]
txin['signatures'] = parse_sig([w[0]])
if not txin.get('scriptSig'): # native segwit script
txin['type'] = 'p2wpkh'
txin['address'] = bitcoin.public_key_to_p2wpkh(bfh(txin['pubkeys'][0]))
else:
raise UnknownTxinType()
except UnknownTxinType:
txin['type'] = 'unknown'
# FIXME: GUI might show 'unknown' address (e.g. for a non-multisig p2wsh)
except BaseException:
txin['type'] = 'unknown'
traceback.print_exc(file=sys.stderr)
print_error('failed to parse witness', txin.get('witness'))
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = bh2u(scriptPubKey)
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(bfh(raw))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
is_segwit = (n_vin == 0)
if is_segwit:
marker = vds.read_bytes(1)
if marker != b'\x01':
raise ValueError('invalid txn marker byte: {}'.format(marker))
n_vin = vds.read_compact_size()
d['inputs'] = [parse_input(vds) for i in range(n_vin)]
n_vout = vds.read_compact_size()
d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
if is_segwit:
for i in range(n_vin):
txin = d['inputs'][i]
parse_witness(vds, txin)
d['lockTime'] = vds.read_uint32()
return d
# pay & redeem scripts
def multisig_script(public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)//2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
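# --- Illustrative usage (not part of the original module) ---
# A sketch of multisig_script() building a 2-of-3 redeem script; the compressed
# public keys below are placeholders chosen purely for illustration.
def _example_multisig_redeem_script():
    fake_pubkeys = ['02' + '11' * 32, '02' + '22' * 32, '03' + '33' * 32]
    # '52' (OP_2) + three 0x21-byte pushes + '53' (OP_3) + 'ae' (OP_CHECKMULTISIG)
    return multisig_script(sorted(fake_pubkeys), 2)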
class Transaction:
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw):
if raw is None:
self.raw = None
elif isinstance(raw, str):
self.raw = raw.strip() if raw else None
elif isinstance(raw, dict):
self.raw = raw['hex']
else:
raise Exception("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
self.locktime = 0
self.version = 1
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
if txin['type'] == 'coinbase':
return [], []
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, raw):
"""Add new signatures to a transaction"""
d = deserialize(raw)
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sigs1 = txin.get('signatures')
sigs2 = d['inputs'][i].get('signatures')
for sig in sigs2:
if sig in sigs1:
continue
pre_hash = Hash(bfh(self.serialize_preimage(i)))
# der to string
order = ecdsa.ecdsa.generator_secp256k1.order()
r, s = ecdsa.util.sigdecode_der(bfh(sig[:-2]), order)
sig_string = ecdsa.util.sigencode_string(r, s, order)
compressed = True
for recid in range(4):
public_key = MyVerifyingKey.from_signature(sig_string, recid, pre_hash, curve = SECP256k1)
pubkey = bh2u(point_to_ser(public_key.pubkey.point, compressed))
if pubkey in pubkeys:
public_key.verify_digest(sig_string, pre_hash, sigdecode = ecdsa.util.sigdecode_string)
j = pubkeys.index(pubkey)
print_error("adding sig", i, j, pubkey, sig)
self._inputs[i]['signatures'][j] = sig
#self._inputs[i]['x_pubkeys'][j] = pubkey
break
# redo raw
self.raw = self.serialize()
def deserialize(self):
if self.raw is None:
return
#self.raw = self.serialize()
if self._inputs is not None:
return
d = deserialize(self.raw)
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
self.locktime = d['lockTime']
self.version = d['version']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0):
self = klass(None)
self._inputs = inputs
self._outputs = outputs
self.locktime = locktime
return self
@classmethod
def pay_script(self, output_type, addr):
if output_type == TYPE_SCRIPT:
return addr
elif output_type == TYPE_ADDRESS:
return bitcoin.address_to_script(addr)
elif output_type == TYPE_PUBKEY:
return bitcoin.public_key_to_p2pk_script(addr)
else:
raise TypeError('Unknown output type')
@classmethod
def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
try:
if x_pubkey[0:2] in ['02', '03']: # compressed pubkey
return 0x21
elif x_pubkey[0:2] == '04': # uncompressed pubkey
return 0x41
elif x_pubkey[0:2] == 'ff': # bip32 extended pubkey
return 0x21
elif x_pubkey[0:2] == 'fe': # old electrum extended pubkey
return 0x41
except Exception as e:
pass
return 0x21 # just guess it is compressed
@classmethod
def estimate_pubkey_size_for_txin(cls, txin):
pubkeys = txin.get('pubkeys', [])
x_pubkeys = txin.get('x_pubkeys', [])
if pubkeys and len(pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
elif x_pubkeys and len(x_pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
else:
return 0x21 # just guess it is compressed
@classmethod
def get_siglist(self, txin, estimate_size=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
if txin['type'] == 'coinbase':
return [], []
num_sig = txin.get('num_sig', 1)
if estimate_size:
pubkey_size = self.estimate_pubkey_size_for_txin(txin)
pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
# we assume that signature will be 0x48 bytes long
sig_list = [ "00" * 0x48 ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def serialize_witness(self, txin, estimate_size=False):
if not self.is_segwit_input(txin):
return '00'
if txin['type'] == 'coinbase':
return txin['witness']
pubkeys, sig_list = self.get_siglist(txin, estimate_size)
add_w = lambda x: var_int(len(x) // 2) + x
if txin['type'] in ['p2wpkh', 'p2wpkh-p2sh']:
witness = var_int(2) + add_w(sig_list[0]) + add_w(pubkeys[0])
elif txin['type'] in ['p2wsh', 'p2wsh-p2sh']:
n = len(sig_list) + 2
witness_script = multisig_script(pubkeys, txin['num_sig'])
witness = var_int(n) + '00' + ''.join(add_w(x) for x in sig_list) + add_w(witness_script)
else:
witness = txin.get('witness', None)
if not witness:
raise Exception('wrong txin type:', txin['type'])
if self.is_txin_complete(txin) or estimate_size:
value_field = ''
else:
value_field = var_int(0xffffffff) + int_to_hex(txin['value'], 8)
return value_field + witness
@classmethod
def is_segwit_input(cls, txin):
has_nonzero_witness = txin.get('witness', '00') != '00'
return cls.is_segwit_inputtype(txin['type']) or has_nonzero_witness
@classmethod
def is_segwit_inputtype(cls, txin_type):
return txin_type in ('p2wpkh', 'p2wpkh-p2sh', 'p2wsh', 'p2wsh-p2sh')
@classmethod
def input_script(self, txin, estimate_size=False):
_type = txin['type']
if _type == 'coinbase':
return txin['scriptSig']
pubkeys, sig_list = self.get_siglist(txin, estimate_size)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type in ['p2wpkh', 'p2wsh']:
return ''
elif _type == 'p2wpkh-p2sh':
pubkey = safe_parse_pubkey(pubkeys[0])
scriptSig = bitcoin.p2wpkh_nested_script(pubkey)
return push_script(scriptSig)
elif _type == 'p2wsh-p2sh':
witness_script = self.get_preimage_script(txin)
scriptSig = bitcoin.p2wsh_nested_script(witness_script)
return push_script(scriptSig)
elif _type == 'address':
script += push_script(pubkeys[0])
elif _type == 'unknown':
return txin['scriptSig']
return script
@classmethod
def is_txin_complete(cls, txin):
if txin['type'] == 'coinbase':
return True
num_sig = txin.get('num_sig', 1)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
return len(signatures) == num_sig
@classmethod
def get_preimage_script(self, txin):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
if txin['type'] == 'p2pkh':
return bitcoin.address_to_script(txin['address'])
elif txin['type'] in ['p2sh', 'p2wsh', 'p2wsh-p2sh']:
return multisig_script(pubkeys, txin['num_sig'])
elif txin['type'] in ['p2wpkh', 'p2wpkh-p2sh']:
pubkey = pubkeys[0]
pkh = bh2u(bitcoin.hash_160(bfh(pubkey)))
return '76a9' + push_script(pkh) + '88ac'
elif txin['type'] == 'p2pk':
pubkey = pubkeys[0]
return bitcoin.public_key_to_p2pk_script(pubkey)
else:
raise TypeError('Unknown txin type', txin['type'])
@classmethod
def serialize_outpoint(self, txin):
return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
@classmethod
def get_outpoint_from_txin(cls, txin):
if txin['type'] == 'coinbase':
return None
prevout_hash = txin['prevout_hash']
prevout_n = txin['prevout_n']
return prevout_hash + ':%d' % prevout_n
@classmethod
def serialize_input(self, txin, script):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)//2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
return s
def set_rbf(self, rbf):
nSequence = 0xffffffff - (2 if rbf else 1)
for txin in self.inputs():
txin['sequence'] = nSequence
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[0], o[1])))
def serialize_output(self, output):
output_type, addr, amount = output
s = int_to_hex(amount, 8)
script = self.pay_script(output_type, addr)
s += var_int(len(script)//2)
s += script
return s
def serialize_preimage(self, i):
nVersion = int_to_hex(self.version, 4)
nHashType = int_to_hex(1, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txin = inputs[i]
# TODO: py3 hex
if self.is_segwit_input(txin):
hashPrevouts = bh2u(Hash(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs))))
hashSequence = bh2u(Hash(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs))))
hashOutputs = bh2u(Hash(bfh(''.join(self.serialize_output(o) for o in outputs))))
outpoint = self.serialize_outpoint(txin)
preimage_script = self.get_preimage_script(txin)
scriptCode = var_int(len(preimage_script) // 2) + preimage_script
amount = int_to_hex(txin['value'], 8)
nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
preimage = nVersion + hashPrevouts + hashSequence + outpoint + scriptCode + amount + nSequence + hashOutputs + nLocktime + nHashType
else:
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.get_preimage_script(txin) if i==k else '') for k, txin in enumerate(inputs))
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
preimage = nVersion + txins + txouts + nLocktime + nHashType
return preimage
def is_segwit(self):
return any(self.is_segwit_input(x) for x in self.inputs())
def serialize(self, estimate_size=False, witness=True):
nVersion = int_to_hex(self.version, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size)) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
if witness and self.is_segwit():
marker = '00'
flag = '01'
witness = ''.join(self.serialize_witness(x, estimate_size) for x in inputs)
return nVersion + marker + flag + txins + txouts + witness + nLocktime
else:
return nVersion + txins + txouts + nLocktime
def hash(self):
print("warning: deprecated tx.hash()")
return self.txid()
def txid(self):
all_segwit = all(self.is_segwit_input(x) for x in self.inputs())
if not all_segwit and not self.is_complete():
return None
ser = self.serialize(witness=False)
return bh2u(Hash(bfh(ser))[::-1])
def wtxid(self):
ser = self.serialize(witness=True)
return bh2u(Hash(bfh(ser))[::-1])
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
return sum(x['value'] for x in self.inputs())
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
return self.input_value() - self.output_value()
def is_final(self):
return not any([x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1 for x in self.inputs()])
@profiler
def estimated_size(self):
"""Return an estimated virtual tx size in vbytes.
BIP-0141 defines 'Virtual transaction size' to be weight/4 rounded up.
This definition is only for humans, and has little meaning otherwise.
If we wanted sub-byte precision, fee calculation should use transaction
weights, but for simplicity we approximate that with (virtual_size)x4
"""
weight = self.estimated_weight()
return self.virtual_size_from_weight(weight)
@classmethod
def estimated_input_weight(cls, txin, is_segwit_tx):
'''Return an estimate of serialized input weight in weight units.'''
script = cls.input_script(txin, True)
input_size = len(cls.serialize_input(txin, script)) // 2
if cls.is_segwit_input(txin):
assert is_segwit_tx
witness_size = len(cls.serialize_witness(txin, True)) // 2
else:
witness_size = 1 if is_segwit_tx else 0
return 4 * input_size + witness_size
@classmethod
def estimated_output_size(cls, address):
"""Return an estimate of serialized output size in bytes."""
script = bitcoin.address_to_script(address)
# 8 byte value + 1 byte script len + script
return 9 + len(script) // 2
@classmethod
def virtual_size_from_weight(cls, weight):
return weight // 4 + (weight % 4 > 0)
def estimated_total_size(self):
"""Return an estimated total transaction size in bytes."""
return len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None else len(self.raw) // 2 # ASCII hex string
def estimated_witness_size(self):
"""Return an estimate of witness size in bytes."""
if not self.is_segwit():
return 0
inputs = self.inputs()
estimate = not self.is_complete()
witness = ''.join(self.serialize_witness(x, estimate) for x in inputs)
witness_size = len(witness) // 2 + 2 # include marker and flag
return witness_size
def estimated_base_size(self):
"""Return an estimated base transaction size in bytes."""
return self.estimated_total_size() - self.estimated_witness_size()
def estimated_weight(self):
"""Return an estimate of transaction weight."""
total_tx_size = self.estimated_total_size()
base_tx_size = self.estimated_base_size()
return 3 * base_tx_size + total_tx_size
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = list(filter(None, txin.get('signatures',[])))
s += len(signatures)
r += txin.get('num_sig',-1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
def sign(self, keypairs):
for i, txin in enumerate(self.inputs()):
num = txin['num_sig']
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, x_pubkey in enumerate(x_pubkeys):
signatures = list(filter(None, txin['signatures']))
if len(signatures) == num:
# txin is complete
break
if x_pubkey in keypairs.keys():
print_error("adding signature for", x_pubkey)
sec, compressed = keypairs.get(x_pubkey)
pubkey = public_key_from_private_key(sec, compressed)
# add signature
pre_hash = Hash(bfh(self.serialize_preimage(i)))
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = bitcoin.MySigningKey.from_secret_exponent(secexp, curve = SECP256k1)
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic(pre_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der)
if not public_key.verify_digest(sig, pre_hash, sigdecode = ecdsa.util.sigdecode_der):
raise Exception('Sanity check verifying our own signature failed.')
txin['signatures'][j] = bh2u(sig) + '01'
#txin['x_pubkeys'][j] = pubkey
txin['pubkeys'][j] = pubkey # needed for fd keys
self._inputs[i] = txin
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, x, v in self.outputs():
if type == TYPE_ADDRESS:
addr = x
elif type == TYPE_PUBKEY:
addr = bitcoin.public_key_to_p2pkh(bfh(x))
else:
addr = 'SCRIPT ' + x
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
if not txt:
raise ValueError("empty string")
try:
bfh(txt)
is_hex = True
except:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
|
the-stack_106_17938
|
import threading
import datetime
import PySimpleGUIQt as sg
from PySimpleGUIQt.PySimpleGUIQt import (
BUTTON_TYPE_BROWSE_FILE,
BUTTON_TYPE_BROWSE_FILES,
BUTTON_TYPE_SAVEAS_FILE,
BUTTON_TYPE_BROWSE_FOLDER,
POPUP_BUTTONS_NO_BUTTONS,
WIN_CLOSED,
)
from toolbox_creator.globe_icon import globe_icon
from toolbox_creator.date_picker import popup_get_date
from toolbox_creator.function_validation import (
validate_inputs,
update_inputs,
)
from toolbox_creator.utils import (
get_default_date,
get_first_key,
parse_date,
get_today_date,
default_texts,
)
def create_layout(name, tools, create_console=False, scalar=1.0):
if name not in tools:
raise Exception("Tool not found")
layout = []
listeners = []
radio_group_id = 1
for parameter in tools[name]["parameters"]:
parameter_name = list(parameter.keys())[0]
parameter_type = parameter[parameter_name]["type"]
if "default" in parameter[parameter_name]:
default = parameter[parameter_name]["default"]
else:
default = False
if "tooltip" in parameter[parameter_name]:
tooltip = parameter[parameter_name]["tooltip"]
else:
tooltip = None
if "default_extension" in parameter[parameter_name]:
default_extension = parameter[parameter_name]["default_extension"]
else:
default_extension = [("*", "All Files")]
if "default_date" in parameter[parameter_name]:
default_date = parse_date(parameter[parameter_name]["default_date"])
else:
default_date = get_today_date()
if "display_name" in parameter[parameter_name]:
display_name = parameter[parameter_name]["display_name"]
else:
display_name = parameter_name
if "enabled_by" in parameter[parameter_name]:
enabled_by = True
enabled_by_key = get_first_key(parameter[parameter_name]["enabled_by"])
enabled_by_val = parameter[parameter_name]["enabled_by"][enabled_by_key]
listeners.append(
{
"parameter": parameter_name,
"enabled_by": enabled_by_key,
"values": enabled_by_val,
}
)
else:
enabled_by = False
default_date_str = datetime.datetime(
default_date[2], default_date[0], default_date[1]
).strftime("%Y%m%d")
text_height = scalar * 1.2
input_pad = ((0, round(10 * scalar)), (0, 0))
button_size = (round(16 * scalar), text_height)
input_size = (round(54 * scalar), text_height)
text_size = (round(24 * scalar), text_height)
param_input = None
path_input = None
justification = "center"
if parameter_type == "file_open":
param_input = sg.Button(
"Browse",
button_type=BUTTON_TYPE_BROWSE_FILE,
key=parameter_name + "_picker",
border_width=0,
enable_events=True,
size=button_size,
tooltip=tooltip,
target=parameter_name,
)
path_input = sg.In(
default_text=default_texts[0],
key=parameter_name,
justification=justification,
enable_events=True,
disabled=False,
tooltip=tooltip,
size=(input_size[0] - button_size[0] - 1, input_size[1]),
pad=input_pad,
)
elif parameter_type == "file_open_multiple":
param_input = sg.Button(
"Browse",
button_type=BUTTON_TYPE_BROWSE_FILES,
key=parameter_name + "_picker",
enable_events=True,
border_width=0,
size=button_size,
tooltip=tooltip,
target=parameter_name,
)
path_input = sg.In(
default_text=default_texts[1],
key=parameter_name,
enable_events=True,
disabled=False,
tooltip=tooltip,
justification=justification,
size=(input_size[0] - button_size[0] - 1, input_size[1]),
pad=input_pad,
)
elif parameter_type == "file_save":
param_input = sg.Button(
"Save As",
button_type=BUTTON_TYPE_SAVEAS_FILE,
border_width=0,
key=parameter_name + "_picker",
enable_events=True,
size=button_size,
tooltip=tooltip,
target=parameter_name,
file_types=default_extension,
)
path_input = sg.In(
default_text=default_texts[2],
key=parameter_name,
enable_events=True,
disabled=False,
tooltip=tooltip,
justification=justification,
size=(input_size[0] - button_size[0] - 1, input_size[1]),
pad=input_pad,
)
elif parameter_type == "folder_save" or parameter_type == "folder_open":
param_input = sg.Button(
"Browse",
button_type=BUTTON_TYPE_BROWSE_FOLDER,
border_width=0,
key=parameter_name + "_picker",
enable_events=True,
size=button_size,
tooltip=tooltip,
target=parameter_name,
)
path_input = sg.In(
default_text=default_texts[3],
key=parameter_name,
enable_events=True,
disabled=False,
tooltip=tooltip,
justification=justification,
size=(input_size[0] - button_size[0] - 1, input_size[1]),
pad=input_pad,
)
elif (
parameter_type == "number"
or parameter_type == "string"
or parameter_type == "password"
):
param_input = sg.InputText(
key=parameter_name,
enable_events=True,
password_char="*" if parameter_type == "password" else "",
default_text=default,
tooltip=tooltip,
background_color="#f1f1f1",
size=input_size,
pad=input_pad,
)
elif parameter_type == "boolean":
param_input = sg.Checkbox(
"",
key=parameter_name,
enable_events=True,
default=default,
tooltip=tooltip,
pad=((0, 0), (round(8 * scalar), 0)),
)
elif parameter_type == "slider":
param_args = parameter[parameter_name].keys()
min_value = (
parameter[parameter_name]["min_value"]
if "min_value" in param_args
else 0
)
max_value = (
parameter[parameter_name]["max_value"]
if "max_value" in param_args
else 100
)
default_value = (
parameter[parameter_name]["default"] if "default" in param_args else 50
)
step = parameter[parameter_name]["step"] if "step" in param_args else 1
if default < min_value or default > max_value:
default = min_value
param_input = sg.Slider(
range=(min_value, max_value),
orientation="h",
default_value=default_value,
enable_events=True,
tick_interval=step,
key="slider_" + parameter_name,
tooltip=tooltip,
size_px=(round(360 * scalar), round(38 * scalar)),
pad=input_pad,
)
path_input = sg.In(
default_text=default,
key=parameter_name,
enable_events=True,
disabled=False,
tooltip=tooltip,
size=button_size,
pad=input_pad,
justification=justification,
)
elif parameter_type == "dropdown":
param_options = parameter[parameter_name]["options"]
labels = []
selected = None
for idx, option in enumerate(param_options):
labels.append(option["label"])
if "default" in option.keys() and option["default"] == True:
selected = option["label"]
param_input = sg.Combo(
labels,
default_value=selected,
key=parameter_name,
metadata=option["value"],
background_color="#f1f1f1",
readonly=True,
enable_events=True,
visible_items=10,
tooltip=tooltip,
size=(input_size[0] - button_size[0] - 1, input_size[1]),
)
elif parameter_type == "radio":
param_options = parameter[parameter_name]["options"]
param_input = []
for idx, option in enumerate(param_options):
if "default" in option.keys() and option["default"] == True:
selected = True
else:
selected = False
left_pad = 0 if idx == 0 else round(16 * scalar)
param_input.append(
sg.Radio(
option["label"],
radio_group_id,
default=selected,
key=parameter_name + "_" + option["key"],
metadata=option["value"],
tooltip=tooltip,
pad=((left_pad, 0), (0, 0)),
)
)
radio_group_id += 1
elif parameter_type == "date_year":
param_input = sg.Button(
"Date",
key="date_picker_" + parameter_name,
button_type=sg.BUTTON_TYPE_READ_FORM,
enable_events=True,
tooltip=tooltip,
bind_return_key=True,
border_width=0,
size=button_size,
)
path_input = sg.Input(
default_date_str,
key=parameter_name,
enable_events=True,
tooltip=tooltip,
visible=True,
disabled=False,
justification=justification,
size=(input_size[0] - button_size[0] - 1, input_size[1]),
pad=input_pad,
)
if param_input is not None:
param_text = sg.Text(
display_name,
tooltip=tooltip,
key=parameter_name + "_text",
background_color=sg.theme_background_color(),
size=text_size,
pad=((0, 0), (0, 0)),
margins=(0, 0, round(4 * scalar), 0),
justification="right",
)
if not isinstance(param_input, list):
param_inputs = [param_input]
else:
param_inputs = param_input
if parameter_type != "radio" and path_input is not None:
if parameter_type in [
"date_year",
"file_open",
"file_open_multiple",
"folder_save",
"folder_open",
"file_save",
"slider",
]:
param_inputs = [path_input, param_input]
else:
param_inputs = [param_input, path_input]
show_row = True
if enabled_by:
found = False
found_val = None
for param in tools[name]["parameters"]:
if get_first_key(param) == enabled_by_key:
found_keys = param[enabled_by_key]
if "default" not in found_keys and "options" not in found_keys:
raise Exception(
f"No default value for {get_first_key(param)} required by 'enabled_by' on {parameter_name}"
)
if "options" in found_keys and "default" not in found_keys:
found_default = False
for option in found_keys["options"]:
if "default" in option and option["default"] is True:
found_val = option["value"]
found_default = True
if found_default is False:
raise Exception(
f"No default value for {get_first_key(param)} required by 'enabled_by' on {parameter_name}"
)
else:
found_val = found_keys["default"]
found = True
break
if found is False:
raise Exception(
f"No parameter found for 'enabled_by' on {parameter_name}. Searched for: {enabled_by_key}"
)
if found_val not in enabled_by_val:
show_row = False
append = [
sg.Column(
[
[param_text],
],
size=(round(120 * scalar), round(36 * scalar)),
pad=((0, 0), (0, 0)),
element_justification="r",
visible=show_row,
key=parameter_name + "_col1",
),
sg.Column(
[
param_inputs,
],
size=(round(260 * scalar), round(36 * scalar)),
pad=((0, 0), (0, 0)),
visible=show_row,
key=parameter_name + "_col2",
),
]
layout.append(append)
layout.append(
[
sg.Column(
[
[
sg.Text("", size=(round(26 * scalar), button_size[1])),
sg.Button(
"Run",
size=button_size,
key="-RUN-BUTTON-",
visible=True,
border_width=0,
),
sg.Text("", size=(1, button_size[1])),
sg.Button(
"Exit",
size=button_size,
button_color=(sg.theme_background_color(), "#B22222"),
key="-EXIT-BUTTON-",
border_width=0,
),
]
],
)
]
)
layout.append(
[
sg.Text("", size=(round(36 * scalar), None)),
sg.Text(
"Progress:",
key="-PROGRESS-TEXT-",
pad=((round(20 * scalar), round(100 * scalar)), (0, 0)),
),
sg.Column(
[
[
sg.ProgressBar(
1,
orientation="h",
key="-PROGRESS-",
pad=((0, round(24 * scalar)), (0, 0)),
size=(
input_size[0] - round(4 * scalar),
round(36 * scalar),
),
),
sg.Button(
"Cancel",
key="-CANCEL-BUTTON-",
button_color=(sg.theme_background_color(), "#d7a824"),
border_width=0,
size=button_size,
pad=((round(10 * scalar), 0), (0, 0)),
),
],
],
pad=((round(10 * scalar), round(10 * scalar)), (0, 0)),
size=(round(520 * scalar), round(36 * scalar)),
),
]
)
if create_console:
layout.append(
[
sg.Output(
pad=((0, 0), (round(10 * scalar), round(10 * scalar))),
size_px=(None, round(200 * scalar)),
background_color="#f1f1f1",
),
]
)
layout = [
[
sg.Column(
layout,
size=(round(900 * scalar), None),
scrollable=True,
element_justification="left",
pad=((0, 0), (0, 0)),
),
sg.Button("-THREAD-", visible=False),
]
]
return (layout, tools[name]["function"], listeners)
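# --- Illustrative example (not part of the original module) ---
# A minimal sketch of the `tools` mapping that create_layout() expects, inferred
# from the parameter handling above; every name and value here is a placeholder.
_EXAMPLE_TOOLS = {
    "Resample Raster": {
        "function": print,  # placeholder callable; normally the tool's actual function
        "parameters": [
            {"input_raster": {"type": "file_open", "display_name": "Input raster"}},
            {"target_size": {"type": "number", "default": "10", "tooltip": "Pixel size"}},
            {"overwrite": {"type": "boolean", "default": False}},
            {"out_path": {"type": "file_save", "default_extension": [("*.tif", "GeoTIFF")]}},
        ],
    },
}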
def create_function_window(
function_name,
tools,
create_console=False,
icon=globe_icon,
theme="Reddit",
scalar=1.0,
):
sg.theme(theme)
sg.set_options(
element_padding=(0, 0),
margins=(0, 0),
font=("Helvetica", 10),
border_width=0,
)
layout, buteo_function, listeners = create_layout(
function_name, tools, create_console=create_console, scalar=scalar
)
window_func = sg.Window(
function_name,
layout,
resizable=True,
size=(int(900 * scalar), int(1100 * scalar)),
finalize=True,
icon=icon,
element_justification="center",
border_depth=0,
element_padding=(0, 0),
)
progress_bar = window_func["-PROGRESS-"]
progress_bar.UpdateBar(0, 100)
print("Opening function:", function_name)
thread = None
run_clicked = False
while True:
event_func, values_func = window_func.read()
if (
event_func == "-EXIT-BUTTON-"
or event_func == WIN_CLOSED
or event_func is None
):
break
elif event_func == "-RUN-BUTTON-":
run_clicked = True
try:
validation = validate_inputs(
function_name, values_func, window_func, tools
)
if False in validation["valid"]:
sg.popup(
"\n".join(validation["message"]),
title="Error",
keep_on_top=True,
no_titlebar=False,
grab_anywhere=True,
button_type=POPUP_BUTTONS_NO_BUTTONS,
non_blocking=True,
)
progress_bar.UpdateBar(0, 100)
else:
args = validation["cast_args"]
kwargs = validation["cast_kwargs"]
def long_operation_thread(window):
global thread_message
buteo_return = None
try:
buteo_return = buteo_function(*args, **kwargs)
thread_message = buteo_return
except Exception as e:
thread_message = ("Error", e)
window["-THREAD-"].click()
return buteo_return
progress_bar.UpdateBar(10, 100)
window_func["-PROGRESS-TEXT-"].update("Running..")
window_func["-RUN-BUTTON-"].update(
button_color=(sg.theme_element_text_color(), "#999999")
)
thread = threading.Thread(
target=long_operation_thread,
args=(window_func,),
daemon=True,
)
thread.start()
except Exception as e:
progress_bar.UpdateBar(0, 100)
window_func["-PROGRESS-TEXT-"].update("Progress:")
window_func["-RUN-BUTTON-"].update(button_color=sg.theme_button_color())
sg.Popup("Error", str(e))
elif event_func == "-THREAD-":
try:
thread.join(timeout=0)
print(thread_message)
except:
print("Error joining thread")
            if isinstance(thread_message, (list, tuple)) and thread_message[0] == "Error":  # errors are posted as an ("Error", exception) tuple
sg.Popup("Error", str(thread_message[1]))
window_func["-PROGRESS-TEXT-"].update("Progress:")
progress_bar.UpdateBar(0, 100)
else:
window_func["-PROGRESS-TEXT-"].update("Completed.")
progress_bar.UpdateBar(100, 100)
window_func["-RUN-BUTTON-"].update(button_color=sg.theme_button_color())
elif (
isinstance(event_func, str)
and len(event_func) > 12
and event_func[:12] == "date_picker_"
):
target_param = event_func[12:]
try:
default_date = get_default_date(target_param, window_func)
date = popup_get_date(
icon=globe_icon,
start_year=default_date[0],
start_mon=default_date[1],
start_day=default_date[2],
)
if date is not None:
window_func[event_func[12:]].update(value=date)
if run_clicked:
validate_inputs(function_name, values_func, window_func, tools)
except Exception as e:
sg.Popup("Error", str(e))
elif (
isinstance(event_func, str)
and len(event_func) > len("slider_")
and event_func[: len("slider_")] == "slider_"
):
target_param = event_func[len("slider_") :]
window_func[target_param].update(value=values_func[event_func])
if run_clicked:
validate_inputs(function_name, values_func, window_func, tools)
else:
update_inputs(
event_func,
values_func,
window_func,
listeners,
function_name,
tools,
)
if run_clicked:
validate_inputs(function_name, values_func, window_func, tools)
window_func.close()
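# --- Illustrative usage (not part of the original module) ---
# A sketch of opening a tool window for a `tools` mapping shaped like
# _EXAMPLE_TOOLS above; the tool name is a placeholder.
def _example_open_tool_window():
    create_function_window("Resample Raster", _EXAMPLE_TOOLS, create_console=True)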
|
the-stack_106_17939
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import AccessError
class Digest(models.Model):
_inherit = 'digest.digest'
kpi_crm_lead_created = fields.Boolean('New Leads/Opportunities')
kpi_crm_lead_created_value = fields.Integer(compute='_compute_kpi_crm_lead_created_value')
kpi_crm_opportunities_won = fields.Boolean('Opportunities Won')
kpi_crm_opportunities_won_value = fields.Integer(compute='_compute_kpi_crm_opportunities_won_value')
def _compute_kpi_crm_lead_created_value(self):
if not self.env.user.has_group('sales_team.group_sale_salesman'):
raise AccessError(_("Do not have access, skip this data for user's digest email"))
for record in self:
start, end, company = record._get_kpi_compute_parameters()
record.kpi_crm_lead_created_value = self.env['crm.lead'].search_count([
('create_date', '>=', start),
('create_date', '<', end),
('company_id', '=', company.id)
])
def _compute_kpi_crm_opportunities_won_value(self):
if not self.env.user.has_group('sales_team.group_sale_salesman'):
raise AccessError(_("Do not have access, skip this data for user's digest email"))
for record in self:
start, end, company = record._get_kpi_compute_parameters()
record.kpi_crm_opportunities_won_value = self.env['crm.lead'].search_count([
('type', '=', 'opportunity'),
('probability', '=', '100'),
('date_closed', '>=', start),
('date_closed', '<', end),
('company_id', '=', company.id)
])
def compute_kpis_actions(self, company, user):
res = super(Digest, self).compute_kpis_actions(company, user)
res['kpi_crm_lead_created'] = 'crm.crm_lead_opportunities_tree_view&menu_id=%s' % self.env.ref('crm.crm_menu_root').id
res['kpi_crm_opportunities_won'] = 'crm.crm_lead_opportunities_tree_view&menu_id=%s' % self.env.ref('crm.crm_menu_root').id
if user.has_group('crm.group_use_lead'):
res['kpi_crm_lead_created'] = 'crm.crm_lead_all_leads&menu_id=%s' % self.env.ref('crm.crm_menu_root').id
return res
|
the-stack_106_17940
|
# Copyright 2020 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tasks for multi-clip mocap tracking with RL."""
import abc
import collections
import typing
from typing import Any, Callable, Mapping, Optional, Sequence, Text, Union
from absl import logging
from dm_control import composer
from dm_control.composer.observation import observable as base_observable
from dm_control.locomotion.mocap import loader
from dm_control.locomotion.tasks.reference_pose import datasets
from dm_control.locomotion.tasks.reference_pose import types
from dm_control.locomotion.tasks.reference_pose import utils
from dm_control.locomotion.tasks.reference_pose import rewards
from dm_control.mujoco.wrapper import mjbindings
from dm_control.utils import transformations as tr
from dm_env import specs
import numpy as np
import six
import tree
if typing.TYPE_CHECKING:
from dm_control.locomotion.walkers import legacy_base
from dm_control import mjcf
mjlib = mjbindings.mjlib
DEFAULT_PHYSICS_TIMESTEP = 0.005
_MAX_END_STEP = 10000
def _strip_reference_prefix(dictionary: Mapping[Text, Any], prefix: Text):
new_dictionary = dict()
for key in list(dictionary.keys()):
if key.startswith(prefix):
key_without_prefix = key.split(prefix)[1]
# note that this will not copy the underlying array.
new_dictionary[key_without_prefix] = dictionary[key]
return new_dictionary
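# Illustrative example (added): with prefix 'walker/',
#   _strip_reference_prefix({'walker/joints': j, 'walker/position': p, 'other': x}, 'walker/')
# returns {'joints': j, 'position': p}; keys without the prefix are dropped and the
# underlying arrays are not copied.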
@six.add_metaclass(abc.ABCMeta)
class ReferencePosesTask(composer.Task):
"""Abstract base class for task that uses reference data."""
def __init__(
self,
walker: Callable[..., 'legacy_base.Walker'],
arena: composer.Arena,
ref_path: Text,
ref_steps: Sequence[int],
dataset: Union[Text, types.ClipCollection],
termination_error_threshold: float = 0.3,
min_steps: int = 10,
reward_type: Text = 'termination_reward',
physics_timestep: float = DEFAULT_PHYSICS_TIMESTEP,
always_init_at_clip_start: bool = False,
proto_modifier: Optional[Any] = None,
ghost_offset: Optional[Sequence[Union[int, float]]] = None,
body_error_multiplier: Union[int, float] = 1.0,
):
"""Abstract task that uses reference data.
Args:
walker: Walker constructor to be used.
arena: Arena to be used.
ref_path: Path to the dataset containing reference poses.
      ref_steps: tuples of indices of reference observation. E.g. if
ref_steps=(1, 2, 3) the walker/reference observation at time t will
contain information from t+1, t+2, t+3.
dataset: A ClipCollection instance or a name of a dataset that appears as
a key in DATASETS in datasets.py
termination_error_threshold: Error threshold for episode terminations.
min_steps: minimum number of steps within an episode. This argument
determines the latest allowable starting point within a given reference
trajectory.
reward_type: type of reward to use, must be a string that appears as a key
in the REWARD_FN dict in rewards.py.
physics_timestep: Physics timestep to use for simulation.
      always_init_at_clip_start: only initialize episodes at the start of a
reference trajectory.
proto_modifier: Optional proto modifier to modify reference trajectories,
e.g. adding a vertical offset.
ghost_offset: if not None, include a ghost rendering of the walker with
the reference pose at the specified position offset.
body_error_multiplier: A multiplier that is applied to the body error term
when determining failure termination condition.
"""
self._ref_steps = np.sort(ref_steps)
self._max_ref_step = self._ref_steps[-1]
self._termination_error_threshold = termination_error_threshold
self._reward_fn = rewards.get_reward(reward_type)
self._reward_keys = rewards.get_reward_channels(reward_type)
self._min_steps = min_steps
self._always_init_at_clip_start = always_init_at_clip_start
self._ghost_offset = ghost_offset
self._body_error_multiplier = body_error_multiplier
logging.info('Reward type %s', reward_type)
if isinstance(dataset, Text):
try:
dataset = datasets.DATASETS[dataset]
except KeyError:
logging.error('Dataset %s not found in datasets.py', dataset)
raise
self._load_reference_data(
ref_path=ref_path, proto_modifier=proto_modifier, dataset=dataset)
self._get_possible_starts()
logging.info('%d starting points found.', len(self._possible_starts))
# load a dummy trajectory
self._current_clip_index = 0
self._current_clip = self._loader.get_trajectory(
self._dataset.ids[0], zero_out_velocities=False)
# Create the environment.
self._arena = arena
self._walker = utils.add_walker(walker, self._arena)
self.set_timesteps(
physics_timestep=physics_timestep,
control_timestep=self._current_clip.dt)
# Identify the desired body components.
try:
walker_bodies = self._walker.mocap_tracking_bodies
except AttributeError:
logging.info('Walker must implement mocap bodies for this task.')
raise
walker_bodies_names = [bdy.name for bdy in walker_bodies]
self._body_idxs = np.array(
[walker_bodies_names.index(bdy) for bdy in walker_bodies_names])
# Create the observables.
self._add_observables()
# initialize counters etc.
self._time_step = 0
self._current_start_time = 0.0
self._last_step = 0
self._current_clip_index = 0
self._end_mocap = False
self._should_truncate = False
# Set up required dummy quantities for observations
self._clip_reference_features = self._current_clip.as_dict()
self._clip_reference_features = _strip_reference_prefix(
self._clip_reference_features, 'walker/')
self._walker_joints = self._clip_reference_features['joints'][0]
self._walker_features = tree.map_structure(lambda x: x[0],
self._clip_reference_features)
self._walker_features_prev = tree.map_structure(
lambda x: x[0], self._clip_reference_features)
self._current_reference_features = dict()
# if requested add ghost body to visualize motion capture reference.
if self._ghost_offset is not None:
self._ghost = utils.add_walker(walker, self._arena, 'ghost', ghost=True)
# initialize reward channels
self._reset_reward_channels()
def _load_reference_data(self, ref_path, proto_modifier,
dataset: types.ClipCollection):
self._loader = loader.HDF5TrajectoryLoader(
ref_path, proto_modifier=proto_modifier)
self._dataset = dataset
self._num_clips = len(self._dataset.ids)
if self._dataset.end_steps is None:
# load all trajectories to infer clip end steps.
self._all_clips = [
self._loader.get_trajectory( # pylint: disable=g-complex-comprehension
clip_id,
start_step=clip_start_step,
end_step=_MAX_END_STEP) for clip_id, clip_start_step in zip(
self._dataset.ids, self._dataset.start_steps)
]
# infer clip end steps to set sampling distribution
self._dataset.end_steps = tuple(clip.end_step for clip in self._all_clips)
else:
self._all_clips = [None] * self._num_clips
def _add_observables(self):
observables = []
observables += self._walker.observables.proprioception
observables += self._walker.observables.kinematic_sensors
observables += self._walker.observables.dynamic_sensors
for observable in observables:
observable.enabled = True
self._walker.observables.add_observable(
'clip_id', base_observable.Generic(self.get_clip_id))
self._walker.observables.add_observable(
'reference_rel_joints',
base_observable.Generic(self.get_reference_rel_joints))
self._walker.observables.add_observable(
'reference_rel_bodies_pos_global',
base_observable.Generic(self.get_reference_rel_bodies_pos_global))
self._walker.observables.add_observable(
'reference_rel_bodies_quats',
base_observable.Generic(self.get_reference_rel_bodies_quats))
self._walker.observables.add_observable(
'reference_rel_bodies_pos_local',
base_observable.Generic(self.get_reference_rel_bodies_pos_local))
self._walker.observables.add_observable(
'reference_ego_bodies_quats',
base_observable.Generic(self.get_reference_ego_bodies_quats))
self._walker.observables.add_observable(
'reference_rel_root_quat',
base_observable.Generic(self.get_reference_rel_root_quat))
self._walker.observables.add_observable(
'reference_rel_root_pos_local',
base_observable.Generic(self.get_reference_rel_root_pos_local))
self._walker.observables.add_observable(
'reference_appendages_pos',
base_observable.Generic(self.get_reference_appendages_pos))
self._walker.observables.add_observable(
'velocimeter_control', base_observable.Generic(self.get_veloc_control))
self._walker.observables.add_observable(
'gyro_control', base_observable.Generic(self.get_gyro_control))
self._walker.observables.add_observable(
'joints_vel_control',
base_observable.Generic(self.get_joints_vel_control))
def _get_possible_starts(self):
# List all possible (clip, step) starting points.
self._possible_starts = []
self._start_probabilities = []
dataset = self._dataset
for clip_number, (start, end, weight) in enumerate(
zip(dataset.start_steps, dataset.end_steps, dataset.weights)):
# length - required lookahead - minimum number of steps
last_possible_start = end - self._max_ref_step - self._min_steps
if self._always_init_at_clip_start:
self._possible_starts += [(clip_number, start)]
self._start_probabilities += [weight]
else:
self._possible_starts += [
(clip_number, j) for j in range(start, last_possible_start)
]
self._start_probabilities += [
weight for _ in range(start, last_possible_start)
]
# normalize start probabilities
self._start_probabilities = np.array(self._start_probabilities) / np.sum(
self._start_probabilities)
def initialize_episode_mjcf(self, random_state: np.random.RandomState):
if hasattr(self._arena, 'regenerate'):
self._arena.regenerate(random_state)
def _get_clip_to_track(self, random_state: np.random.RandomState):
# Randomly select a starting point.
index = random_state.choice(
len(self._possible_starts), p=self._start_probabilities)
clip_index, start_step = self._possible_starts[index]
self._current_clip_index = clip_index
clip_id = self._dataset.ids[self._current_clip_index]
if self._all_clips[self._current_clip_index] is None:
# fetch selected trajectory
logging.info('Loading clip %s', clip_id)
self._all_clips[self._current_clip_index] = self._loader.get_trajectory(
clip_id,
start_step=self._dataset.start_steps[self._current_clip_index],
end_step=self._dataset.end_steps[self._current_clip_index],
zero_out_velocities=False)
self._current_clip = self._all_clips[self._current_clip_index]
self._clip_reference_features = self._current_clip.as_dict()
self._clip_reference_features = _strip_reference_prefix(
self._clip_reference_features, 'walker/')
# The reference features are already restricted to
# clip_start_step:clip_end_step. However start_step is in
# [clip_start_step:clip_end_step]. Hence we subtract clip_start_step to
# obtain a valid index for the reference features.
self._time_step = start_step - self._dataset.start_steps[
self._current_clip_index]
self._current_start_time = (start_step - self._dataset.start_steps[
self._current_clip_index]) * self._current_clip.dt
self._last_step = len(
self._clip_reference_features['joints']) - self._max_ref_step - 1
logging.info('Mocap %s at step %d with remaining length %d.', clip_id,
start_step, self._last_step - start_step)
def initialize_episode(self, physics: 'mjcf.Physics',
random_state: np.random.RandomState):
"""Randomly selects a starting point and set the walker."""
self._get_clip_to_track(random_state)
# Set the walker at the beginning of the clip.
self._set_walker(physics)
self._walker_features = utils.get_features(physics, self._walker)
self._walker_features_prev = utils.get_features(physics, self._walker)
self._walker_joints = np.array(physics.bind(self._walker.mocap_joints).qpos) # pytype: disable=attribute-error
# compute initial error
self._compute_termination_error()
# assert error is 0 at initialization. In particular this will prevent
# a proto/walker mismatch.
if self._termination_error > 1e-2:
      raise ValueError(('The termination error exceeds 1e-2 at initialization. '
'This is likely due to a proto/walker mismatch.'))
self._update_ghost(physics)
# reset reward channels
self._reset_reward_channels()
def _reset_reward_channels(self):
if self._reward_keys:
self.last_reward_channels = collections.OrderedDict([
(k, 0.0) for k in self._reward_keys
])
else:
self.last_reward_channels = None
def _compute_termination_error(self):
target_joints = self._clip_reference_features['joints'][self._time_step]
error_joints = np.mean(np.abs(target_joints - self._walker_joints))
target_bodies = self._clip_reference_features['body_positions'][
self._time_step]
error_bodies = np.mean(
np.abs((target_bodies -
self._walker_features['body_positions'])[self._body_idxs]))
self._termination_error = (
0.5 * self._body_error_multiplier * error_bodies + 0.5 * error_joints)
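  # Illustrative note (added): the termination error weights the two tracking terms
  # equally, i.e.
  #   error = 0.5 * body_error_multiplier * mean(|body_pos_ref - body_pos_walker|)
  #         + 0.5 * mean(|joints_ref - joints_walker|)
  # With the default body_error_multiplier of 1.0 and, say, mean body and joint
  # errors of 0.1 and 0.2, the termination error is 0.15, below the default
  # threshold of 0.3.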
def before_step(self, physics: 'mjcf.Physics', action,
random_state: np.random.RandomState):
self._walker.apply_action(physics, action, random_state)
def after_step(self, physics: 'mjcf.Physics',
random_state: np.random.RandomState):
"""Update the data after step."""
del random_state # unused by after_step.
self._walker_features_prev = self._walker_features.copy()
def should_terminate_episode(self, physics: 'mjcf.Physics'):
del physics # physics unused by should_terminate_episode.
if self._should_truncate:
logging.info('Truncate with error %f.', self._termination_error)
return True
if self._end_mocap:
logging.info('End of mocap.')
return True
return False
def get_discount(self, physics: 'mjcf.Physics'):
del physics # unused by get_discount.
if self._should_truncate:
return 0.0
return 1.0
def get_reference_rel_joints(self, physics: 'mjcf.Physics'):
"""Observation of the reference joints relative to walker."""
del physics # physics unused by reference observations.
time_steps = self._time_step + self._ref_steps
diff = (self._clip_reference_features['joints'][time_steps] -
self._walker_joints)
return diff[:, self._walker.mocap_to_observable_joint_order].flatten()
def get_reference_rel_bodies_pos_global(self, physics: 'mjcf.Physics'):
"""Observation of the reference bodies relative to walker."""
del physics # physics unused by reference observations.
time_steps = self._time_step + self._ref_steps
return (self._clip_reference_features['body_positions'][time_steps] -
self._walker_features['body_positions'])[:,
self._body_idxs].flatten()
def get_reference_rel_bodies_quats(self, physics: 'mjcf.Physics'):
"""Observation of the reference bodies quats relative to walker."""
del physics # physics unused by reference observations.
time_steps = self._time_step + self._ref_steps
obs = []
for t in time_steps:
for b in self._body_idxs:
obs.append(
tr.quat_diff(
self._walker_features['body_quaternions'][b, :],
self._clip_reference_features['body_quaternions'][t, b, :]))
return np.concatenate([o.flatten() for o in obs])
def get_reference_rel_bodies_pos_local(self, physics: 'mjcf.Physics'):
"""Observation of the reference bodies relative to walker in local frame."""
time_steps = self._time_step + self._ref_steps
obs = self._walker.transform_vec_to_egocentric_frame(
physics, (self._clip_reference_features['body_positions'][time_steps] -
self._walker_features['body_positions'])[:, self._body_idxs])
return np.concatenate([o.flatten() for o in obs])
def get_reference_ego_bodies_quats(self, physics: 'mjcf.Physics'):
"""Body quat of the reference relative to the reference root quat."""
del physics # physics unused by reference observations.
time_steps = self._time_step + self._ref_steps
obs = []
for t in time_steps:
for b in self._body_idxs:
obs.append(
tr.quat_diff(
self._clip_reference_features['quaternion'][t, :],
self._clip_reference_features['body_quaternions'][t, b, :]))
return np.concatenate([o.flatten() for o in obs])
def get_reference_rel_root_quat(self, physics: 'mjcf.Physics'):
"""Root quaternion of reference relative to current root quat."""
del physics # physics unused by reference observations.
time_steps = self._time_step + self._ref_steps
obs = []
for t in time_steps:
obs.append(
tr.quat_diff(self._walker_features['quaternion'],
self._clip_reference_features['quaternion'][t, :]))
return np.concatenate([o.flatten() for o in obs])
def get_reference_appendages_pos(self, physics: 'mjcf.Physics'):
"""Reference appendage positions in reference frame."""
del physics # physics unused by reference observations.
time_steps = self._time_step + self._ref_steps
return self._clip_reference_features['appendages'][time_steps].flatten()
def get_reference_rel_root_pos_local(self, physics: 'mjcf.Physics'):
"""Reference position relative to current root position in root frame."""
time_steps = self._time_step + self._ref_steps
obs = self._walker.transform_vec_to_egocentric_frame(
physics, (self._clip_reference_features['position'][time_steps] -
self._walker_features['position']))
return np.concatenate([o.flatten() for o in obs])
def get_veloc_control(self, physics: 'mjcf.Physics'):
"""Velocity measurements in the prev root frame at the control timestep."""
del physics # physics unused by get_veloc_control.
rmat_prev = tr.quat_to_mat(self._walker_features_prev['quaternion'])[:3, :3]
veloc_world = (
self._walker_features['position'] -
self._walker_features_prev['position']) / self._control_timestep
return np.dot(veloc_world, rmat_prev)
def get_gyro_control(self, physics: 'mjcf.Physics'):
"""Gyro measurements in the prev root frame at the control timestep."""
del physics # physics unused by get_gyro_control.
quat_curr, quat_prev = (self._walker_features['quaternion'],
self._walker_features_prev['quaternion'])
normed_diff = tr.quat_diff(quat_prev, quat_curr)
normed_diff /= np.linalg.norm(normed_diff)
return tr.quat_to_axisangle(normed_diff) / self._control_timestep
def get_joints_vel_control(self, physics: 'mjcf.Physics'):
"""Joint velocity measurements at the control timestep."""
del physics # physics unused by get_joints_vel_control.
joints_curr, joints_prev = (self._walker_features['joints'],
self._walker_features_prev['joints'])
return (joints_curr - joints_prev)[
self._walker.mocap_to_observable_joint_order]/self._control_timestep
def get_clip_id(self, physics: 'mjcf.Physics'):
"""Observation of the clip id."""
del physics # physics unused by get_clip_id.
return np.array([self._current_clip_index])
def get_all_reference_observations(self, physics: 'mjcf.Physics'):
reference_observations = dict()
reference_observations[
'walker/reference_rel_bodies_pos_local'] = self.get_reference_rel_bodies_pos_local(
physics)
reference_observations[
'walker/reference_rel_joints'] = self.get_reference_rel_joints(physics)
reference_observations[
'walker/reference_rel_bodies_pos_global'] = self.get_reference_rel_bodies_pos_global(
physics)
reference_observations[
'walker/reference_ego_bodies_quats'] = self.get_reference_ego_bodies_quats(
physics)
reference_observations[
'walker/reference_rel_root_quat'] = self.get_reference_rel_root_quat(
physics)
reference_observations[
'walker/reference_rel_bodies_quats'] = self.get_reference_rel_bodies_quats(
physics)
reference_observations[
'walker/reference_rel_root_pos_local'] = self.get_reference_rel_root_pos_local(
physics)
return reference_observations
def get_reward(self, physics: 'mjcf.Physics') -> float:
reference_observations = self.get_all_reference_observations(physics)
reward, unused_debug_outputs, reward_channels = self._reward_fn(
termination_error=self._termination_error,
termination_error_threshold=self._termination_error_threshold,
reference_features=self._current_reference_features,
walker_features=self._walker_features,
reference_observations=reference_observations)
self._should_truncate = self._termination_error > self._termination_error_threshold
self.last_reward_channels = reward_channels
return reward
def _set_walker(self, physics: 'mjcf.Physics'):
timestep_features = tree.map_structure(lambda x: x[self._time_step],
self._clip_reference_features)
utils.set_walker_from_features(physics, self._walker, timestep_features)
mjlib.mj_kinematics(physics.model.ptr, physics.data.ptr)
def _update_ghost(self, physics: 'mjcf.Physics'):
if self._ghost_offset is not None:
target = tree.map_structure(lambda x: x[self._time_step],
self._clip_reference_features)
utils.set_walker_from_features(physics, self._ghost, target,
self._ghost_offset)
mjlib.mj_kinematics(physics.model.ptr, physics.data.ptr)
def action_spec(self, physics: 'mjcf.Physics'):
"""Action spec of the walker only."""
ctrl = physics.bind(self._walker.actuators).ctrl # pytype: disable=attribute-error
shape = ctrl.shape
dtype = ctrl.dtype
minimum = []
maximum = []
for actuator in self._walker.actuators:
if physics.bind(actuator).ctrllimited: # pytype: disable=attribute-error
ctrlrange = physics.bind(actuator).ctrlrange # pytype: disable=attribute-error
minimum.append(ctrlrange[0])
maximum.append(ctrlrange[1])
else:
minimum.append(-float('inf'))
maximum.append(float('inf'))
return specs.BoundedArray(
shape=shape,
dtype=dtype,
minimum=np.asarray(minimum, dtype=dtype),
maximum=np.asarray(maximum, dtype=dtype),
name='\t'.join(actuator.full_identifier # pytype: disable=attribute-error
for actuator in self._walker.actuators))
@abc.abstractproperty
def name(self):
raise NotImplementedError
@property
def root_entity(self):
return self._arena
class MultiClipMocapTracking(ReferencePosesTask):
"""Task for multi-clip mocap tracking."""
def __init__(
self,
walker: Callable[..., 'legacy_base.Walker'],
arena: composer.Arena,
ref_path: Text,
ref_steps: Sequence[int],
dataset: Union[Text, Sequence[Any]],
termination_error_threshold: float = 0.3,
min_steps: int = 10,
reward_type: Text = 'termination_reward',
physics_timestep: float = DEFAULT_PHYSICS_TIMESTEP,
always_init_at_clip_start: bool = False,
proto_modifier: Optional[Any] = None,
ghost_offset: Optional[Sequence[Union[int, float]]] = None,
body_error_multiplier: Union[int, float] = 1.0,
):
"""Mocap tracking task.
Args:
walker: Walker constructor to be used.
arena: Arena to be used.
ref_path: Path to the dataset containing reference poses.
      ref_steps: tuples of indices of reference observation. E.g. if
ref_steps=(1, 2, 3) the walker/reference observation at time t will
contain information from t+1, t+2, t+3.
      dataset: A ClipCollection instance or the name of a dataset that appears
        as a key in DATASETS in datasets.py
termination_error_threshold: Error threshold for episode terminations.
min_steps: minimum number of steps within an episode. This argument
determines the latest allowable starting point within a given reference
trajectory.
reward_type: type of reward to use, must be a string that appears as a key
in the REWARD_FN dict in rewards.py.
physics_timestep: Physics timestep to use for simulation.
      always_init_at_clip_start: only initialize episodes at the start of a
reference trajectory.
proto_modifier: Optional proto modifier to modify reference trajectories,
e.g. adding a vertical offset.
ghost_offset: if not None, include a ghost rendering of the walker with
the reference pose at the specified position offset.
body_error_multiplier: A multiplier that is applied to the body error term
when determining failure termination condition.
"""
super(MultiClipMocapTracking, self).__init__(
walker=walker,
arena=arena,
ref_path=ref_path,
ref_steps=ref_steps,
termination_error_threshold=termination_error_threshold,
min_steps=min_steps,
dataset=dataset,
reward_type=reward_type,
physics_timestep=physics_timestep,
always_init_at_clip_start=always_init_at_clip_start,
proto_modifier=proto_modifier,
ghost_offset=ghost_offset,
body_error_multiplier=body_error_multiplier)
self._walker.observables.add_observable(
'time_in_clip',
base_observable.Generic(self.get_normalized_time_in_clip))
def after_step(self, physics: 'mjcf.Physics', random_state):
"""Update the data after step."""
super(MultiClipMocapTracking, self).after_step(physics, random_state)
self._time_step += 1
# Update the walker's data for this timestep.
self._walker_features = utils.get_features(physics, self._walker)
# features for default error
self._walker_joints = np.array(physics.bind(self._walker.mocap_joints).qpos) # pytype: disable=attribute-error
self._current_reference_features = {
k: v[self._time_step].copy()
for k, v in self._clip_reference_features.items()
}
# Error.
self._compute_termination_error()
# Terminate based on the error.
self._end_mocap = self._time_step == self._last_step
self._update_ghost(physics)
def get_normalized_time_in_clip(self, physics: 'mjcf.Physics'):
"""Observation of the normalized time in the mocap clip."""
normalized_time_in_clip = (self._current_start_time +
physics.time()) / self._current_clip.duration
return np.array([normalized_time_in_clip])
@property
def name(self):
return 'MultiClipMocapTracking'
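# Illustrative construction sketch (added, not part of the original module). The
# walker constructor, reference file and clip id below are placeholders, not real
# assets shipped with dm_control:
#
#   from dm_control.locomotion.arenas import floors
#   task = MultiClipMocapTracking(
#       walker=MyWalker,                                  # hypothetical walker constructor
#       arena=floors.Floor(),
#       ref_path='my_mocap_data.hdf5',                    # placeholder HDF5 reference file
#       ref_steps=(1, 2, 3, 4, 5),
#       dataset=types.ClipCollection(ids=('clip_0',)),    # placeholder clip id
#   )
#   env = composer.Environment(task=task)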
|
the-stack_106_17941
|
import random
import numpy as np
import math
import torch
from .video import CenterCropVideo, ToTensorVideo, NormalizeVideo, RandomHorizontalFlipVideo, BinarizeVideo, ColorJitterVideo
from .audio import AmplitudeToDB
from .landmarks import RandomHorizontalFlipLandmarks
class CutMix(object):
def __init__(self, p=0.5, alpha=1.0, beta=1.0):
self.alpha = alpha
self.beta = beta
self.p = p
def __call__(self, img_batch1, img_batch2):
mixed_img = img_batch1.clone()
batch = mixed_img.size(0)
width = mixed_img.size(2)
height = mixed_img.size(3)
for i in range(batch):
if random.uniform(0, 1) > self.p:
continue
ratio = np.random.beta(self.alpha, self.beta)
tl, br = self.get_bbox(width, height, ratio)
mixed_img[i, :, tl[0]:br[0], tl[1]:br[1]] = img_batch2[i, :, tl[0]:br[0], tl[1]:br[1]].clone()
return mixed_img
def get_bbox(self, w, h, ratio):
cut_rat = math.sqrt(1.0 - ratio)
cut_w = int(w * cut_rat)
cut_h = int(h * cut_rat)
bbxl = int(random.uniform(0, 1) * (w - cut_w))
bbyt = int(random.uniform(0, 1) * (h - cut_h))
bbxr = bbxl + cut_w
bbyb = bbyt + cut_h
return (bbxl, bbyt), (bbxr, bbyb)
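# Illustrative usage (added): CutMix pastes a random rectangle from the second batch
# into the first. The shapes below are arbitrary.
#
#   import torch
#   cutmix = CutMix(p=1.0)                 # always mix
#   a = torch.zeros(4, 3, 64, 64)
#   b = torch.ones(4, 3, 64, 64)
#   mixed = cutmix(a, b)                   # zeros with one rectangular patch of ones per sample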
class RandomCrop(object):
"""Crops given PIL.Images at a random location to have a region that is proportional
to the original size
"""
def __init__(self, proportion=0.9):
        if not isinstance(proportion, tuple):
            proportion = (proportion, proportion)
        self.proportion = proportion
def __call__(self, source, proportion=None):
if proportion is None:
proportion = self.proportion
try: # If img is iterable
img_iterator = iter(source)
except TypeError:
img_iterator = iter([source])
tl_ratio_x = random.uniform(0, 1)
tl_ratio_y = random.uniform(0, 1)
target = []
for img in img_iterator:
w, h = img.size
new_w = proportion[0] * w
new_h = proportion[1] * h
x_tl = int(tl_ratio_x * (w - new_w))
y_tl = int(tl_ratio_y * (h - new_h))
target.append(img.crop((x_tl, y_tl, x_tl + new_w, y_tl + new_h)))
if len(target) == 1:
return target[0]
else:
return target
def normalize_vector(v):
batch = v.shape[0]
v_mag = torch.sqrt(v.pow(2).sum(1))
v_mag = torch.max(v_mag, torch.autograd.Variable(torch.FloatTensor([1e-8]).cuda()))
v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1])
v = v / v_mag
return v
def cross_product(u, v):
batch = u.shape[0]
i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]
j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]
k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]
out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1) # batch*3
return out
def rot_to_ortho6d(rot_matrix):
return np.concatenate((rot_matrix[:, 0], rot_matrix[:, 1]))
def ortho6d_to_rot(ortho6d):
x_raw = ortho6d[:, 0:3]
y_raw = ortho6d[:, 3:6]
x = normalize_vector(x_raw)
z = cross_product(x, y_raw)
z = normalize_vector(z)
y = cross_product(z, x)
x = x.view(-1, 3, 1)
y = y.view(-1, 3, 1)
z = z.view(-1, 3, 1)
matrix = torch.cat((x, y, z), 2)
return matrix
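# Illustrative example (added): the 6D representation stacks the first two columns of
# a rotation matrix, so for the identity matrix
#   rot_to_ortho6d(np.eye(3))  ->  array([1., 0., 0., 0., 1., 0.])
# ortho6d_to_rot expects a batched torch tensor of shape (N, 6); note that, as
# written, normalize_vector allocates its epsilon on the GPU, so the inverse mapping
# requires CUDA:
#   ortho6d_to_rot(torch.tensor([[1., 0., 0., 0., 1., 0.]]).cuda())   # -> (1, 3, 3) identity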
def get_transform_matrix(rotation, translation, scale=None):
batch_size = rotation.size(0)
num_coordinates = rotation.size(1)
trans = torch.zeros((batch_size, num_coordinates + 1, num_coordinates + 1), device=rotation.device)
if scale is None:
trans[:, :num_coordinates, :num_coordinates] = rotation
else:
trans[:, :num_coordinates, :num_coordinates] = scale.unsqueeze(-1).unsqueeze(-1) * rotation
trans[:, :num_coordinates, num_coordinates] = translation.squeeze()
trans[:, num_coordinates, num_coordinates] = 1
return trans
def procrustes(s1, s2):
if len(s1.size()) < 3:
s1 = s1.unsqueeze(0)
if len(s2.size()) < 3:
        s2 = s2.unsqueeze(0)
coordinates = s1.size(2)
mu1 = s1.mean(axis=1, keepdims=True)
mu2 = s2.mean(axis=1, keepdims=True)
x1 = s1 - mu1
x2 = s2 - mu2
var1 = torch.sum(x1 ** 2, dim=1).sum(dim=1)
cov = x1.transpose(1, 2).bmm(x2)
u, s, v = torch.svd(cov.float())
z = torch.eye(u.shape[1], device=s1.device).unsqueeze(0)
z = z.repeat(u.shape[0], 1, 1)
z[:, -1, -1] *= torch.sign(torch.det(u.bmm(v.transpose(1, 2)).float()))
r = v.bmm(z.bmm(u.permute(0, 2, 1)))
scale = torch.cat([torch.trace(x).unsqueeze(0) for x in r.bmm(cov)]) / var1
t = mu2.view(-1, coordinates, 1) - (scale.unsqueeze(-1).unsqueeze(-1) * (r.bmm(mu1.view(-1, coordinates, 1))))
return scale, r, t.squeeze()
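# Illustrative usage (added): procrustes recovers the similarity transform mapping s1
# onto s2; get_transform_matrix packs it into a homogeneous matrix. Shapes below are
# (batch, n_points, 3) and the numbers are arbitrary.
#
#   import torch
#   s1 = torch.rand(1, 68, 3)
#   s2 = 2.0 * s1 + torch.tensor([0.1, 0.2, 0.3])    # known scale and translation
#   scale, rot, trans = procrustes(s1, s2)           # scale ~ 2, rot ~ identity
#   T = get_transform_matrix(rot, trans.view(1, 3), scale)   # (1, 4, 4)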
def transform_landmarks(ref, transformation):
ret_np = False
if isinstance(ref, np.ndarray):
ret_np = True
ref = torch.from_numpy(ref)
transformation = torch.from_numpy(transformation)
ref = ref.view(-1, ref.size(-2), ref.size(-1))
transformation = transformation.view(-1, transformation.size(-3), transformation.size(-2), transformation.size(-1))
seq_length = transformation.shape[1]
no_points = ref.shape[-2]
coordinates = ref.shape[-1]
rot_matrix = transformation[:, :, :coordinates, :coordinates]
out_translation = transformation[:, :, :coordinates, coordinates]
out_landmarks = torch.bmm(ref[:, None, :, :].repeat(1, seq_length, 1, 1).view(-1, no_points, 3),
rot_matrix.view(-1, 3, 3).transpose(1, 2)).contiguous()
out_landmarks = out_landmarks.view(-1, seq_length, no_points, coordinates) + out_translation[:, :, None, :]
if ret_np:
return out_landmarks.squeeze().numpy()
else:
return out_landmarks.squeeze()
|
the-stack_106_17942
|
import logging
import os
import numpy as np
import rasterio
from rasterio.crs import CRS
from rasterio.features import rasterize
from rasterio.warp import calculate_default_transform
from rasterio.windows import bounds
from satlomasproc.chips.utils import (
rescale_intensity,
sliding_windows,
write_chips_geojson,
)
from shapely.geometry import box, shape
from shapely.ops import transform, unary_union
from shapely.validation import explain_validity
from skimage import exposure
from skimage.io import imsave
from tqdm import tqdm
# Workaround: Load fiona at the end to avoid segfault on box (???)
import fiona
__author__ = "Damián Silvani"
__copyright__ = "Dymaxion Labs"
__license__ = "apache-2.0"
_logger = logging.getLogger(__name__)
def get_shape(feature):
geom = feature["geometry"]
try:
return shape(geom)
except Exception as err:
_logger.warn("Failed to get shape from feature %s: %s", feature, err)
return None
def mask_from_polygons(polygons, *, win, t):
transform = rasterio.windows.transform(win, t)
if polygons is None or len(polygons) == 0:
mask = np.zeros((win.height, win.width), dtype=np.uint8)
else:
mask = rasterize(
polygons, (win.height, win.width), default_value=255, transform=transform
)
return mask
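# Illustrative example (added): rasterize one square polygon into a 4x4 window. The
# transform below is hypothetical (1-unit pixels with the raster origin at (0, 4)).
#
#   from rasterio.transform import from_origin
#   from rasterio.windows import Window
#   t = from_origin(0, 4, 1, 1)            # west, north, xsize, ysize
#   win = Window(0, 0, 4, 4)
#   poly = box(0, 0, 2, 2)                 # lower-left 2x2 square in world coordinates
#   m = mask_from_polygons([poly], win=win, t=t)
#   # m is a (4, 4) uint8 array with 255 inside the polygon and 0 elsewhere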
def multiband_chip_mask_by_classes(
classes,
transform,
window,
mask_path,
label_property,
polys_dict=None,
label_path=None,
window_shape=None,
metadata={},
):
multi_band_mask = []
if polys_dict is None and label_path is not None:
polys_dict = classify_polygons(label_path, label_property, classes)
if window_shape is None:
window_shape = box(*rasterio.windows.bounds(window, transform))
for k in classes:
multi_band_mask.append(
mask_from_polygons(polys_dict[k], win=window, t=transform)
)
kwargs = metadata.copy()
kwargs.update(
driver="GTiff",
dtype=rasterio.uint8,
count=len(multi_band_mask),
nodata=0,
transform=rasterio.windows.transform(window, transform),
width=window.width,
height=window.height,
)
os.makedirs(os.path.dirname(mask_path), exist_ok=True)
with rasterio.open(mask_path, "w", **kwargs) as dst:
for i in range(len(multi_band_mask)):
dst.write(multi_band_mask[i], i + 1)
def classify_polygons(labels, label_property, classes):
with fiona.open(labels) as blocks:
polys_dict = {}
for block in blocks:
if label_property in block['properties']:
c = str(block['properties'][label_property])
try:
geom = shape(block['geometry'])
except:
_logger.warning("Failed to get geometry shape for feature: %s", block)
continue
if c in polys_dict:
polys_dict[c].append(geom)
else:
polys_dict[c] = [geom]
if classes:
for c in classes:
if c not in polys_dict:
polys_dict[c] = []
_logger.warn(
f"No features of class '{c}' found. Will generate empty masks."
)
return polys_dict
def prepare_aoi_shape(aoi):
with fiona.open(aoi) as src:
aoi_polys = [get_shape(f) for f in src]
aoi_polys = [shp for shp in aoi_polys if shp and shp.is_valid]
aoi_poly = unary_union(aoi_polys)
return aoi_poly
def prepare_label_shapes(
labels, mask_type="class", label_property="class", classes=None
):
if mask_type == "class":
polys_dict = classify_polygons(labels, label_property, classes)
return polys_dict
else:
raise RuntimeError(f"mask type '{mask_type}' not supported")
def extract_chips(
rasters,
aoi=None,
labels=None,
label_property="class",
mask_type="class",
rescale_mode=None,
rescale_range=None,
bands=None,
type="tif",
write_geojson=True,
classes=None,
crs=None,
skip_existing=True,
within=False,
dry_run=False,
*,
size,
step_size,
output_dir,
):
if aoi:
_logger.info("Prepare AOI shape")
aoi_poly = prepare_aoi_shape(aoi)
else:
aoi_poly = None
if labels:
_logger.info("Prepare label shapes")
polys_dict = prepare_label_shapes(
labels, mask_type=mask_type, label_property=label_property, classes=classes
)
else:
polys_dict = None
for raster in tqdm(rasters):
extract_chips_from_raster(
raster,
size=size,
step_size=step_size,
rescale_mode=rescale_mode,
rescale_range=rescale_range,
bands=bands,
output_dir=output_dir,
type=type,
within=within,
write_geojson=write_geojson,
crs=crs,
labels=labels,
label_property=label_property,
classes=classes,
mask_type=mask_type,
aoi_poly=aoi_poly,
polys_dict=polys_dict,
dry_run=dry_run,
)
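# Illustrative invocation (added): all paths and class names below are placeholders,
# not files shipped with the package.
#
#   extract_chips(
#       ["scene.tif"],                  # hypothetical input raster
#       aoi="aoi.geojson",              # hypothetical area of interest
#       labels="labels.geojson",        # hypothetical labelled polygons
#       label_property="class",
#       classes=["A", "B"],
#       bands=[1, 2, 3],
#       size=256,
#       step_size=128,
#       output_dir="chips/",
#   )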
def extract_chips_from_raster(
raster,
rescale_mode=None,
rescale_range=None,
bands=None,
type="tif",
write_geojson=True,
labels=None,
label_property="class",
mask_type="class",
classes=None,
crs=None,
skip_existing=True,
within=False,
aoi_poly=None,
polys_dict=None,
dry_run=False,
*,
size,
step_size,
output_dir,
):
basename, _ = os.path.splitext(os.path.basename(raster))
masks_folder = os.path.join(output_dir, "masks")
image_folder = os.path.join(output_dir, "images")
with rasterio.open(raster) as ds:
_logger.info("Raster size: %s", (ds.width, ds.height))
        if bands is None:
            bands = list(range(1, min(ds.count, 3) + 1))
        if any(b > ds.count for b in bands):
            raise RuntimeError(
                f"Raster has {ds.count} bands, but you asked to use {bands} band indexes"
            )
_logger.info("Building windows")
win_size = (size, size)
win_step_size = (step_size, step_size)
windows = list(
sliding_windows(win_size, win_step_size, ds.width, ds.height, whole=True)
)
_logger.info("Total windows: %d", len(windows))
_logger.info("Building window shapes")
window_shapes = [
box(*rasterio.windows.bounds(w, ds.transform)) for w, _ in windows
]
window_and_shapes = zip(windows, window_shapes)
# Filter windows by AOI shape
if aoi_poly:
_logger.info("Filtering windows by AOI")
_logger.info("Using \"%s\" function",
'within' if within else 'intersects')
filter_fn = lambda w, aoi: w.within(
aoi) if within else w.intersects(aoi)
window_and_shapes = [(w, s) for w, s in window_and_shapes
if filter_fn(s, aoi_poly)]
_logger.info("Total windows after filtering: %d",
len(window_and_shapes))
meta = ds.meta.copy()
if crs:
meta["crs"] = CRS.from_string(crs)
chips = []
for c, ((window, (i, j)), win_shape) in tqdm(
list(enumerate(window_and_shapes))
):
_logger.debug("%s %s", window, (i, j))
img_path = os.path.join(image_folder, f"{basename}_{i}_{j}.{type}")
mask_path = os.path.join(masks_folder, f"{basename}_{i}_{j}.{type}")
# Store chip window for generating GeoJSON later
chip = (win_shape, (c, i, j))
chips.append(chip)
if dry_run:
continue
if (
skip_existing
and os.path.exists(img_path)
and (not labels or os.path.exists(mask_path))
):
continue
# Extract chip image from original image
img = ds.read(window=window)
img = np.nan_to_num(img)
img = np.array([img[b - 1, :, :] for b in bands])
# Rescale intensity (if needed)
if rescale_mode:
img = rescale_intensity(img, rescale_mode, rescale_range)
# Write chip image
if type == "tif":
image_was_saved = write_tif(
img,
img_path,
window=window,
meta=meta.copy(),
transform=ds.transform,
bands=bands,
)
else:
image_was_saved = write_image(img, img_path)
            # If there are labels and the chip was extracted successfully, generate a mask
if image_was_saved and labels:
if mask_type == "class":
keys = classes if classes is not None else polys_dict.keys()
multiband_chip_mask_by_classes(
classes=keys,
transform=ds.transform,
window=window,
window_shape=win_shape,
polys_dict=polys_dict,
metadata=meta,
mask_path=mask_path,
label_property=label_property,
)
if write_geojson:
geojson_path = os.path.join(output_dir, "{}.geojson".format(basename))
write_chips_geojson(
geojson_path, chips, type=type, crs=str(meta["crs"]), basename=basename
)
def write_image(img, path, percentiles=None):
rgb = np.dstack(img[:3, :, :]).astype(np.uint8)
if exposure.is_low_contrast(rgb):
return False
os.makedirs(os.path.dirname(path), exist_ok=True)
if not os.path.exists(path):
imsave(path, rgb)
return True
def write_tif(img, path, *, window, meta, transform, bands):
if exposure.is_low_contrast(img):
return False
os.makedirs(os.path.dirname(path), exist_ok=True)
meta.update(
{
"driver": "GTiff",
"dtype": img.dtype,
"height": window.height,
"width": window.width,
"transform": rasterio.windows.transform(window, transform),
"count": len(bands),
}
)
img = np.array([img[b - 1, :, :] for b in bands])
with rasterio.open(path, "w", **meta) as dst:
dst.write(img)
return True
|
the-stack_106_17943
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from google.protobuf.json_format import MessageToJson, Parse
from dc.generated import dc_pb2
from dc.core.misc import logger
from dc.core.State import State
from dc.core.txs.TokenTransaction import TokenTransaction
from dc.core.txs.TransferTokenTransaction import TransferTokenTransaction
class TokenMetadata(object):
def __init__(self, protobuf_data=None):
self._data = protobuf_data
if protobuf_data is None:
self._data = dc_pb2.TokenMetadata()
@property
def pbdata(self):
"""
Returns a protobuf object that contains persistable data representing this object
:return: A protobuf TokenMetadata object
:rtype: dc_pb2.TokenMetadata
"""
return self._data
@property
def token_txhash(self):
return self._data.token_txhash
@property
def transfer_token_tx_hashes(self):
return self._data.transfer_token_tx_hashes
@staticmethod
def create(token_txhash: bytes, transfer_token_txhashes: list):
token_metadata = TokenMetadata()
token_metadata._data.token_txhash = token_txhash
token_metadata.update(transfer_token_txhashes)
return token_metadata
def update(self, transfer_token_txhashes: list):
for transfer_token_txhash in transfer_token_txhashes:
self._data.transfer_token_tx_hashes.extend([transfer_token_txhash])
def remove(self, transfer_token_txhash: bytes):
i = 0
while i < len(self._data.transfer_token_tx_hashes):
if self._data.transfer_token_tx_hashes[i] == transfer_token_txhash:
del self._data.transfer_token_tx_hashes[i]
return
i += 1
def to_json(self):
return MessageToJson(self._data, sort_keys=True)
@staticmethod
def from_json(json_data):
pbdata = dc_pb2.TokenMetadata()
Parse(json_data, pbdata)
return TokenMetadata(pbdata)
def serialize(self) -> str:
return self._data.SerializeToString()
@staticmethod
def deserialize(data):
pbdata = dc_pb2.TokenMetadata()
pbdata.ParseFromString(bytes(data))
return TokenMetadata(pbdata)
@staticmethod
def get_token_metadata(state: State, token_txhash: bytes):
try:
data = state._db.get_raw(b'token_' + token_txhash)
return TokenMetadata.deserialize(data)
except KeyError:
pass
except Exception as e:
logger.error('[get_token_metadata] %s', e)
return None
@staticmethod
def update_token_metadata(state: State, transfer_token: TransferTokenTransaction, batch):
token_metadata = TokenMetadata.get_token_metadata(state, transfer_token.token_txhash)
token_metadata.update([transfer_token.txhash])
state._db.put_raw(b'token_' + transfer_token.token_txhash,
token_metadata.serialize(),
batch)
@staticmethod
def create_token_metadata(state: State, token: TokenTransaction, batch):
token_metadata = TokenMetadata.create(token_txhash=token.txhash, transfer_token_txhashes=[token.txhash])
state._db.put_raw(b'token_' + token.txhash,
token_metadata.serialize(),
batch)
@staticmethod
def remove_transfer_token_metadata(state: State, transfer_token: TransferTokenTransaction, batch):
token_metadata = TokenMetadata.get_token_metadata(state, transfer_token.token_txhash)
token_metadata.remove(transfer_token.txhash)
state._db.put_raw(b'token_' + transfer_token.token_txhash,
token_metadata.serialize(),
batch)
@staticmethod
def remove_token_metadata(state: State, token: TokenTransaction, batch):
state._db.delete(b'token_' + token.txhash, batch)
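# Illustrative round trip (added): the metadata is a thin wrapper over the protobuf
# message, so it can be created, serialized and restored like this (the hashes are
# placeholder bytes):
#
#   meta = TokenMetadata.create(b'token_hash', [b'tx_1'])
#   meta.update([b'tx_2'])
#   blob = meta.serialize()
#   restored = TokenMetadata.deserialize(blob)
#   list(restored.transfer_token_tx_hashes)   # -> [b'tx_1', b'tx_2']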
|
the-stack_106_17944
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import os.path as osp
import shutil
import requests
import hashlib
import tarfile
import zipfile
import time
from collections import OrderedDict
try:
from tqdm import tqdm
except:
class tqdm(object):
def __init__(self, total=None):
self.total = total
self.n = 0
def update(self, n):
self.n += n
if self.total is None:
sys.stderr.write("\r{0:.1f} bytes".format(self.n))
else:
sys.stderr.write("\r{0:.1f}%".format(100 * self.n / float(
self.total)))
sys.stderr.flush()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stderr.write('\n')
from .log import logger
__all__ = ['get_weights_path_from_url']
WEIGHTS_HOME = osp.expanduser("~/.cache/paddle/hapi/weights")
DOWNLOAD_RETRY_LIMIT = 3
nlp_models = OrderedDict((
('RoBERTa-zh-base',
'https://bert-models.bj.bcebos.com/chinese_roberta_wwm_ext_L-12_H-768_A-12.tar.gz'
),
('RoBERTa-zh-large',
'https://bert-models.bj.bcebos.com/chinese_roberta_wwm_large_ext_L-24_H-1024_A-16.tar.gz'
),
('ERNIE-v2-en-base',
'https://ernie.bj.bcebos.com/ERNIE_Base_en_stable-2.0.0.tar.gz'),
('ERNIE-v2-en-large',
'https://ernie.bj.bcebos.com/ERNIE_Large_en_stable-2.0.0.tar.gz'),
('XLNet-cased-base',
'https://xlnet.bj.bcebos.com/xlnet_cased_L-12_H-768_A-12.tgz'),
('XLNet-cased-large',
'https://xlnet.bj.bcebos.com/xlnet_cased_L-24_H-1024_A-16.tgz'),
('ERNIE-v1-zh-base',
'https://baidu-nlp.bj.bcebos.com/ERNIE_stable-1.0.1.tar.gz'),
('ERNIE-v1-zh-base-max-len-512',
'https://ernie.bj.bcebos.com/ERNIE_1.0_max-len-512.tar.gz'),
('BERT-en-uncased-large-whole-word-masking',
'https://bert-models.bj.bcebos.com/wwm_uncased_L-24_H-1024_A-16.tar.gz'),
('BERT-en-cased-large-whole-word-masking',
'https://bert-models.bj.bcebos.com/wwm_cased_L-24_H-1024_A-16.tar.gz'),
('BERT-en-uncased-base',
'https://bert-models.bj.bcebos.com/uncased_L-12_H-768_A-12.tar.gz'),
('BERT-en-uncased-large',
'https://bert-models.bj.bcebos.com/uncased_L-24_H-1024_A-16.tar.gz'),
('BERT-en-cased-base',
'https://bert-models.bj.bcebos.com/cased_L-12_H-768_A-12.tar.gz'),
('BERT-en-cased-large',
'https://bert-models.bj.bcebos.com/cased_L-24_H-1024_A-16.tar.gz'),
('BERT-multilingual-uncased-base',
'https://bert-models.bj.bcebos.com/multilingual_L-12_H-768_A-12.tar.gz'),
('BERT-multilingual-cased-base',
'https://bert-models.bj.bcebos.com/multi_cased_L-12_H-768_A-12.tar.gz'),
('BERT-zh-base',
'https://bert-models.bj.bcebos.com/chinese_L-12_H-768_A-12.tar.gz'), ))
def is_url(path):
"""
Whether path is URL.
Args:
path (string): URL string or not.
"""
return path.startswith('http://') or path.startswith('https://')
def get_weights_path_from_url(url, md5sum=None):
"""Get weights path from WEIGHT_HOME, if not exists,
download it from url.
Args:
url (str): download url
md5sum (str): md5 sum of download package
Returns:
str: a local path to save downloaded weights.
Examples:
.. code-block:: python
from paddle.utils.download import get_weights_path_from_url
resnet18_pretrained_weight_url = 'https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams'
local_weight_path = get_weights_path_from_url(resnet18_pretrained_weight_url)
"""
path = get_path_from_url(url, WEIGHTS_HOME, md5sum)
return path
def _map_path(url, root_dir):
# parse path after download under root_dir
fname = osp.split(url)[-1]
fpath = fname
return osp.join(root_dir, fpath)
def get_path_from_url(url, root_dir, md5sum=None, check_exist=True):
""" Download from given url to root_dir.
    If the file or directory specified by url exists under
    root_dir, return the path directly; otherwise download
    from url, decompress it, and return the path.
Args:
url (str): download url
root_dir (str): root dir for downloading, it should be
WEIGHTS_HOME or DATASET_HOME
md5sum (str): md5 sum of download package
Returns:
str: a local path to save downloaded models & weights & datasets.
"""
from paddle.fluid.dygraph.parallel import ParallelEnv
assert is_url(url), "downloading from {} not a url".format(url)
# parse path after download to decompress under root_dir
fullpath = _map_path(url, root_dir)
if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum):
logger.info("Found {}".format(fullpath))
else:
if ParallelEnv().local_rank % 8 == 0:
fullpath = _download(url, root_dir, md5sum)
else:
while not os.path.exists(fullpath):
time.sleep(1)
if ParallelEnv().local_rank % 8 == 0:
if tarfile.is_tarfile(fullpath) or zipfile.is_zipfile(fullpath):
fullpath = _decompress(fullpath)
return fullpath
def _download(url, path, md5sum=None):
"""
Download from url, save to path.
url (str): download url
path (str): download to given path
"""
if not osp.exists(path):
os.makedirs(path)
fname = osp.split(url)[-1]
fullname = osp.join(path, fname)
retry_cnt = 0
while not (osp.exists(fullname) and _md5check(fullname, md5sum)):
if retry_cnt < DOWNLOAD_RETRY_LIMIT:
retry_cnt += 1
else:
raise RuntimeError("Download from {} failed. "
"Retry limit reached".format(url))
logger.info("Downloading {} from {}".format(fname, url))
req = requests.get(url, stream=True)
if req.status_code != 200:
raise RuntimeError("Downloading from {} failed with code "
"{}!".format(url, req.status_code))
        # To protect against an interrupted download, download to
        # tmp_fullname first, then move tmp_fullname to fullname
        # once the download finishes.
tmp_fullname = fullname + "_tmp"
total_size = req.headers.get('content-length')
with open(tmp_fullname, 'wb') as f:
if total_size:
with tqdm(total=(int(total_size) + 1023) // 1024) as pbar:
for chunk in req.iter_content(chunk_size=1024):
f.write(chunk)
pbar.update(1)
else:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
shutil.move(tmp_fullname, fullname)
return fullname
def _md5check(fullname, md5sum=None):
if md5sum is None:
return True
logger.info("File {} md5 checking...".format(fullname))
md5 = hashlib.md5()
with open(fullname, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
md5.update(chunk)
calc_md5sum = md5.hexdigest()
if calc_md5sum != md5sum:
logger.info("File {} md5 check failed, {}(calc) != "
"{}(base)".format(fullname, calc_md5sum, md5sum))
return False
return True
def _decompress(fname):
"""
Decompress for zip and tar file
"""
logger.info("Decompressing {}...".format(fname))
    # To protect against an interrupted decompression,
    # decompress to a temporary directory first; if decompression
    # succeeds, move the decompressed files to fpath, delete the
    # temporary directory, and remove the downloaded archive.
if tarfile.is_tarfile(fname):
uncompressed_path = _uncompress_file_tar(fname)
elif zipfile.is_zipfile(fname):
uncompressed_path = _uncompress_file_zip(fname)
else:
raise TypeError("Unsupport compress file type {}".format(fname))
return uncompressed_path
def _uncompress_file_zip(filepath):
files = zipfile.ZipFile(filepath, 'r')
file_list = files.namelist()
file_dir = os.path.dirname(filepath)
if _is_a_single_file(file_list):
rootpath = file_list[0]
uncompressed_path = os.path.join(file_dir, rootpath)
for item in file_list:
files.extract(item, file_dir)
elif _is_a_single_dir(file_list):
rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1]
uncompressed_path = os.path.join(file_dir, rootpath)
for item in file_list:
files.extract(item, file_dir)
else:
rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1]
uncompressed_path = os.path.join(file_dir, rootpath)
if not os.path.exists(uncompressed_path):
os.makedirs(uncompressed_path)
for item in file_list:
files.extract(item, os.path.join(file_dir, rootpath))
files.close()
return uncompressed_path
def _uncompress_file_tar(filepath, mode="r:*"):
files = tarfile.open(filepath, mode)
file_list = files.getnames()
file_dir = os.path.dirname(filepath)
if _is_a_single_file(file_list):
rootpath = file_list[0]
uncompressed_path = os.path.join(file_dir, rootpath)
for item in file_list:
files.extract(item, file_dir)
elif _is_a_single_dir(file_list):
rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1]
uncompressed_path = os.path.join(file_dir, rootpath)
for item in file_list:
files.extract(item, file_dir)
else:
rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1]
uncompressed_path = os.path.join(file_dir, rootpath)
if not os.path.exists(uncompressed_path):
os.makedirs(uncompressed_path)
for item in file_list:
files.extract(item, os.path.join(file_dir, rootpath))
files.close()
return uncompressed_path
def _is_a_single_file(file_list):
    if len(file_list) == 1 and file_list[0].find(os.sep) < 0:
return True
return False
def _is_a_single_dir(file_list):
new_file_list = []
for file_path in file_list:
if '/' in file_path:
file_path = file_path.replace('/', os.sep)
elif '\\' in file_path:
file_path = file_path.replace('\\', os.sep)
new_file_list.append(file_path)
file_name = new_file_list[0].split(os.sep)[0]
for i in range(1, len(new_file_list)):
if file_name != new_file_list[i].split(os.sep)[0]:
return False
return True
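# Illustrative examples (added) for the two archive-layout helpers, assuming a POSIX
# path separator:
#   _is_a_single_file(['model.pdparams'])                        -> True
#   _is_a_single_file(['model/weights.pdparams'])                -> False
#   _is_a_single_dir(['model/a.pdparams', 'model/b.pdparams'])   -> True
#   _is_a_single_dir(['model/a.pdparams', 'other/b.pdparams'])   -> False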
|
the-stack_106_17946
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import sys
import time
from typing import Any, Callable, Dict, List
def timeit(
num_iters: int = -1, warmup_iters: int = 0
) -> Callable[[], Callable[[], Dict[str, float]]]:
"""
    This is intended to be used as a decorator to time any function.
Args:
num_iters (int): number of iterations used to compute the average time
(sec) required to run the function. If negative, the number of
iterations is determined dynamically by running the function a few
times to make sure the estimate is stable.
warmup_iters (int): number of iterations used to warm up the function.
This is useful for functions that exhibit poor performance during
the first few times they run (due to caches, autotuning, etc).
Returns:
Dict[str, float]: dictionary of the aggregated timing estimates.
"iterations": number of iterations used to compute the estimated
time.
"mean": averate time (sec) used to run the function.
"median": median time (sec) used to run the function.
"min": minimal time (sec) used to run the function.
"max": maximal time (sec) used to run the function.
"stddev": standard deviation of the time (sec) used to run the
function.
"""
# pyre-ignore
def decorator(func: Callable[[], Any]) -> Callable[[], Dict[str, float]]:
def decorated(*args: Any, **kwargs: Any) -> Dict[str, float]:
# Warmup phase.
for _ in range(warmup_iters):
func(*args, **kwargs)
# Estimate the run time of the function.
total_time: float = 0
count = 0
run_times: List[float] = []
max_num_iters = num_iters if num_iters > 0 else sys.maxsize
for _ in range(max_num_iters):
start_time = time.time()
func(*args, **kwargs)
run_time = time.time() - start_time
run_times.append(run_time)
total_time += run_time
count += 1
if num_iters < 0 and total_time >= 0.5:
# If num_iters is negative, run the function enough times so
# that we can have a more robust estimate of the average time.
break
assert count == len(run_times)
ret: Dict[str, float] = {}
ret["iterations"] = count
ret["mean"] = total_time / count
ret["median"] = np.median(run_times)
ret["min"] = np.min(run_times)
ret["max"] = np.max(run_times)
ret["stddev"] = np.std(run_times)
return ret
return decorated
return decorator # pyre-ignore
def benchmark(
func: Callable[[], Any], # pyre-ignore
bm_name: str,
kwargs_list: List[Any], # pyre-ignore
*,
num_iters: int = -1,
warmup_iters: int = 0
) -> None:
"""
Benchmark the input function and print out the results.
Args:
func (callable): a closure that returns a function for benchmarking,
where initialization can be done before the function to benchmark.
bm_name (str): name of the benchmark to print out, e.g. "BM_UPDATE".
        kwargs_list (list): a list of argument dicts to pass to the function. The
            input function will be timed separately for each argument dict.
num_iters (int): number of iterations to run. Defaults to run until 0.5s.
warmup_iters (int): number of iterations used to warm up the function.
Outputs:
For each argument dict, print out the time (in microseconds) required
to run the function along with the number of iterations used to get
the timing estimate. Example output:
Benchmark Avg Time(μs) Peak Time(μs) Iterations
-------------------------------------------------------------------
BM_UPDATE_100 820 914 610
BM_UPDATE_1000 7655 8709 66
BM_UPDATE_10000 78062 81748 7
-------------------------------------------------------------------
"""
print("")
outputs = []
for kwargs in kwargs_list:
func_bm = func(**kwargs)
# pyre-ignore
time_func = timeit(num_iters=num_iters, warmup_iters=warmup_iters)(
func_bm
)
ret = time_func()
name = bm_name
if kwargs:
name += "_" + "_".join(str(v) for k, v in kwargs.items())
outputs.append(
[
name,
str(ret["mean"] * 1000000),
str(ret["max"] * 1000000),
str(ret["iterations"]),
]
)
outputs = np.array(outputs)
# Calculate column widths for metrics table.
c1 = len(max(outputs[:, 0], key=len))
c2 = len(max(outputs[:, 1], key=len))
c3 = len(max(outputs[:, 2], key=len))
c4 = len(max(outputs[:, 3], key=len))
dash = "-" * 80
print(
"{:{}s} {:>{}s} {:>{}s} {:>{}s}".format(
"Benchmark",
c1,
"Avg Time(μs)",
c2,
"Peak Time(μs)",
c3,
"Iterations",
c4,
)
)
print(dash)
for output in outputs:
print(
"{:{}s} {:15.0f} {:15.0f} {:14d}".format(
output[0],
c1,
float(output[1]),
float(output[2]),
int(output[3]),
)
)
print(dash)
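

# Illustrative self-test (added, not part of the original module): benchmark np.sort
# on arrays of two sizes. The benchmark name and sizes are arbitrary.
if __name__ == "__main__":

    def make_sort(n: int) -> Callable[[], Any]:
        # Build the array once; the returned closure (the timed function) sorts a copy.
        data = np.random.rand(n)
        return lambda: np.sort(data)

    benchmark(make_sort, "BM_NP_SORT", [{"n": 1000}, {"n": 10000}], warmup_iters=1)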
|
the-stack_106_17947
|
# -*- coding: utf-8 -*-
# This file is part of hoa-utils.
#
# hoa-utils is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# hoa-utils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hoa-utils. If not, see <https://www.gnu.org/licenses/>.
#
"""This module contains test utils."""
import os
from contextlib import contextmanager
from pathlib import Path
@contextmanager
def cd(new_dir: Path):
"""
Change directory with a context manager.
:param new_dir: the new directory where to go.
:return: None
"""
old_dir = os.getcwd()
try:
os.chdir(str(new_dir))
yield
finally:
os.chdir(old_dir)
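

# Illustrative usage (added): temporarily change into the system temp directory and
# return to the original working directory afterwards.
if __name__ == "__main__":
    import tempfile

    print("before:", os.getcwd())
    with cd(Path(tempfile.gettempdir())):
        print("inside:", os.getcwd())
    print("after:", os.getcwd())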
|
the-stack_106_17949
|
from django.urls import path
from .views import groups, not_joined, join_group, detail_group_user, detail_group_wordwall, create_post, list_posts, detail_post, comment_like, user_detail
app_name = 'share'
urlpatterns = [
path('groups', groups, name='groups'),
path('notjoined', not_joined, name='not_joined'),
path('join/<int:pk>', join_group, name='join_group'),
path('detail/users/<int:pk>', detail_group_user, name='detail_group_user'),
path('detail/wordwall/<int:pk>', detail_group_wordwall, name='detail_group_wordwall'),
path('detail/user/personal/<int:pk>', user_detail, name='user_detail'),
path('post/create/<int:pk>', create_post, name="create_post"),
path('post/listings/<int:pk>', list_posts, name="list_posts"),
path('post/detail/<int:pk>', detail_post, name="detail_post"),
path('post/comment/like/<int:cpk>/<int:ppk>', comment_like, name="comment_like"),
]
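# Illustrative note (added): with app_name = 'share', these routes are reversed via
# their namespaced names; the pk values below are placeholders and the final URL also
# depends on where this urlconf is included:
#
#   from django.urls import reverse
#   reverse('share:detail_post', args=[1])        # ...post/detail/1
#   reverse('share:comment_like', args=[3, 7])    # ...post/comment/like/3/7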
|
the-stack_106_17950
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import power_state
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.virt.baremetal import nodes
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
def read_domains(fname):
try:
f = open(fname, 'r')
json = f.read()
f.close()
domains = utils.loads(json)
return domains
except IOError:
raise exception.NotFound()
def write_domains(fname, domains):
json = utils.dumps(domains)
f = open(fname, 'w')
f.write(json)
f.close()
class BareMetalDom(object):
"""
BareMetalDom class handles fake domain for bare metal back ends.
This implements the singleton pattern.
"""
_instance = None
_is_init = False
def __new__(cls, *args, **kwargs):
"""
Returns the BareMetalDom singleton.
"""
if not cls._instance or ('new' in kwargs and kwargs['new']):
cls._instance = super(BareMetalDom, cls).__new__(cls)
return cls._instance
def __init__(self,
fake_dom_file="/tftpboot/test_fake_dom_file"):
"""
Only call __init__ the first time object is instantiated.
Sets and Opens domain file: /tftpboot/test_fake_dom_file. Even though
nova-compute service is rebooted, this file should retain the
existing domains.
"""
if self._is_init:
return
self._is_init = True
self.fake_dom_file = fake_dom_file
self.domains = []
self.fake_dom_nums = 0
self.baremetal_nodes = nodes.get_baremetal_nodes()
self._read_domain_from_file()
def _read_domain_from_file(self):
"""
Reads the domains from a file.
"""
try:
self.domains = read_domains(self.fake_dom_file)
except IOError:
dom = []
LOG.debug(_("No domains exist."))
return
msg = _("============= initial domains =========== : %s")
LOG.debug(msg % (self.domains))
for dom in self.domains[:]:
if dom['status'] == power_state.BUILDING:
LOG.debug(_("Building domain: to be removed"))
self.destroy_domain(dom['name'])
continue
elif dom['status'] != power_state.RUNNING:
LOG.debug(_("Not running domain: remove"))
self.domains.remove(dom)
continue
res = self.baremetal_nodes.set_status(dom['node_id'],
dom['status'])
if res > 0:
self.fake_dom_nums = self.fake_dom_nums + 1
else:
LOG.debug(_("domain running on an unknown node: discarded"))
self.domains.remove(dom)
continue
LOG.debug(self.domains)
self.store_domain()
def reboot_domain(self, name):
"""
Finds domain and deactivates (power down) bare-metal node.
Activates the node again. In case of fail,
destroys the domain from domains list.
"""
fd = self.find_domain(name)
if fd == []:
msg = _("No such domain (%s)")
raise exception.NotFound(msg % name)
node_ip = self.baremetal_nodes.get_ip_by_id(fd['node_id'])
try:
self.baremetal_nodes.deactivate_node(fd['node_id'])
except Exception:
msg = _("Failed power down Bare-metal node %s")
raise exception.NotFound(msg % fd['node_id'])
self.change_domain_state(name, power_state.BUILDING)
try:
state = self.baremetal_nodes.activate_node(fd['node_id'],
node_ip, name, fd['mac_address'], fd['ip_address'])
self.change_domain_state(name, state)
return state
except Exception:
LOG.debug(_("deactivate -> activate fails"))
self.destroy_domain(name)
raise
def destroy_domain(self, name):
"""
Removes domain from domains list and deactivates node.
"""
fd = self.find_domain(name)
if fd == []:
LOG.debug(_("destroy_domain: no such domain"))
msg = _("No such domain %s")
raise exception.NotFound(msg % name)
try:
self.baremetal_nodes.deactivate_node(fd['node_id'])
self.domains.remove(fd)
msg = _("Domains: %s")
LOG.debug(msg % (self.domains))
msg = _("Nodes: %s")
LOG.debug(msg % (self.baremetal_nodes.nodes))
self.store_domain()
msg = _("After storing domains: %s")
LOG.debug(msg % (self.domains))
except Exception:
LOG.debug(_("deactivation/removing domain failed"))
raise
def create_domain(self, xml_dict, bpath):
"""
Adds a domain to domains list and activates an idle bare-metal node.
"""
LOG.debug(_("===== Domain is being created ====="))
fd = self.find_domain(xml_dict['name'])
if fd != []:
msg = _("Same domain name already exists")
raise exception.NotFound(msg)
LOG.debug(_("create_domain: before get_idle_node"))
node_id = self.baremetal_nodes.get_idle_node()
node_ip = self.baremetal_nodes.get_ip_by_id(node_id)
new_dom = {'node_id': node_id,
'name': xml_dict['name'],
'memory_kb': xml_dict['memory_kb'],
'vcpus': xml_dict['vcpus'],
'mac_address': xml_dict['mac_address'],
'user_data': xml_dict['user_data'],
'ip_address': xml_dict['ip_address'],
'image_id': xml_dict['image_id'],
'kernel_id': xml_dict['kernel_id'],
'ramdisk_id': xml_dict['ramdisk_id'],
'status': power_state.BUILDING}
self.domains.append(new_dom)
msg = _("Created new domain: %s")
LOG.debug(msg % (new_dom))
self.change_domain_state(new_dom['name'], power_state.BUILDING)
self.baremetal_nodes.set_image(bpath, node_id)
state = power_state.NOSTATE
try:
state = self.baremetal_nodes.activate_node(node_id,
node_ip, new_dom['name'], new_dom['mac_address'],
new_dom['ip_address'], new_dom['user_data'])
self.change_domain_state(new_dom['name'], state)
except Exception:
self.domains.remove(new_dom)
self.baremetal_nodes.free_node(node_id)
LOG.debug(_("Failed to boot Bare-metal node %s"), node_id)
return state
def change_domain_state(self, name, state):
"""
Changes domain state by the given state and updates domain file.
"""
l = self.find_domain(name)
if l == []:
msg = _("No such domain exists")
raise exception.NotFound(msg)
i = self.domains.index(l)
self.domains[i]['status'] = state
LOG.debug(_("change_domain_state: to new state %s"), str(state))
self.store_domain()
def store_domain(self):
"""
Stores fake domains to the file.
"""
msg = _("Stored fake domains to the file: %s")
LOG.debug(msg % (self.domains))
write_domains(self.fake_dom_file, self.domains)
def find_domain(self, name):
"""
Finds domain by the given name and returns the domain.
"""
for item in self.domains:
if item['name'] == name:
return item
LOG.debug(_("domain does not exist"))
return []
def list_domains(self):
"""
Returns the instance name from domains list.
"""
if self.domains == []:
return []
return [x['name'] for x in self.domains]
def get_domain_info(self, instance_name):
"""
        Finds domain by the given instance_name and returns information.
For example, status, memory_kb, vcpus, etc.
"""
domain = self.find_domain(instance_name)
        if domain != []:
            # list shaped like libvirt's dom.info(): [state, max_mem, mem, num_cpu, cpu_time]
            return [domain['status'], domain['memory_kb'],
                    domain['memory_kb'],
                    domain['vcpus'],
                    100]
else:
return [power_state.NOSTATE, '', '', '', '']
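# --- Editor's note: hedged illustration, not part of the original nova module. ---
# BareMetalDom implements the singleton pattern through __new__: constructing it twice
# returns the same instance unless new=True is passed. The helper is illustrative only
# and is never called here, since real construction touches the fake domain file.
def _singleton_example():
    first = BareMetalDom()
    second = BareMetalDom()
    return first is second  # True: both names refer to the shared singleton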
|
the-stack_106_17951
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Guenter Bartsch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_data(k):
k.dte.set_prefixes([])
def answer_greeting_att(c):
def action_attention_on(c):
            # XSBFunctor / XSBAtom are not imported in this file; they are assumed to be
            # provided by the surrounding Zamia AI skill runtime.
            c.kernal.mem_set(c.realm, 'action', XSBFunctor('attention', [XSBAtom('on')]))
if c.lang == 'en':
c.resp(u"Hello!", action=action_attention_on)
c.resp(u"Hi!", action=action_attention_on)
c.resp(u"Greetings!", action=action_attention_on)
c.resp(u"Hey!", action=action_attention_on)
elif c.lang == 'de':
c.resp(u"Hallo!", action=action_attention_on)
c.resp(u"Hi!", action=action_attention_on)
c.resp(u"Grüß Dich!", action=action_attention_on)
c.resp(u"Hey!", action=action_attention_on)
else:
            raise Exception('sorry, language %s not implemented yet.' % c.lang)
k.dte.dt('en', u"ok, {my_forename:W}", answer_greeting_att)
k.dte.dt('de', u"ok, {my_forename:W}", answer_greeting_att)
def check_att_on(c):
action = c.kernal.mem_get(c.realm, 'action')
assert action.name == 'attention'
assert action.args[0].name == 'on'
k.dte.ts('en', 't0010', [(u"ok, computer", u"hello!", check_att_on)])
k.dte.ts('de', 't0011', [(u"OK, HAL!", u"Hallo!", check_att_on)])
def answer_greeting(c):
if c.lang == 'en':
c.resp("Hello!")
c.resp("Hi!")
c.resp("Greetings!")
c.resp("Hey!")
elif c.lang == 'de':
c.resp("Hallo!")
c.resp("Hi!")
c.resp("Grüß Dich!")
c.resp("Hey!")
else:
            raise Exception('sorry, language %s not implemented yet.' % c.lang)
k.dte.dt('en', u"(greetings| good morning | hello | hallo | hi | good day | morning | good evening | good night | Cooee| Cooey | hi there) {self_address:W}",
answer_greeting)
k.dte.dt('en', u"{self_address:W} (greetings| good morning | hello | hallo | hi | good day | morning | good evening | good night | Cooee| Cooey | hi there)",
answer_greeting)
k.dte.dt('de', u"(grüß dich|guten morgen | hallo | hi | guten tag | tag | morgen | guten abend | gute nacht | huhu) {self_address:W}",
answer_greeting)
k.dte.dt('de', u"{self_address:W} (grüß dich|guten morgen | hallo | hi | guten tag | tag | morgen | guten abend | gute nacht | huhu)",
answer_greeting)
k.dte.dt('en', [u"day",
u"g'day",
u"here i am",
u"hey you",
u"hey",
u"tach"],
answer_greeting)
k.dte.dt('de', [u"tag",
u"tach auch",
u"da bin ich wieder",
u"hey du",
u"hey",
u"tach"],
answer_greeting)
def answer_bye(c):
def action_attention_off(c):
c.kernal.mem_set(c.realm, 'action', XSBFunctor('attention', [XSBAtom('off')]))
if c.lang == 'en':
c.resp(u"Bye", action=action_attention_off)
c.resp(u"So long", action=action_attention_off)
c.resp(u"See you later", action=action_attention_off)
c.resp(u"Bye for now", action=action_attention_off)
elif c.lang == 'de':
c.resp(u"Ade", action=action_attention_off)
c.resp(u"Tschüss", action=action_attention_off)
c.resp(u"Bis bald", action=action_attention_off)
c.resp(u"Ciao", action=action_attention_off)
else:
            raise Exception('sorry, language %s not implemented yet.' % c.lang)
k.dte.dt('en', u"(goodbye | bye | ciao | so long | bye for now | see ya | see you later | till next time) {self_address:W}",
answer_bye)
k.dte.dt('en', u"{self_address:W} (goodbye | bye | ciao | so long | bye for now | see ya | see you later | till next time)",
answer_bye)
k.dte.dt('de', u"(auf wiedersehen | tschüss | ciao | ade | bye | cu | bis bald | bis zum nächsten mal|schluss) {self_address:W}",
answer_bye)
k.dte.dt('de', u"{self_address:W} (auf wiedersehen | tschüss | ciao | ade | bye | cu | bis bald | bis zum nächsten mal|schluss)",
answer_bye)
k.dte.dt('en', [u"cu later",
u"i am going to sleep now",
u"i go to bed",
u"i have to go now",
u"i have to go",
u"i will leave you now",
u"i'll stop now",
u"i'll turn you off now",
u"i'm going",
u"i'm leaving again now",
u"i'm leaving now",
u"sleep well",
u"take care",
u"that's enough",
u"until next time",
u"we are done"],
answer_bye)
k.dte.dt('de', [u"cu later",
u"ich gehe jetzt schlafen",
u"ich gehe ins bett",
u"ich muss jetzt gehen",
u"ich muss gehen",
u"ich werde dich jetzt verlassen",
u"ich höre jetzt auf",
u"ich mach dich jetzt aus",
u"ich geh jetzt",
u"ich gehe jetzt wieder",
u"ich gehe jetzt",
u"schlaf gut",
u"machs gut",
u"das reicht",
u"bis zum nächsten mal",
u"sind wir fertig"],
answer_bye)
k.dte.ts('en', 't0000', [(u"hi", u"hello!")])
k.dte.ts('de', 't0001', [(u"hi", u"Hallo!")])
k.dte.ts('en', 't0002', [(u"computer hello", u"Hi!")])
k.dte.ts('de', 't0003', [(u"computer hallo", u"Hi!")])
def check_att_off(c):
action = c.kernal.mem_get(c.realm, 'action')
assert action.name == 'attention'
assert action.args[0].name == 'off'
k.dte.ts('en', 't0004', [(u"bye computer", u"bye", check_att_off)])
k.dte.ts('de', 't0005', [(u"Tschüss computer", u"Tschüss!", check_att_off)])
k.dte.ts('en', 't0006', [(u"bye", u"so long", check_att_off)])
k.dte.ts('de', 't0007', [(u"Ciao", u"Bis bald", check_att_off)])
k.dte.dt('en', u"(ah|) there you are!", u"Hi there!")
k.dte.dt('de', u"(ah|) da bist du (ja|)", u"Hallo hallo")
k.dte.dt('en', [u"but i have no time",
u"i'm a bit tired",
u"i'm out of time",
u"i'm tired",
u"leave me alone"],
[u"Shall we call it a day?",
u"Ok, another time maybe?"])
k.dte.dt('de', [u"ich habe aber keine zeit",
u"ich bin ein bischen müde",
u"ich habe keine zeit mehr",
u"ich bin müde",
u"lass mich in ruhe"],
[u"Wollen wir für heute Schluss machen?",
u"OK, vielleicht ein andermal?"])
|
the-stack_106_17953
|
from __future__ import print_function
import mxnet as mx
import logging
import os
import time
def _get_lr_scheduler(args, adv=False):
lr = args.adv_lr if adv else args.lr
lr_factor = args.adv_lr_factor if adv and args.adv_lr_factor else args.lr_factor
lr_step_epochs = args.adv_lr_step_epochs if adv and args.adv_lr_step_epochs else args.lr_step_epochs
logging.info('[%slr] init-lr=%f lr_factor=%f lr_steps=%s', 'adv-' if adv else '', lr, lr_factor, lr_step_epochs)
if lr_factor >= 1:
return (lr, None)
step_epochs = [int(l) for l in lr_step_epochs.split(',')] # e.g., [20, 40, 60]
step_lr = [lr * (lr_factor ** (n + 1)) for n in range(len(step_epochs))]
def _get_lr(epoch):
if not step_epochs or epoch < step_epochs[0]:
return lr
if epoch >= step_epochs[-1]:
return step_lr[-1]
for k in range(len(step_epochs) - 1):
if epoch >= step_epochs[k] and epoch < step_epochs[k + 1]:
return step_lr[k]
return (lr, _get_lr)
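# --- Editor's note: hedged illustration, not part of the original training script. ---
# _get_lr_scheduler() builds a per-epoch step decay: with lr=0.1, lr_factor=0.1 and
# lr_step_epochs="20,40", the getter returns 0.1 before epoch 20, 0.01 for epochs
# 20-39 and 0.001 from epoch 40 onwards. All numbers below are hypothetical and the
# helper is never called.
def _lr_schedule_example():
    lr, lr_factor, step_epochs = 0.1, 0.1, [20, 40]
    step_lr = [lr * (lr_factor ** (n + 1)) for n in range(len(step_epochs))]

    def get_lr(epoch):
        if epoch < step_epochs[0]:
            return lr
        if epoch >= step_epochs[-1]:
            return step_lr[-1]
        return step_lr[0]

    return [get_lr(e) for e in (0, 20, 45)]  # approximately [0.1, 0.01, 0.001]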
def _load_model(args):
if 'load_epoch' not in args or args.load_epoch is None:
return (None, None, None, None)
assert args.model_prefix is not None
model_prefix = args.model_prefix
softmaxD = mx.sym.load('%s-symbol-softmax.json' % model_prefix)
symAdv = None
param_file = '%s-%04d.params' % (model_prefix, args.load_epoch)
adv_param_file = '%s-adv-%04d.params' % (model_prefix, args.load_epoch)
logging.info('Load model from %s and %s', param_file, adv_param_file)
return (softmaxD, symAdv, param_file, adv_param_file)
def _save_model(args, epoch, netD, netAdv, symD, symAdv, softmax=None):
if args.model_prefix is None:
return None
dst_dir = os.path.dirname(args.model_prefix)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
model_prefix = args.model_prefix
if softmax is not None:
softmax.save('%s-symbol-softmax.json' % model_prefix)
param_name = '%s-%04d.params' % (model_prefix, epoch)
netD.export(model_prefix)
os.rename('%s-0000.params' % model_prefix, param_name)
logging.info('Saving model parameter to %s' % param_name)
adv_param_name = '%s-adv-%04d.params' % (model_prefix, epoch)
netAdv.save_parameters(adv_param_name)
logging.info('Saving adversarial net parameter to %s' % adv_param_name)
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--gpus', type=str, default='0',
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--gpus-work-load', type=str, default=None,
help='list of gpus workload')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=500,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=0.0001,
help='weight decay for sgd')
train.add_argument('--beta1', type=float, default=0.9,
help='beta1 for adam')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--test-io', action='store_true', default=False,
help='test reading speed without training')
train.add_argument('--predict', action='store_true', default=False,
help='run prediction instead of training')
train.add_argument('--predict-all', action='store_true', default=False,
help='run all predictions')
train.add_argument('--predict-output', type=str,
help='predict output')
train.add_argument('--predict-epochs', type=str,
help='epochs to run predictions, e.g., 30,50')
train.add_argument('--adv-lambda', type=float, default=10.,
help='weight of adversarial loss')
train.add_argument('--adv-qcd-start-label', type=int, default=12,
help='qcd start label')
train.add_argument('--adv-train-freq', type=int, default=1,
help='adv training frequency w.r.t classifier')
train.add_argument('--adv-lr', type=float, default=0.001, # lr=0.001 seems good
help='adv lr')
train.add_argument('--adv-lr-factor', type=float, default=None,
help='the ratio to reduce lr on each step for adv')
train.add_argument('--adv-lr-step-epochs', type=str, default=None,
help='the epochs to reduce the adv-lr, e.g. 30,60')
train.add_argument('--adv-mass-min', type=float, default=0.,
help='min fatjet mass')
train.add_argument('--adv-mass-max', type=float, default=250.,
help='max fatjet mass')
train.add_argument('--adv-mass-nbins', type=int, default=25,
help='nbins for fatjet mass')
return train
def fit(args, symbol, data_loader, **kwargs):
"""
train a model
args : argparse returns
    symbol : the symbol definition of the neural network
data_loader : function that returns the train and val data iterators
"""
# devices for training
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
ndevs = len(devs)
# logging
head = '%(asctime)-15s Node[0] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# data iterators
(train, val) = data_loader(args)
if args.test_io:
for i_epoch in range(args.num_epochs):
train.reset()
tic = time.time()
for i, batch in enumerate(train):
for j in batch.data:
j.wait_to_read()
if (i + 1) % args.disp_batches == 0:
logging.info('Epoch [%d]/Batch [%d]\tSpeed: %.2f samples/sec' % (
i_epoch, i, args.disp_batches * args.batch_size / (time.time() - tic)))
tic = time.time()
return
logging.info('Data shape:\n' + str(train.provide_data))
logging.info('Label shape:\n' + str(train.provide_label))
# load model
netD, netAdv, symD, symAdv, symSoftmax = symbol.get_net(train._data_format.num_classes, use_softmax=True, **vars(args))
# load existing model
_softmaxD, _symAdv, _param_file, _adv_param_file = _load_model(args)
if _softmaxD is not None:
assert symSoftmax.tojson() == _softmaxD.tojson()
# assert symAdv.tojson() == _symAdv.tojson()
try:
netD.load_parameters(_param_file, ctx=devs) # works with block.save_parameters()
except AssertionError:
netD.collect_params().load(_param_file, ctx=devs) # work with block.export()
netAdv.load_parameters(_adv_param_file, ctx=devs)
else:
# init
netD.collect_params().initialize(mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2), ctx=devs)
netAdv.collect_params().initialize(mx.init.Normal(0.02), ctx=devs)
logging.debug('-' * 50)
logging.debug(netD.collect_params())
logging.debug('-' * 50)
logging.debug(netAdv.collect_params())
# loss
lossD, lossAdv = symbol.get_loss(**vars(args))
# trainer
# learning rate
lr, lr_getter = _get_lr_scheduler(args)
optimizer_params = {'learning_rate': lr}
if args.optimizer == 'adam':
optimizer_params['beta1'] = args.beta1
elif args.optimizer == 'sgd':
optimizer_params['momentum'] = args.mom
optimizer_params['wd'] = args.wd
trainerD = mx.gluon.Trainer(netD.collect_params(), args.optimizer, optimizer_params)
# adv. trainer
lr_adv, lr_getter_adv = _get_lr_scheduler(args, adv=True)
optimizer_params_adv = {'learning_rate': lr_adv}
if args.optimizer == 'adam':
optimizer_params_adv['beta1'] = args.beta1
elif args.optimizer == 'sgd':
optimizer_params_adv['momentum'] = args.mom
optimizer_params_adv['wd'] = args.wd
trainerAdv = mx.gluon.Trainer(netAdv.collect_params(), args.optimizer, optimizer_params_adv)
# evaluation metric
eval_metrics = ['accuracy', 'ce']
if args.top_k > 0:
eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=args.top_k))
if not isinstance(eval_metrics, mx.metric.EvalMetric):
eval_metric = mx.metric.create(eval_metrics)
eval_metric_adv = mx.metric.create(['accuracy', 'ce'])
# callbacks that run after each batch
batch_end_callback = [mx.callback.Speedometer(args.batch_size, args.disp_batches, auto_reset=True)]
if 'batch_end_callback' in kwargs:
cbs = kwargs['batch_end_callback']
batch_end_callback += cbs if isinstance(cbs, list) else [cbs]
eval_batch_end_callback = [mx.callback.Speedometer(args.batch_size, args.disp_batches * 10, False)]
# save model
save_model = False if args.dryrun or args.model_prefix is None else True
# extra label var
mass_label_name = 'label_%s' % train._data_format.extra_label_vars[0]
################################################################################
# training loop
################################################################################
train_data, eval_data = train, val
for epoch in range(args.num_epochs):
if args.load_epoch is not None and epoch <= args.load_epoch:
continue
if lr_getter:
trainerD.set_learning_rate(lr_getter(epoch))
if lr_getter_adv:
trainerAdv.set_learning_rate(lr_getter_adv(epoch))
logging.info('Epoch[%d] lrD=%g, lrAdv=%g', epoch, trainerD.learning_rate, trainerAdv.learning_rate)
tic = time.time()
eval_metric.reset()
eval_metric_adv.reset()
nbatch = 0
data_iter = iter(train_data)
end_of_batch = False
next_data_batch = next(data_iter)
while not end_of_batch:
data_batch = next_data_batch
# prepare data
_data = [mx.gluon.utils.split_and_load(data_batch.data[idx], devs) for idx, meta in enumerate(train.provide_data)]
data = [[_data[idx][idev] for idx in range(len(train.provide_data))] for idev in range(ndevs)]
_labels = {meta[0]:data_batch.label[idx] for idx, meta in enumerate(train.provide_label)}
label = mx.gluon.utils.split_and_load(_labels['softmax_label'], devs)
_nuis = mx.gluon.utils.split_and_load(_labels[mass_label_name], devs)
nuis = [mx.nd.round(mx.nd.clip((_n - args.adv_mass_min) / (float(args.adv_mass_max - args.adv_mass_min) / args.adv_mass_nbins), 0, args.adv_mass_nbins - 1)) for _n in _nuis]
sample_weight = None
sample_weight_sum = data_batch.data[0].shape[0]
if args.adv_qcd_start_label is not None:
sample_weight = [mx.nd.cast(l >= args.adv_qcd_start_label, data_batch.data[0].dtype) for l in label]
sample_weight_sum = mx.nd.sum(_labels['softmax_label'] >= args.adv_qcd_start_label).asscalar()
# from the training of the classifier
errD = 0
errAdv = 0
err = 0
# from the training of the adversary
errMDN = 0
############################
# (1) first train the adversary
############################
with mx.autograd.record():
features = []
outD = []
for d in data:
_feature, _pred = netD(*d)
features.append(_feature)
outD.append(_pred)
outputR = [netAdv(_feature.detach()) for _feature in features]
lossesR = [lossAdv(outputR[idev], nuis[idev], sample_weight[idev]) for idev in range(ndevs)]
for l in lossesR:
l.backward()
errMDN += mx.nd.mean(l).asscalar()
trainerAdv.step(int(sample_weight_sum))
############################
############################
# (2) then update classifier
############################
if nbatch % args.adv_train_freq == 0:
with mx.autograd.record():
lossesD = [lossD(o, l) for o, l in zip(outD, label)]
outAdv = [netAdv(_feature) for _feature in features]
lossesAdv = [lossAdv(outAdv[idev], nuis[idev], sample_weight[idev]) for idev in range(ndevs)]
losses = [lD - args.adv_lambda * lAdv for lD, lAdv in zip(lossesD, lossesAdv)]
for l in losses:
l.backward()
for idev in range(ndevs):
errD += mx.nd.mean(lossesD[idev]).asscalar()
errAdv += mx.nd.mean(lossesAdv[idev]).asscalar()
err += mx.nd.mean(losses[idev]).asscalar()
trainerD.step(data_batch.data[0].shape[0])
############################
# pre fetch next batch
try:
next_data_batch = next(data_iter)
except StopIteration:
end_of_batch = True
for idev in range(ndevs):
eval_metric.update_dict({'softmax_label':label[idev]}, {'softmax_label':mx.nd.exp(outD[idev])})
eval_metric_adv.update_dict({mass_label_name:nuis[idev]}, {mass_label_name:mx.nd.exp(outputR[idev])})
if batch_end_callback is not None:
batch_end_params = mx.model.BatchEndParam(epoch=epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in mx.base._as_list(batch_end_callback):
callback(batch_end_params)
if nbatch > 1 and nbatch % args.disp_batches == 1:
logging.debug('errD=%f, errAdv=%f, err=%f' % (errD / ndevs, errAdv / ndevs, err / ndevs))
for name, val in eval_metric_adv.get_name_value():
logging.debug('MDN-%s=%f', name, val)
logging.debug('wgtAdv=%f, qcdSumWgt=%f', args.adv_lambda, sample_weight_sum)
nbatch += 1
# one epoch of training is finished
for name, val in eval_metric.get_name_value():
logging.info('Epoch[%d] Train-%s=%f', epoch, name, val)
# adversarial info
logging.info('Epoch[%d] Train-%s=%f', epoch, 'MDN loss', errMDN / ndevs)
logging.info('Epoch[%d] Train-%s=%f, wgtAdv=%f', epoch, 'sum loss', err / ndevs, args.adv_lambda)
# timing
toc = time.time()
logging.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
# epoch end callbacks, e.g., checkpoint
if save_model:
_save_model(args, epoch, netD, netAdv, symD, symAdv, symSoftmax)
#----------------------------------------
# evaluation on validation set
if eval_data:
eval_data.reset()
eval_metric.reset()
actual_num_batch = 0
num_batch = None
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
# prepare data
_data = [mx.gluon.utils.split_and_load(eval_batch.data[idx], devs) for idx, meta in enumerate(eval_data.provide_data)]
data = [[_data[idx][idev] for idx in range(len(eval_data.provide_data))] for idev in range(ndevs)]
_labels = {meta[0]:eval_batch.label[idx] for idx, meta in enumerate(eval_data.provide_label)}
label = mx.gluon.utils.split_and_load(_labels['softmax_label'], devs)
# forward
with mx.autograd.predict_mode():
predD = [netD(*d)[1] for d in data]
for idev in range(ndevs):
eval_metric.update_dict({'softmax_label':label[idev]}, {'softmax_label':mx.nd.exp(predD[idev])})
if eval_batch_end_callback is not None:
batch_end_params = mx.model.BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in mx.base._as_list(eval_batch_end_callback):
callback(batch_end_params)
actual_num_batch += 1
for name, val in eval_metric.get_name_value():
logging.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
# end of 1 epoch, reset the data-iter for another epoch
train_data.reset()
##########################################################################################
def predict(args, symbol, data_loader, **kwargs):
"""
predict with a trained a model
args : argparse returns
data_loader : function that returns the train and val data iterators
"""
# logging
head = '%(asctime)-15s Node[0] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# data iterators
data_iter = data_loader(args)
# devices for training
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
if len(devs) == 1:
devs = devs[0]
# load model
netD, netAdv, symD, symAdv, symSoftmax = symbol.get_net(data_iter._data_format.num_classes, use_softmax=True, **vars(args))
def _predict(args):
# data iterators
data_iter = data_loader(args)
_softmaxD, _symAdv, _param_file, _adv_param_file = _load_model(args)
if _softmaxD is not None:
if symSoftmax.tojson() != _softmaxD.tojson():
print(symSoftmax.tojson())
print('-' * 50)
print(_softmaxD.tojson())
logging.warning('Inconsistent json!')
raise RuntimeError
try:
netD.load_parameters(_param_file, ctx=devs) # works with block.save_parameters()
except AssertionError:
netD.collect_params().load(_param_file, ctx=devs) # work with block.export()
# prediction loop
preds = []
for eval_batch in data_iter:
# prepare data
data = [eval_batch.data[idx].as_in_context(devs) for idx, meta in enumerate(data_iter.provide_data)]
# forward
with mx.autograd.predict_mode():
predD = netD(*data)[1]
probs = mx.nd.exp(predD)
preds.append(probs.asnumpy())
import numpy as np
preds = np.concatenate(preds)
truths = data_iter.get_truths()
observers = data_iter.get_observers()
print(preds.shape, truths.shape, observers.shape)
pred_output = {}
for i, label in enumerate(data_iter._data_format.class_labels):
pred_output['class_%s' % label] = truths[:, i]
pred_output['score_%s' % label] = preds[:, i]
for i, obs in enumerate(data_iter._data_format.obs_vars):
pred_output[obs] = observers[:, i]
import pandas as pd
df = pd.DataFrame(pred_output)
if args.predict_output:
logging.info('Write prediction file to %s' % args.predict_output)
outdir = os.path.dirname(args.predict_output)
if not os.path.exists(outdir):
os.makedirs(outdir)
# df.to_hdf(args.predict_output, 'Events', format='table')
from common.util import plotROC
plotROC(preds, truths, output=os.path.join(outdir, 'roc.pdf'))
from root_numpy import array2root
array2root(df.to_records(index=False), filename=args.predict_output.rsplit('.', 1)[0] + '.root', treename='Events', mode='RECREATE')
epochs = [args.load_epoch]
if args.predict_epochs:
epochs = [int(i) for i in args.predict_epochs.split(',')]
if args.predict_all:
import re
import glob
test_input = re.sub(r'\/JMAR.*\/.*\/', '/_INPUT_/', args.data_test)
pred_output = re.sub(r'\/JMAR.*\/.+h5', '/_OUTPUT_', args.predict_output)
for epoch in epochs:
args.load_epoch = epoch
for a in ['JMAR', 'JMAR_lowM']:
for b in ['Top', 'W', 'Z', 'Higgs', 'Hbb', 'Hcc', 'H4q', 'QCD', 'QCD_Flat']:
args.data_test = test_input.replace('_INPUT_', '%s/%s' % (a, b))
args.predict_output = pred_output.replace('_OUTPUT_', 'epoch%d/%s/mx-pred_%s.h5' % (epoch, a, b))
if len(glob.glob(args.data_test)) == 0:
logging.warning('No files found in %s, ignoring...', args.data_test)
continue
_predict(args)
else:
_predict(args)
|
the-stack_106_17954
|
from baseline.utils import exporter
from baseline.model import register_decoder, register_arc_policy, create_seq2seq_arc_policy
from baseline.tf.embeddings import *
from baseline.tf.seq2seq.encoders import TransformerEncoderOutput
from functools import partial
__all__ = []
export = exporter(__all__)
class ArcPolicy(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, inputs):
encoder_outputs, hsz, beam_width = inputs
return self.forward(encoder_outputs, hsz, beam_width)
def forward(self, encoder_outputs, hsz, beam_width=1):
pass
class AbstractArcPolicy(ArcPolicy):
def get_state(self, encoder_outputs):
pass
def forward(self, encoder_output, hsz, beam_width=1):
h_i = self.get_state(encoder_output)
context = encoder_output.output
context = repeat_batch(context, beam_width)
# What does the multi-RNN look like in old TF again?
if type(h_i) is tuple:
h_i = repeat_batch(h_i[0], beam_width, dim=1), repeat_batch(h_i[1], beam_width, dim=1)
else:
h_i = repeat_batch(h_i, beam_width, dim=1)
batch_size = get_shape_as_list(context)[0]
init_zeros = tf.zeros((batch_size, hsz), dtype=context.dtype)
return h_i, init_zeros, context
@register_arc_policy(name='default')
class TransferLastHiddenPolicy(AbstractArcPolicy):
def get_state(self, encoder_outputs):
return encoder_outputs.hidden
@register_arc_policy(name='no_arc')
class NoArcPolicy(AbstractArcPolicy):
def _zero_state(self, final_encoder_state):
num_rnns = len(final_encoder_state)
batchsz = get_shape_as_list(final_encoder_state)[0]
zstate = []
for i, _ in enumerate(self.rnns):
zstate.append((np.zeros((batchsz, num_rnns), dtype=np.float32),
np.zeros((batchsz, num_rnns), dtype=np.float32)))
return zstate
def get_state(self, encoder_outputs):
final_encoder_state = encoder_outputs.hidden
return self._zero_state(final_encoder_state)
@register_decoder(name='vanilla')
class RNNDecoder(tf.keras.layers.Layer):
def __init__(self, tgt_embeddings, **kwargs):
"""Construct an RNN decoder. It provides the input size, the rest is up to the impl.
The default implementation provides an RNN cell, followed by a linear projection, out to a softmax
:param input_dim: The input size
:param kwargs:
:return: void
"""
super().__init__()
self.hsz = kwargs['hsz']
self.arc_policy = create_seq2seq_arc_policy(**kwargs)
self.tgt_embeddings = tgt_embeddings
rnntype = kwargs.get('rnntype', 'lstm')
layers = kwargs.get('layers', 1)
feed_input = kwargs.get('feed_input', True)
dsz = tgt_embeddings.get_dsz()
if feed_input:
self.input_i = self._feed_input
dsz += self.hsz
else:
self.input_i = self._basic_input
pdrop = kwargs.get('dropout', 0.5)
self.decoder_rnn = rnn_cell(dsz, self.hsz, rnntype, layers, pdrop)
self.dropout = tf.keras.layers.Dropout(pdrop)
self.init_attn(**kwargs)
do_weight_tying = bool(kwargs.get('tie_weights', True))
if do_weight_tying:
if self.hsz != self.tgt_embeddings.get_dsz():
raise ValueError("weight tying requires hsz == embedding dsz, got {} hsz and {} dsz".format(self.hsz, self.tgt_embedding.get_dsz()))
self.preds = WeightTieDense(self.tgt_embeddings)
else:
self.preds = tf.keras.layers.Dense(self.tgt_embeddings.get_vsz())
@staticmethod
def _basic_input(dst_embed_i, _):
"""
        In this function the destination embedding is passed directly into the decoder. The output of the previous
        hidden state is ignored. This is implemented using a bound method to a field in the class for speed so that
        this decision is handled at initialization, not as a conditional in training or inference.
        :param dst_embed_i: The embedding at step i
:param _: Ignored
:return: basic input
"""
return dst_embed_i.squeeze(0)
@staticmethod
def _feed_input(embed_i, attn_output_i):
"""
In this function the destination embedding is concatenated with the previous attentional output and
passed to the decoder. This is implemented using a bound method to a field in the class for speed
        so that this decision is handled at initialization, not as a conditional in training or inference.
        :param embed_i: The embedding at step i
        :param attn_output_i: The previous attentional output
:return: an input that is a concatenation of previous state and destination embedding
"""
return tf.concat([embed_i, attn_output_i], 1)
def call(self, encoder_outputs, dst):
src_mask = encoder_outputs.src_mask
# TODO where to get beam size?
h_i, output_i, context_bth = self.arc_policy((encoder_outputs, self.hsz, 1))
output_bth, _ = self.decode_rnn(context_bth, h_i, output_i, dst, src_mask)
pred = self.output(output_bth)
return pred
def decode_rnn(self, context_bth, h_i, output_i, dst_bth, src_mask):
embed_out_bth = self.tgt_embeddings(dst_bth)
outputs = []
num_steps = get_shape_as_list(embed_out_bth)[1]
for i in range(num_steps):
embed_i = embed_out_bth[:, i, :]
# Input feeding would use previous attentional output in addition to destination embeddings
embed_i = self.input_i(embed_i, output_i)
output_i, h_i = self.decoder_rnn(embed_i, h_i)
output_i = self.attn(output_i, context_bth, src_mask)
output_i = self.dropout(output_i)
# Attentional outputs
outputs.append(output_i)
outputs_tbh = tf.stack(outputs, axis=1)
return outputs_tbh, h_i
def attn(self, output_t, context, src_mask=None):
return output_t
def init_attn(self, **kwargs):
pass
def output(self, x):
return self.preds(x)
class BeamSearch(BeamSearchBase):
def __init__(self, parent, **kwargs):
super().__init__(**kwargs)
self.parent = parent
def init(self, encoder_outputs):
"""Tile batches for encoder inputs and the likes."""
src_mask = repeat_batch(encoder_outputs.src_mask, self.K)
h_i, dec_out, context = self.parent.arc_policy((encoder_outputs, self.parent.hsz, self.K))
return h_i, dec_out, context, src_mask
def step(self, paths, extra):
"""Calculate the probs of the next output and update state."""
h_i, dec_out, context, src_mask = extra
# Our RNN decoder is now batch-first, so we need to expand the time dimension
last = tf.reshape(paths[:, :, -1], (-1, 1))
dec_out, h_i = self.parent.decode_rnn(context, h_i, dec_out, last, src_mask)
probs = self.parent.output(dec_out)
log_probs = tf.nn.log_softmax(probs, axis=-1)
# Collapse over time
dec_out = tf.squeeze(dec_out, 1)
return log_probs, (h_i, dec_out, context, src_mask)
def update(self, beams, extra):
"""Select the correct hidden states and outputs to used based on the best performing beams."""
h_i, dec_out, context, src_mask = extra
h_i = tuple(tf.gather(hc, beams, axis=1) for hc in h_i)
dec_out = tf.gather(dec_out, beams)
return h_i, dec_out, context, src_mask
def beam_search(self, encoder_outputs, **kwargs):
alpha = kwargs.get('alpha')
if alpha is not None:
kwargs['length_penalty'] = partial(gnmt_length_penalty, alpha=alpha)
return RNNDecoder.BeamSearch(self, **kwargs)(encoder_outputs)
@register_decoder(name='default')
class RNNDecoderWithAttn(RNNDecoder):
def __init__(self, tgt_embeddings, **kwargs):
super().__init__(tgt_embeddings, **kwargs)
def init_attn(self, **kwargs):
attn_type = kwargs.get('attn_type', 'bahdanau').lower()
if attn_type == 'dot':
self.attn_module = LuongDotProductAttention(self.hsz)
elif attn_type == 'concat' or attn_type == 'bahdanau':
self.attn_module = BahdanauAttention(self.hsz)
elif attn_type == 'sdp':
self.attn_module = ScaledDotProductAttention(self.hsz)
else:
self.attn_module = LuongGeneralAttention(self.hsz)
def attn(self, output_t, context, src_mask=None):
return self.attn_module((output_t, context, context, src_mask))
@register_decoder(name='transformer')
class TransformerDecoderWrapper(tf.keras.layers.Layer):
def __init__(self, tgt_embeddings, dropout=0.5, layers=1, hsz=None, num_heads=4, **kwargs):
super().__init__()
self.tgt_embeddings = tgt_embeddings
dsz = self.tgt_embeddings.get_dsz()
if hsz is None:
hsz = dsz
self.hsz = hsz
d_ff = int(kwargs.get('d_ff', 4 * hsz))
rpr_k = kwargs.get('rpr_k')
d_k = kwargs.get('d_k')
activation = kwargs.get('activation', 'gelu')
scale = bool(kwargs.get('scale', True))
layer_drop = float(kwargs.get('layer_drop', 0.0))
ra_type = kwargs.get('ra_type')
transformer_type = kwargs.get('transformer_type')
self.transformer_decoder = TransformerDecoderStack(num_heads, d_model=hsz, d_ff=d_ff,
pdrop=dropout, scale=scale,
layers=layers, rpr_k=rpr_k, d_k=d_k,
activation_type=activation, layer_drop=layer_drop,
ra_type=ra_type, transformer_type=transformer_type)
self.proj_to_dsz = self._identity
self.proj_to_hsz = self._identity
if hsz != dsz:
self.proj_to_hsz = tf.keras.layers.Dense(hsz)
self.proj_to_dsz = tf.keras.layers.Dense(dsz)
do_weight_tying = bool(kwargs.get('tie_weights', True))
if do_weight_tying:
if self.hsz != self.tgt_embeddings.get_dsz():
raise ValueError("weight tying requires hsz == embedding dsz, got {} hsz and {} dsz".format(self.hsz, self.tgt_embedding.get_dsz()))
self.preds = WeightTieDense(self.tgt_embeddings)
else:
self.preds = tf.keras.layers.Dense(self.tgt_embeddings.get_vsz())
def _identity(self, x):
return x
def call(self, encoder_output, dst):
embed_out_bth = self.tgt_embeddings(dst)
embed_out_bth = self.proj_to_hsz(embed_out_bth)
context_bth = encoder_output.output
T = get_shape_as_list(embed_out_bth)[1]
dst_mask = tf.cast(subsequent_mask(T), embed_out_bth.dtype)
src_mask = encoder_output.src_mask
output = self.transformer_decoder((embed_out_bth, context_bth, src_mask, dst_mask))
output = self.proj_to_dsz(output)
prob = self.output(output)
return prob
def output(self, x):
return self.preds(x)
class BeamSearch(BeamSearchBase):
def __init__(self, parent, **kwargs):
super().__init__(**kwargs)
self.parent = parent
def init(self, encoder_outputs):
"""Tile for the batch of the encoder inputs."""
encoder_outputs = TransformerEncoderOutput(
repeat_batch(encoder_outputs.output, self.K),
repeat_batch(encoder_outputs.src_mask, self.K)
)
return encoder_outputs
def step(self, paths, extra):
"""Calculate the probs for the last item based on the full path."""
B, K, T = paths.shape
assert K == self.K
return self.parent(extra, tf.reshape(paths, (B * K, T)))[:, -1], extra
def update(self, beams, extra):
"""There is no state for the transformer so just pass it."""
return extra
def beam_search(self, encoder_outputs, **kwargs):
alpha = kwargs.get('alpha')
if alpha is not None:
kwargs['length_penalty'] = partial(gnmt_length_penalty, alpha=alpha)
return TransformerDecoderWrapper.BeamSearch(self, **kwargs)(encoder_outputs)
|
the-stack_106_17955
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RComplexheatmap(RPackage):
"""Complex heatmaps are efficient to visualize associations between
different sources of data sets and reveal potential structures. Here
the ComplexHeatmap package provides a highly flexible way to arrange
multiple heatmaps and supports self-defined annotation graphics."""
homepage = "https://bioconductor.org/packages/ComplexHeatmap/"
url = "https://git.bioconductor.org/packages/ComplexHeatmap"
list_url = homepage
version('1.14.0', git='https://git.bioconductor.org/packages/ComplexHeatmap', commit='0acd8974fb5cedde8cd96efea6dfa39324d25b34')
depends_on('r-circlize', type=('build', 'run'))
depends_on('r-getoptlong', type=('build', 'run'))
depends_on('r-colorspace', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-dendextend', type=('build', 'run'))
depends_on('r-globaloptions', type=('build', 'run'))
depends_on('[email protected]:3.4.9', when='@1.14.0')
|
the-stack_106_17957
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from contextlib import contextmanager
from typing import Any, Mapping
from werkzeug.exceptions import NotFound
from eduid_common.api.testing import EduidAPITestCase
from eduid_common.authn.middleware import AuthnBaseApp
from eduid_common.config.base import EduIDBaseAppConfig
from eduid_common.config.parsers import load_config
class AuthnTestApp(AuthnBaseApp):
def __init__(self, name: str, test_config: Mapping[str, Any], **kwargs):
# This should be an AuthnConfig instance, but an EduIDBaseAppConfig instance suffices for these
# tests and we don't want eduid_common to depend on eduid_webapp.
self.conf = load_config(typ=EduIDBaseAppConfig, app_name=name, ns='webapp', test_config=test_config)
super().__init__(self.conf, **kwargs)
class AuthnTests(EduidAPITestCase):
def load_app(self, config):
"""
Called from the parent class, so we can provide the appropriate flask
app for this test case.
"""
return AuthnTestApp('testing', config)
def update_config(self, config):
config.update(
{
'available_languages': {'en': 'English', 'sv': 'Svenska'},
'development': 'DEBUG',
'application_root': '/',
'no_authn_urls': [],
'log_level': 'DEBUG',
'am_broker_url': 'amqp://eduid:eduid_pw@rabbitmq/am',
'msg_broker_url': 'amqp://eduid:eduid_pw@rabbitmq/msg',
'celery_config': {
'result_backend': 'amqp',
'task_serializer': 'json',
'mongo_uri': config['mongo_uri'],
},
}
)
return config
def test_get_view(self):
response = self.browser.get('/some/path')
self.assertEqual(response.status_code, 302)
with self.session_cookie(self.browser, 'hubba-bubba') as client:
with self.assertRaises(NotFound):
client.get('/some/path')
class UnAuthnTests(EduidAPITestCase):
def load_app(self, config):
"""
Called from the parent class, so we can provide the appropriate flask
app for this test case.
"""
return AuthnTestApp('testing', config)
def update_config(self, config):
config.update(
{
'available_languages': {'en': 'English', 'sv': 'Svenska'},
'development': 'DEBUG',
'application_root': '/',
'log_level': 'DEBUG',
'am_broker_url': 'amqp://eduid:eduid_pw@rabbitmq/am',
'msg_broker_url': 'amqp://eduid:eduid_pw@rabbitmq/msg',
'celery_config': {
'result_backend': 'amqp',
'task_serializer': 'json',
'mongo_uri': config['mongo_uri'],
},
}
)
return config
@contextmanager
def session_cookie(self, client, server_name='localhost'):
with client.session_transaction() as sess:
sess.persist()
client.set_cookie(server_name, key=self.app.config.session_cookie_name, value=sess._session.token.cookie_val)
yield client
def test_get_view(self):
response = self.browser.get('/status/healthy')
self.assertEqual(response.status_code, 200)
|
the-stack_106_17958
|
"""
This script focuses on cleaning and applying topic modeling (LDA) to
articles extracted from https://www.uberpeople.net/forums/Complaints/
"""
# %% load libraries
# --+ basic
import os
import sys
import time
import pickle
import numpy as np
import pandas as pd
from pprint import pprint as pp
from datetime import datetime
# --+ data manipulation
import re
import spacy
import en_core_web_lg
import gensim
from gensim.models import Phrases
from gensim.corpora import Dictionary
from gensim.models import CoherenceModel
from gensim.corpora.dictionary import Dictionary
# --+ model
from gensim.models import TfidfModel
import gensim.corpora as corpora
from gensim.models import LdaModel, ldamodel
# --+ basic visualization
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# --+ topic modeling visualization
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
# --+ functions from other modules
from coherence_eval import compute_coherence_values
# --+ ignore warnings
import warnings
warnings.filterwarnings("ignore")
# %% set up MALLET path
mallet_path = '/Users/phuattagrish/mallet-2.0.8/bin/mallet'
# %% check ipython location
sys.executable
# %% change working directory
os.getcwd()
# os.chdir('data')
os.chdir('../scripts')
# %% load the data
with open ('data.pickle', 'rb') as fp:
dict_ = pickle.load(fp)
# --+ article
articles = dict_['articles']
# --+ timestamp
time_stamp_ = dict_['timestamp']
# --+ put the data into DataFrame
articles_to_use = [x[0] for x in articles]
df = pd.DataFrame({
'articles':articles_to_use,
'timestamp':time_stamp_
})
df.head()
# %% basic cleaning
# --+ get only year/month/date
def to_date(string):
date_time_obj = datetime.strptime(string, '%Y-%m-%d')
return date_time_obj
# --+ apply the to_date function to the dataframe
df['timestamp'] = df['timestamp'].str[:10].apply(to_date)
# --+ preliminary cleaning of the articles
df.loc[:, 'articles'] = df['articles'].str.replace('\n', '')
df.head()
docs = [doc.strip().lower() for doc in df.articles]
docs = [re.sub(r'\b-\b', '_', text) for text in docs]
len(docs)
type(docs)
# --+ export
_dict_ = {'df_nontokenized':df}
with open('df_nontokenized.pickle','wb') as f:
pickle.dump(_dict_,f)
# %% advanced cleaning
# --+ load English spacy
nlp = spacy.load('en_core_web_lg')
# --+ clean the texts
docs_tokens, tmp_tokens = [], []
for doc in docs:
tmp_tokens = [token.lemma_ for token in nlp(doc)
if not token.is_stop
and not token.is_punct
and not token.like_num
and token.is_alpha]
docs_tokens.append(tmp_tokens)
tmp_tokens = []
# bi-gram and tri-gram
# --+ get rid of common terms
common_terms = [u'of', u'with', u'without', u'and', u'or', u'the', u'a',
u'not', u'be', u'to', u'this', u'who', u'in']
bigram = Phrases(docs_tokens,
min_count=10,
threshold=5,
max_vocab_size=50000,
common_terms=common_terms)
trigram = Phrases(bigram[docs_tokens],
min_count=10,
threshold=5,
max_vocab_size=50000,
common_terms=common_terms)
# --+ get tri-gram of tokenized words
docs_phrased = [trigram[bigram[line]] for line in docs_tokens]
DICT = Dictionary(docs_phrased)
CORPUS = [DICT.doc2bow(doc) for doc in docs_phrased]
# remove additional noise words using TfidfModel
tfidf = TfidfModel(CORPUS, id2word=DICT)
# --+ set-up arguments
low_value = 0.03
words = []
words_missing_in_tfidf = []
# --+ loop over the CORPUS and remove noise words
for i in range(0, len(CORPUS)):
bow = CORPUS[i]
low_value_words = []
tfidf_ids = [id for id, value in tfidf[bow]]
bow_ids = [id for id, value in bow]
low_value_words = [id for id, value in tfidf[bow] if value < low_value]
drops = low_value_words + words_missing_in_tfidf
for item in drops:
words.append(DICT[item])
words_missing_in_tfidf = [id for id in bow_ids if id not in tfidf_ids]
new_bow = [b for b in bow if b[0] not in low_value_words and b[0]
not in words_missing_in_tfidf]
CORPUS[i] = new_bow
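# --- Editor's note: hedged toy example, not part of the original analysis. ---
# The loop above drops tokens whose TF-IDF weight falls below `low_value`, i.e. terms
# that are too common across documents to be informative. The tiny corpus below is
# purely illustrative and the helper is never called; it shows the same
# doc2bow -> TfidfModel filtering pattern on made-up data.
def _tfidf_filter_example(threshold=0.03):
    toy_docs = [['uber', 'driver', 'pay'], ['uber', 'driver', 'app'], ['uber', 'surge']]
    toy_dict = Dictionary(toy_docs)
    toy_corpus = [toy_dict.doc2bow(doc) for doc in toy_docs]
    toy_tfidf = TfidfModel(toy_corpus, id2word=toy_dict)
    # 'uber' occurs in every document, so its weight is ~0 and it gets filtered out
    return [[(i, w) for i, w in toy_tfidf[bow] if w >= threshold] for bow in toy_corpus]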
# %% export data
# --+ create a dataframe
df_to_export = pd.DataFrame({
'articles':docs_phrased,
'timestamp':df['timestamp']
})
df_to_export
# --+ export the data
os.getcwd()
os.chdir('../data')
dict_ = {'df_tokenized':df_to_export}
with open('df_tokenized.pickle','wb') as f:
pickle.dump(dict_,f)
# %% compute coherence scores for several values of num_topics
# --+ set-up
limit, start, step = 10, 2, 1
tic = time.time()
model_list, coher_vals = compute_coherence_values(dictionary=DICT,
corpus=CORPUS,
texts=docs_phrased,
start= start,
limit= limit,
step= step)
toc = time.time()
print(toc - tic)
model_list
coher_vals
# --+ model with the optimal num_topics based on coherence value and interpretability
lda_mallet = gensim.models.wrappers.LdaMallet(mallet_path= '/Users/phuattagrish/mallet-2.0.8/bin/mallet',
corpus=CORPUS,
num_topics= 4,
id2word=DICT,
random_seed=123)
# --+ words associated with the optimal model
lda_mallet.print_topics(num_topics= 4,
num_words=10)
# %% result analyses
# interpret the result
# --+ get document-topic pairs probabilities
LDA_MALLET_G = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_mallet)
LDA_MALLET_G
TRANSF_CORPUS = LDA_MALLET_G.get_document_topics(CORPUS)
TRANSF_CORPUS
DOC_TOPIC_M = []
len(CORPUS)
for id, doc in enumerate(TRANSF_CORPUS):
for topic in np.arange(0, 4, 1):
topic_n = doc[topic][0]
topic_prob = doc[topic][1]
DOC_TOPIC_M.append([id, topic, topic_prob])
np.arange(0, 4, 1)
# --+ populate the dataframe
DF = pd.DataFrame(DOC_TOPIC_M)
DF # --> correct
DF.iloc[100:120]
# --+ rename columns
OLD_NAMES = [0, 1, 2]
NEW_NAMES = ['doc_id', 'topic_n', 'prob']
COLS = dict(zip(OLD_NAMES, NEW_NAMES))
DF.rename(columns=COLS, inplace=True)
# --+ get dominant topic
GR = DF.groupby('doc_id')
DF.loc[:, 'max'] = GR['prob'].transform(np.max)
DF
DF.loc[:, 'first_topic'] = 0
DF.loc[DF['prob'] == DF['max'], 'first_topic'] = 1
FIRST_TOPIC = DF.loc[DF['first_topic'] == 1]
FIRST_TOPIC
DF
# --+ drop the unused column
FIRST_TOPIC.drop(columns=['first_topic'], axis=1, inplace=True)
FIRST_TOPIC['topic_n'].value_counts()
FIRST_TOPIC
DF_TOPIC = FIRST_TOPIC.copy()
# --+ remove doc_id that cannot be assigned into a unique topic
DF_TOPIC = DF_TOPIC.loc[~FIRST_TOPIC.duplicated(subset=['doc_id'],keep=False), :]
# --+ export the data
os.getcwd()
dict_ = {'DF_TOPIC':DF_TOPIC}
with open('DF_TOPIC.pickle','wb') as f:
pickle.dump(dict_,f)
# %% visualize number of each topic over the years
DF_TOPIC
# --+ to reduce granularity
def Quarter(string):
month = int(string[5:])
if month in [1,2,3]:
string = string[:4] + '_Q1'
if month in [4,5,6]:
string = string[:4] + '_Q2'
if month in [7,8,9]:
string = string[:4] + '_Q3'
if month in [10,11,12]:
string = string[:4] + '_Q4'
return string
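# --- Editor's note: hedged illustration, not part of the original analysis. ---
# Quarter() maps a 'YYYY-MM' prefix onto a 'YYYY_Qx' bucket, e.g. '2020-02' -> '2020_Q1'
# and '2020-11' -> '2020_Q4'. The helper below simply exercises that mapping.
def _quarter_example():
    # expected: ['2020_Q1', '2020_Q3', '2020_Q4']
    return [Quarter(s) for s in ('2020-02', '2020-07', '2020-11')]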
# --+ manipulate data
df_vis = DF_TOPIC.copy()
df_vis = df_vis.merge(df, how = 'inner', left_index = True, right_index = True)
df_vis['timestamp'] = df['timestamp'].astype(str).str[:7].apply(Quarter)
df_vis = df_vis[['topic_n','timestamp']]
df_vis['count'] = 1
df_vis = df_vis.groupby(['topic_n','timestamp']).agg(np.sum).reset_index()
df_vis.head()
unique_date = df_vis['timestamp'].unique().tolist()
for d in unique_date:
topic_available = df_vis.loc[df_vis['timestamp']==d,'topic_n'].unique().tolist()
for i in range(1,5):
if i not in topic_available:
df_concat = pd.DataFrame({
'topic_n':[i],
'timestamp':[d],
'count':[0]
})
df_vis = pd.concat([df_vis,df_concat],axis=0)
df_vis = df_vis.reset_index(drop=True)
df_vis['X_label'] = 0
for i in range(1,5):
for idx, d in enumerate(unique_date):
df_vis.loc[(df_vis['topic_n'] ==i) & (df_vis['timestamp'] ==d),'X_label'] = idx
# --+ visualize
plt.rcParams.update({'font.size': 22})
fig, ax = plt.subplots(figsize=(30,15))
x_labels = df_vis['timestamp'].unique().tolist()[1:-1]
labels = df_vis['topic_n'].unique().tolist()
t_1 = np.array(df_vis.loc[df_vis['topic_n']==0,'count'])[1:-1]
t_2 = np.array(df_vis.loc[df_vis['topic_n']==1,'count'])[1:-1]
t_3 = np.array(df_vis.loc[df_vis['topic_n']==2,'count'])[1:-1]
t_4 = np.array(df_vis.loc[df_vis['topic_n']==3,'count'])[1:-1]
ax.bar(x_labels,t_1,label=labels[0])
ax.bar(x_labels,t_2,label=labels[1],bottom=t_1)
ax.bar(x_labels,t_3,label=labels[2],bottom=t_2+t_1)
ax.bar(x_labels,t_4,label=labels[3],bottom=t_3+t_2+t_1)
ax.legend(fontsize=20,labels=['Topic '+str(x) for x in range(0,4)])
# --+ decorate
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.xlabel("Time")
plt.ylabel("Number of Articles")
plt.xticks(rotation=45)
plt.show()
# --+ save the figure
os.getcwd()
out_f = os.path.join("..","output", "topic_modeling.pdf")
plt.savefig(out_f, transparent=True, bbox_inches="tight", pad_inches=0)
# %% project unseen documents, scraped from recent posts in the complaints community, onto the model
test_1 = ["Scruber emailed me and sent msg thru app on Friday Juky 2nd; $100 for 3 completed rides over 4th of July weekend. Figured do 3 quick trips and then done and go home w $100 + fares. Scruber paid the fares but not the promo. I sent msg & they replied ‘due to some outages, we are experiencing delays in response times & prioritizing emergencies.... That was 2 days ago & still no response despite repeated msgs to them. Scruber strikes again."]
test_2 = ["Anyone else get this I'm in Chicago. No way I am waiting 7 minutes for a pax. Since Uber has not been paying for the first 2 minutes after arrival, I am only waiting those 2 minutes from this point forward, then leaving. Uber knows that riders are waiting longer for drivers than ever before; now they are going to give them 7 MORE minutes after we arrive?? Good luck with that Uber and pax."]
test_all = test_1 + test_2
# --+ clean the unseen dataset
docs_tokens_test, tmp_tokens_test = [], []
for doc in test_all:
tmp_tokens_test = [token.lemma_ for token in nlp(doc)
if not token.is_stop
and not token.is_punct
and not token.like_num
and token.is_alpha]
docs_tokens_test.append(tmp_tokens_test)
tmp_tokens_test = []
docs_tokens_test
# bi-gram and tri-gram
# --+ get rid of common terms
bigram = Phrases(docs_tokens_test,
min_count=10,
threshold=5,
max_vocab_size=50000,
common_terms=common_terms)
trigram = Phrases(bigram[docs_tokens_test],
min_count=10,
threshold=5,
max_vocab_size=50000,
common_terms=common_terms)
# --+ get tri-gram of tokenized words
docs_phrased_test = [trigram[bigram[line]] for line in docs_tokens_test]
DICT_test = Dictionary(docs_phrased_test)
CORPUS_test = [DICT_test.doc2bow(doc) for doc in docs_phrased_test]
# remove addtional noise words using TfidfModel
tfidf = TfidfModel(CORPUS_test, id2word=DICT)
# --+ set-up arguments
low_value = 0.03
words = []
words_missing_in_tfidf = []
# --+ loop over CORPUS_test and remove noise words
for i in range(0, len(CORPUS_test)):
bow = CORPUS_test[i]
low_value_words = []
tfidf_ids = [id for id, value in tfidf[bow]]
bow_ids = [id for id, value in bow]
low_value_words = [id for id, value in tfidf[bow] if value < low_value]
drops = low_value_words + words_missing_in_tfidf
for item in drops:
        words.append(DICT_test[item])  # look the ids up in the dictionary that produced them
words_missing_in_tfidf = [id for id in bow_ids if id not in tfidf_ids]
new_bow = [b for b in bow if b[0] not in low_value_words and b[0]
not in words_missing_in_tfidf]
CORPUS_test[i] = new_bow
other_corpus = [DICT.doc2bow(text) for text in docs_phrased_test]
# %% analyse the unseen documents and their results
vector_0 = lda_mallet[other_corpus[0]]
vector_1 = lda_mallet[other_corpus[1]]
vector_0
vector_1
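# %% (added illustration, not part of the original analysis) report the dominant
# topic for each unseen document; this assumes the mallet wrapper returns a full
# list of (topic_id, probability) pairs per document, as gensim's wrapper does
for doc_id, vector in enumerate([vector_0, vector_1]):
    dominant_topic, dominant_prob = max(vector, key=lambda pair: pair[1])
    print("unseen doc {}: dominant topic {} (p={:.2f})".format(doc_id, dominant_topic, dominant_prob))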
|
the-stack_106_17960
|
import logging
import os
import uuid
import test_infra.utils as infra_utils
from distutils import util
from pathlib import Path
import pytest
from test_infra import assisted_service_api, consts, utils
qe_env = False
def is_qe_env():
return os.environ.get('NODE_ENV') == 'QE_VM'
def _get_cluster_name():
cluster_name = utils.get_env('CLUSTER_NAME', f'{consts.CLUSTER_PREFIX}')
if cluster_name == consts.CLUSTER_PREFIX:
cluster_name = cluster_name + '-' + str(uuid.uuid4())[:8]
return cluster_name
# TODO: change this
if is_qe_env():
from test_infra.controllers.node_controllers.qe_vm_controler import \
QeVmController as nodeController
qe_env = True
else:
from test_infra.controllers.node_controllers.terraform_controller import \
TerraformController as nodeController
private_ssh_key_path_default = os.path.join(os.getcwd(), "ssh_key/key") if not qe_env else \
os.path.join(str(Path.home()), ".ssh/id_rsa")
env_variables = {"ssh_public_key": utils.get_env('SSH_PUB_KEY'),
"remote_service_url": utils.get_env('REMOTE_SERVICE_URL'),
"pull_secret": utils.get_env('PULL_SECRET'),
"offline_token": utils.get_env('OFFLINE_TOKEN'),
"openshift_version": utils.get_openshift_version(),
"base_domain": utils.get_env('BASE_DOMAIN', "redhat.com"),
"num_masters": int(utils.get_env('NUM_MASTERS', consts.NUMBER_OF_MASTERS)),
"num_workers": max(2, int(utils.get_env('NUM_WORKERS', 0))),
"vip_dhcp_allocation": bool(util.strtobool(utils.get_env('VIP_DHCP_ALLOCATION'))),
"worker_memory": int(utils.get_env('WORKER_MEMORY', '8892')),
"master_memory": int(utils.get_env('MASTER_MEMORY', '16984')),
"network_mtu": utils.get_env('NETWORK_MTU', '1500'),
"worker_disk": int(utils.get_env('WORKER_DISK', '21474836480')),
"master_disk": int(utils.get_env('MASTER_DISK', '128849018880')),
"storage_pool_path": utils.get_env('STORAGE_POOL_PATH', os.path.join(os.getcwd(), "storage_pool")),
"cluster_name": _get_cluster_name(),
"private_ssh_key_path": utils.get_env('PRIVATE_KEY_PATH', private_ssh_key_path_default),
"kubeconfig_path": utils.get_env('KUBECONFIG', ''),
"log_folder": utils.get_env('LOG_FOLDER', consts.LOG_FOLDER),
"service_cidr": utils.get_env('SERVICE_CIDR', '172.30.0.0/16'),
"cluster_cidr": utils.get_env('CLUSTER_CIDR', '10.128.0.0/14'),
"host_prefix": int(utils.get_env('HOST_PREFIX', '23')),
"iso_image_type": utils.get_env('ISO_IMAGE_TYPE', consts.ImageType.FULL_ISO),
"worker_vcpu": utils.get_env('WORKER_CPU', consts.WORKER_CPU),
"master_vcpu": utils.get_env('MASTER_CPU', consts.MASTER_CPU),
"test_teardown": bool(util.strtobool(utils.get_env('TEST_TEARDOWN', 'true')))
}
cluster_mid_name = infra_utils.get_random_name()
# Tests running on terraform parallel must have unique ISO file
if not qe_env:
image = utils.get_env('ISO',
os.path.join(consts.IMAGE_FOLDER, f'{env_variables["cluster_name"]}-{cluster_mid_name}-'
f'installer-image.iso')).strip()
env_variables["kubeconfig_path"] = f'/tmp/test_kubeconfig_{cluster_mid_name}'
else:
image = utils.get_env('ISO',
os.path.join(consts.IMAGE_FOLDER, f'{env_variables["cluster_name"]}-installer-image.iso')).\
strip()
env_variables["iso_download_path"] = image
env_variables["num_nodes"] = env_variables["num_workers"] + env_variables["num_masters"]
@pytest.fixture(scope="session")
def api_client():
logging.info(f'--- SETUP --- api_client\n')
yield get_api_client()
def get_api_client(offline_token=env_variables['offline_token'], **kwargs):
url = env_variables['remote_service_url']
if not url:
url = utils.get_local_assisted_service_url(
utils.get_env('PROFILE'), utils.get_env('NAMESPACE'), 'assisted-service', utils.get_env('DEPLOY_TARGET'))
return assisted_service_api.create_client(url, offline_token, **kwargs)
@pytest.fixture(scope="session")
def setup_node_controller():
logging.info(f'--- SETUP --- node controller\n')
yield nodeController
logging.info(f'--- TEARDOWN --- node controller\n')
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
result = outcome.get_result()
setattr(item, "result_" + result.when, result)
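# Illustrative sketch (not part of the original conftest): a teardown fixture can
# read the attribute attached above to react to failures; the fixture name below
# is an assumption, only `result_call` comes from the hook.
#
# @pytest.fixture
# def collect_logs_on_failure(request):
#     yield
#     report = getattr(request.node, "result_call", None)
#     if report is not None and report.failed:
#         logging.warning("test %s failed, collecting logs", request.node.name)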
|
the-stack_106_17961
|
import os
import sys
import glob
import ntpath
import subprocess
import re
import shlex
import argparse
from os.path import join
parser = argparse.ArgumentParser(description='Log file evaluator.')
parser.add_argument('-f', '--folder-path', type=str, default=None, help='The folder with logfiles of models to evaluate.')
parser.add_argument('--finished-contains', type=str, default='done training', help='The string to recognize if a logfile contains a complete training run.')
parser.add_argument('--last-checkpoint', type=str, default='checkpoint_last.pt', help='The name of the last checkpoint file.')
parser.add_argument('--best-checkpoint', type=str, default='checkpoint_best.pt', help='The name of the best checkpoint file.')
parser.add_argument('--start', type=str, default='', help='String after which the checkpoint path appears in the log line.')
parser.add_argument('--end', type=str, default='\n', help='String before which the checkpoint path ends in the log line.')
parser.add_argument('--dry', action='store_true', help='Prints the commands that would be executed without executing them')
parser.add_argument('--append', action='store_true', help='Append the evaluation data to the log file instead of creating a new one')
parser.add_argument('--out', default=None, type=str, help='The output folder')
parser.add_argument('--args', type=str, default='--special-eval', help='Additional args for fairseq.')
parser.add_argument('--fairseq-path', type=str, default='/private/home/timdettmers/git/fairseq_private', help='The path to the fairseq source.')
parser.add_argument('--filter', nargs='+', type=str, default='', help='Only evaluate configs with these key-value parameters. Space separated key-values.')
args = parser.parse_args()
if args.out is None and not args.append:
    print('Either set the output path --out or set the --append option to append to the log file.')
    sys.exit(1)
if args.out is not None and not os.path.exists(args.out):
os.makedirs(args.out)
def clean_string(key):
key = key.strip()
key = key.replace("'", '')
key = key.replace('"', '')
key = key.replace(']', '')
key = key.replace('[', '')
key = key.replace('(', '')
key = key.replace(')', '')
return key
def execute_and_return(strCMD):
proc = subprocess.Popen(shlex.split(strCMD), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
out, err = out.decode("UTF-8").strip(), err.decode("UTF-8").strip()
return out, err
folders = [x[0] for x in os.walk(args.folder_path)]
regex = re.compile(r'(?<={0}).*(?={1})'.format(args.start, args.end))
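# Illustrative example (the flag values are hypothetical, not taken from real logs):
# with --start "saved checkpoint " and --end " for epoch", the lookbehind/lookahead
# pair extracts just the path sitting between the two markers, e.g.
#   re.findall(r'(?<=saved checkpoint ).*(?= for epoch)',
#              'saved checkpoint /ckpt/checkpoint_best.pt for epoch 12')
#   -> ['/ckpt/checkpoint_best.pt']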
configs = []
all_cols = set(['NAME'])
for folder in folders:
files = list(glob.iglob(join(folder, '*.log')))
n = len(files)
i = 0
for log_name in files:
config = {}
i += 1
with open(log_name) as f:
lines = f.readlines()
finished = False
last_ckpt = None
best_ckpt = None
namespace = None
for line in lines[::-1]:
if args.finished_contains in line: finished = True
if args.last_checkpoint in line:
matches = re.findall(regex, line)
if len(matches) > 0:
last_ckpt = matches[0].strip()
if args.best_checkpoint in line:
matches = re.findall(regex, line)
if len(matches) > 0:
best_ckpt = matches[0].strip()
if 'Namespace(' in line:
namespace = line
line = line[line.find('Namespace(')+len('Namespace('):]
matches = re.findall(r'(?!^\()([^=,]+)=([^\0]+?)(?=,[^,]+=|\)$)', line)
for m in matches:
key = clean_string(m[0])
value = clean_string(m[1])
config[key] = value
if last_ckpt is None and best_ckpt is not None: last_ckpt = best_ckpt
if last_ckpt is None or namespace is None or not finished: continue
        if 'data' not in config:
            print('Dataset not found! Skipping this log file: {0}'.format(log_name))
            continue
if len(args.filter) > 0:
execute = True
for keyvalue in args.filter:
key, value = keyvalue.split('=')
key = key.strip()
value = value.strip()
if key not in config: execute = False
else:
if value != config[key]: execute = False
if not execute: continue
cmd = 'fairseq-eval-lm --path {0} --max-tokens 4096 --skip-invalid-size-inputs-valid-test --log-format simple --log-interval 100 {2} {1}'.format(best_ckpt if best_ckpt is not None else last_ckpt, join(args.fairseq_path, config['data']), args.args)
if args.dry:
print(cmd)
else:
print('Executing command {0}/{1}'.format(i, n+1))
out, err = execute_and_return(cmd)
out = out + '\n' + err
if 'Traceback' in out:
print('ERROR!')
print(log_name)
print(out)
print(cmd)
else:
if args.append:
with open(log_name, 'a') as g:
g.write('\n')
g.write(out)
else:
with open(join(args.out, ntpath.basename(log_name)), 'w') as g:
g.write('\n')
g.write(namespace + '\n')
g.write(out)
|
the-stack_106_17962
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Portfolio """
import unittest
from test import QiskitFinanceTestCase
import datetime
import numpy as np
from qiskit_optimization.applications.ising.common import sample_most_likely
from qiskit import BasicAer
from qiskit.utils import algorithm_globals, QuantumInstance
from qiskit.algorithms import NumPyMinimumEigensolver, QAOA
from qiskit.algorithms.optimizers import COBYLA
from qiskit_finance.applications.ising import portfolio
from qiskit_finance.data_providers import RandomDataProvider
class TestPortfolio(QiskitFinanceTestCase):
"""Tests Portfolio Ising translator."""
def setUp(self):
super().setUp()
self.seed = 50
algorithm_globals.random_seed = self.seed
num_assets = 4
stocks = [("TICKER%s" % i) for i in range(num_assets)]
data = RandomDataProvider(tickers=stocks,
start=datetime.datetime(2016, 1, 1),
end=datetime.datetime(2016, 1, 30),
seed=self.seed)
data.run()
self.muu = data.get_period_return_mean_vector()
self.sigma = data.get_period_return_covariance_matrix()
self.risk = 0.5
self.budget = int(num_assets / 2)
self.penalty = num_assets
self.qubit_op, self.offset = portfolio.get_operator(
self.muu, self.sigma, self.risk, self.budget, self.penalty)
def test_portfolio(self):
""" portfolio test """
algo = NumPyMinimumEigensolver()
result = algo.compute_minimum_eigenvalue(operator=self.qubit_op)
selection = sample_most_likely(result.eigenstate)
value = portfolio.portfolio_value(
selection, self.muu, self.sigma, self.risk, self.budget, self.penalty)
np.testing.assert_array_equal(selection, [0, 1, 1, 0])
self.assertAlmostEqual(value, -0.00679917)
def test_portfolio_qaoa(self):
""" portfolio test with QAOA """
backend = BasicAer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend=backend,
seed_simulator=self.seed,
seed_transpiler=self.seed)
qaoa = QAOA(optimizer=COBYLA(maxiter=500),
initial_point=[0., 0.],
quantum_instance=quantum_instance)
result = qaoa.compute_minimum_eigenvalue(operator=self.qubit_op)
selection = sample_most_likely(result.eigenstate)
value = portfolio.portfolio_value(
selection, self.muu, self.sigma, self.risk, self.budget, self.penalty)
np.testing.assert_array_equal(selection, [0, 1, 1, 0])
self.assertAlmostEqual(value, -0.00679917)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_17963
|
"""
Process raw qstr file and output qstr data with length, hash and data bytes.
This script works with Python 2.7, 3.3 and 3.4.
For documentation about the format of compressed translated strings, see
supervisor/shared/translate.h
"""
from __future__ import print_function
import re
import sys
import collections
import gettext
import os.path
py = os.path.dirname(sys.argv[0])
top = os.path.dirname(py)
sys.path.append(os.path.join(top, "tools/huffman"))
import huffman
# Python 2/3 compatibility:
# - iterating through bytes is different
# - codepoint2name lives in a different module
import platform
if platform.python_version_tuple()[0] == '2':
bytes_cons = lambda val, enc=None: bytearray(val)
from htmlentitydefs import codepoint2name
elif platform.python_version_tuple()[0] == '3':
bytes_cons = bytes
from html.entities import codepoint2name
# end compatibility code
codepoint2name[ord('-')] = 'hyphen'
# add some custom names to map characters that aren't in HTML
codepoint2name[ord(' ')] = 'space'
codepoint2name[ord('\'')] = 'squot'
codepoint2name[ord(',')] = 'comma'
codepoint2name[ord('.')] = 'dot'
codepoint2name[ord(':')] = 'colon'
codepoint2name[ord(';')] = 'semicolon'
codepoint2name[ord('/')] = 'slash'
codepoint2name[ord('%')] = 'percent'
codepoint2name[ord('#')] = 'hash'
codepoint2name[ord('(')] = 'paren_open'
codepoint2name[ord(')')] = 'paren_close'
codepoint2name[ord('[')] = 'bracket_open'
codepoint2name[ord(']')] = 'bracket_close'
codepoint2name[ord('{')] = 'brace_open'
codepoint2name[ord('}')] = 'brace_close'
codepoint2name[ord('*')] = 'star'
codepoint2name[ord('!')] = 'bang'
codepoint2name[ord('\\')] = 'backslash'
codepoint2name[ord('+')] = 'plus'
codepoint2name[ord('$')] = 'dollar'
codepoint2name[ord('=')] = 'equals'
codepoint2name[ord('?')] = 'question'
codepoint2name[ord('@')] = 'at_sign'
codepoint2name[ord('^')] = 'caret'
codepoint2name[ord('|')] = 'pipe'
codepoint2name[ord('~')] = 'tilde'
C_ESCAPES = {
"\a": "\\a",
"\b": "\\b",
"\f": "\\f",
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
"\v": "\\v",
"\'": "\\'",
"\"": "\\\""
}
# this must match the equivalent function in qstr.c
def compute_hash(qstr, bytes_hash):
hash = 5381
for b in qstr:
hash = (hash * 33) ^ b
# Make sure that valid hash is never zero, zero means "hash not computed"
return (hash & ((1 << (8 * bytes_hash)) - 1)) or 1
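# Illustrative sanity checks (not part of the original script); the values follow
# directly from the DJB2-style loop above:
#   compute_hash(b"", 1) -> 5     (5381 & 0xff, empty string, one hash byte)
#   compute_hash(b"", 2) -> 5381  (5381 & 0xffff)
#   a result of zero is remapped to 1 so that 0 can mean "hash not computed"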
def translate(translation_file, i18ns):
with open(translation_file, "rb") as f:
table = gettext.GNUTranslations(f)
translations = []
for original in i18ns:
unescaped = original
for s in C_ESCAPES:
unescaped = unescaped.replace(C_ESCAPES[s], s)
translation = table.gettext(unescaped)
# Add in carriage returns to work in terminals
translation = translation.replace("\n", "\r\n")
translations.append((original, translation))
return translations
def compute_huffman_coding(translations, qstrs, compression_filename):
all_strings = [x[1] for x in translations]
all_strings_concat = "".join(all_strings)
counts = collections.Counter(all_strings_concat)
cb = huffman.codebook(counts.items())
values = []
length_count = {}
renumbered = 0
last_l = None
canonical = {}
for ch, code in sorted(cb.items(), key=lambda x: (len(x[1]), x[0])):
values.append(ch)
l = len(code)
if l not in length_count:
length_count[l] = 0
length_count[l] += 1
if last_l:
renumbered <<= (l - last_l)
canonical[ch] = '{0:0{width}b}'.format(renumbered, width=l)
s = C_ESCAPES.get(ch, ch)
print("//", ord(ch), s, counts[ch], canonical[ch], renumbered)
renumbered += 1
last_l = l
lengths = bytearray()
print("// length count", length_count)
for i in range(1, max(length_count) + 2):
lengths.append(length_count.get(i, 0))
print("// values", values, "lengths", len(lengths), lengths)
print("// estimated total memory size", len(lengths) + 2*len(values) + sum(len(cb[u]) for u in all_strings_concat))
print("//", values, lengths)
values_type = "uint16_t" if max(ord(u) for u in values) > 255 else "uint8_t"
max_translation_encoded_length = max(len(translation.encode("utf-8")) for original,translation in translations)
with open(compression_filename, "w") as f:
f.write("const uint8_t lengths[] = {{ {} }};\n".format(", ".join(map(str, lengths))))
f.write("const {} values[] = {{ {} }};\n".format(values_type, ", ".join(str(ord(u)) for u in values)))
f.write("#define compress_max_length_bits ({})\n".format(max_translation_encoded_length.bit_length()))
return values, lengths
def decompress(encoding_table, encoded, encoded_length_bits):
values, lengths = encoding_table
dec = []
this_byte = 0
this_bit = 7
b = encoded[this_byte]
bits = 0
for i in range(encoded_length_bits):
bits <<= 1
if 0x80 & b:
bits |= 1
b <<= 1
if this_bit == 0:
this_bit = 7
this_byte += 1
if this_byte < len(encoded):
b = encoded[this_byte]
else:
this_bit -= 1
length = bits
i = 0
while i < length:
bits = 0
bit_length = 0
max_code = lengths[0]
searched_length = lengths[0]
while True:
bits <<= 1
if 0x80 & b:
bits |= 1
b <<= 1
bit_length += 1
if this_bit == 0:
this_bit = 7
this_byte += 1
if this_byte < len(encoded):
b = encoded[this_byte]
else:
this_bit -= 1
if max_code > 0 and bits < max_code:
#print('{0:0{width}b}'.format(bits, width=bit_length))
break
max_code = (max_code << 1) + lengths[bit_length]
searched_length += lengths[bit_length]
v = values[searched_length + bits - max_code]
i += len(v.encode('utf-8'))
dec.append(v)
return ''.join(dec)
def compress(encoding_table, decompressed, encoded_length_bits, len_translation_encoded):
if not isinstance(decompressed, str):
raise TypeError()
values, lengths = encoding_table
enc = bytearray(len(decompressed) * 3)
#print(decompressed)
#print(lengths)
current_bit = 7
current_byte = 0
code = len_translation_encoded
bits = encoded_length_bits+1
for i in range(bits - 1, 0, -1):
if len_translation_encoded & (1 << (i - 1)):
enc[current_byte] |= 1 << current_bit
if current_bit == 0:
current_bit = 7
#print("packed {0:0{width}b}".format(enc[current_byte], width=8))
current_byte += 1
else:
current_bit -= 1
for c in decompressed:
#print()
#print("char", c, values.index(c))
start = 0
end = lengths[0]
bits = 1
compressed = None
code = 0
while compressed is None:
s = start
e = end
#print("{0:0{width}b}".format(code, width=bits))
# Binary search!
while e > s:
midpoint = (s + e) // 2
#print(s, e, midpoint)
if values[midpoint] == c:
compressed = code + (midpoint - start)
#print("found {0:0{width}b}".format(compressed, width=bits))
break
elif c < values[midpoint]:
e = midpoint
else:
s = midpoint + 1
code += end - start
code <<= 1
start = end
end += lengths[bits]
bits += 1
#print("next bit", bits)
for i in range(bits - 1, 0, -1):
if compressed & (1 << (i - 1)):
enc[current_byte] |= 1 << current_bit
if current_bit == 0:
current_bit = 7
#print("packed {0:0{width}b}".format(enc[current_byte], width=8))
current_byte += 1
else:
current_bit -= 1
if current_bit != 7:
current_byte += 1
return enc[:current_byte]
def qstr_escape(qst):
def esc_char(m):
c = ord(m.group(0))
try:
name = codepoint2name[c]
except KeyError:
name = '0x%02x' % c
return "_" + name + '_'
return re.sub(r'[^A-Za-z0-9_]', esc_char, qst)
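# Illustrative examples (not in the original file), following the substitution above:
#   qstr_escape("foo bar")  -> "foo_space_bar"
#   qstr_escape("__add__")  -> "__add__"        (already safe characters)
#   qstr_escape("100%")     -> "100_percent_"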
def parse_input_headers(infiles):
# read the qstrs in from the input files
qcfgs = {}
qstrs = {}
i18ns = set()
for infile in infiles:
with open(infile, 'rt') as f:
for line in f:
line = line.strip()
# is this a config line?
match = re.match(r'^QCFG\((.+), (.+)\)', line)
if match:
value = match.group(2)
if value[0] == '(' and value[-1] == ')':
# strip parenthesis from config value
value = value[1:-1]
qcfgs[match.group(1)] = value
continue
match = re.match(r'^TRANSLATE\("(.*)"\)$', line)
if match:
i18ns.add(match.group(1))
continue
# is this a QSTR line?
match = re.match(r'^Q\((.*)\)$', line)
if not match:
continue
# get the qstr value
qstr = match.group(1)
# special case to specify control characters
if qstr == '\\n':
qstr = '\n'
# work out the corresponding qstr name
ident = qstr_escape(qstr)
# don't add duplicates
if ident in qstrs:
continue
# add the qstr to the list, with order number to retain original order in file
order = len(qstrs)
# but put special method names like __add__ at the top of list, so
# that their id's fit into a byte
if ident == "":
# Sort empty qstr above all still
order = -200000
elif ident == "__dir__":
# Put __dir__ after empty qstr for builtin dir() to work
order = -190000
elif ident.startswith("__"):
order -= 100000
qstrs[ident] = (order, ident, qstr)
if not qcfgs and qstrs:
sys.stderr.write("ERROR: Empty preprocessor output - check for errors above\n")
sys.exit(1)
return qcfgs, qstrs, i18ns
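# Illustrative shape of the preprocessed input this parser expects (hypothetical
# content, matching the QCFG/TRANSLATE/Q regexes above):
#   QCFG(BYTES_IN_LEN, (1))
#   QCFG(BYTES_IN_HASH, 1)
#   Q(print)
#   Q(__add__)
#   TRANSLATE("soft reboot")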
def make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr):
qbytes = bytes_cons(qstr, 'utf8')
qlen = len(qbytes)
qhash = compute_hash(qbytes, cfg_bytes_hash)
if all(32 <= ord(c) <= 126 and c != '\\' and c != '"' for c in qstr):
# qstr is all printable ASCII so render it as-is (for easier debugging)
qdata = qstr
else:
# qstr contains non-printable codes so render entire thing as hex pairs
qdata = ''.join(('\\x%02x' % b) for b in qbytes)
if qlen >= (1 << (8 * cfg_bytes_len)):
print('qstr is too long:', qstr)
assert False
qlen_str = ('\\x%02x' * cfg_bytes_len) % tuple(((qlen >> (8 * i)) & 0xff) for i in range(cfg_bytes_len))
qhash_str = ('\\x%02x' * cfg_bytes_hash) % tuple(((qhash >> (8 * i)) & 0xff) for i in range(cfg_bytes_hash))
return '(const byte*)"%s%s" "%s"' % (qhash_str, qlen_str, qdata)
def print_qstr_data(encoding_table, qcfgs, qstrs, i18ns):
# get config variables
cfg_bytes_len = int(qcfgs['BYTES_IN_LEN'])
cfg_bytes_hash = int(qcfgs['BYTES_IN_HASH'])
# print out the starter of the generated C header file
print('// This file was automatically generated by makeqstrdata.py')
print('')
# add NULL qstr with no hash or data
print('QDEF(MP_QSTR_NULL, (const byte*)"%s%s" "")' % ('\\x00' * cfg_bytes_hash, '\\x00' * cfg_bytes_len))
total_qstr_size = 0
total_qstr_compressed_size = 0
# go through each qstr and print it out
for order, ident, qstr in sorted(qstrs.values(), key=lambda x: x[0]):
qbytes = make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr)
print('QDEF(MP_QSTR_%s, %s)' % (ident, qbytes))
total_qstr_size += len(qstr)
total_text_size = 0
total_text_compressed_size = 0
max_translation_encoded_length = max(len(translation.encode("utf-8")) for original, translation in i18ns)
encoded_length_bits = max_translation_encoded_length.bit_length()
for original, translation in i18ns:
translation_encoded = translation.encode("utf-8")
compressed = compress(encoding_table, translation, encoded_length_bits, len(translation_encoded))
total_text_compressed_size += len(compressed)
decompressed = decompress(encoding_table, compressed, encoded_length_bits)
assert decompressed == translation
for c in C_ESCAPES:
decompressed = decompressed.replace(c, C_ESCAPES[c])
print("TRANSLATION(\"{}\", {}) // {}".format(original, ", ".join(["{:d}".format(x) for x in compressed]), decompressed))
total_text_size += len(translation.encode("utf-8"))
print()
print("// {} bytes worth of qstr".format(total_qstr_size))
print("// {} bytes worth of translations".format(total_text_size))
print("// {} bytes worth of translations compressed".format(total_text_compressed_size))
print("// {} bytes saved".format(total_text_size - total_text_compressed_size))
def print_qstr_enums(qstrs):
# print out the starter of the generated C header file
print('// This file was automatically generated by makeqstrdata.py')
print('')
# add NULL qstr with no hash or data
print('QENUM(MP_QSTR_NULL)')
# go through each qstr and print it out
for order, ident, qstr in sorted(qstrs.values(), key=lambda x: x[0]):
print('QENUM(MP_QSTR_%s)' % (ident,))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Process QSTR definitions into headers for compilation')
parser.add_argument('infiles', metavar='N', type=str, nargs='+',
help='an integer for the accumulator')
parser.add_argument('--translation', default=None, type=str,
help='translations for i18n() items')
parser.add_argument('--compression_filename', default=None, type=str,
help='header for compression info')
args = parser.parse_args()
qcfgs, qstrs, i18ns = parse_input_headers(args.infiles)
if args.translation:
i18ns = sorted(i18ns)
translations = translate(args.translation, i18ns)
encoding_table = compute_huffman_coding(translations, qstrs, args.compression_filename)
print_qstr_data(encoding_table, qcfgs, qstrs, translations)
else:
print_qstr_enums(qstrs)
|
the-stack_106_17964
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8340 import *
class agilent8340A(agilentBase8340):
"Agilent 8340A IVI RF sweep generator driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'HP8340A')
super(agilent8340A, self).__init__(*args, **kwargs)
self._frequency_low = 10e6
self._frequency_high = 26.5e9
|
the-stack_106_17965
|
from subprocess import Popen, PIPE
import subprocess
import os
from datetime import datetime
import time
podpackages = [
'AWSCore.podspec',
'AWSAPIGateway.podspec',
'AWSAutoScaling.podspec',
'AWSCloudWatch.podspec',
'AWSCognito.podspec',
'AWSCognitoIdentityProvider.podspec',
'AWSCognitoSync.podspec',
'AWSCognitoAuth.podspec',
'AWSDynamoDB.podspec',
'AWSEC2.podspec',
'AWSElasticLoadBalancing.podspec',
'AWSIoT.podspec',
'AWSKinesis.podspec',
'AWSKinesisVideo.podspec',
'AWSKinesisVideoArchivedMedia.podspec',
'AWSKMS.podspec',
'AWSLambda.podspec',
'AWSLogs.podspec',
'AWSMachineLearning.podspec',
'AWSMobileAnalytics.podspec',
'AWSPinpoint.podspec',
'AWSS3.podspec',
'AWSSES.podspec',
'AWSSimpleDB.podspec',
'AWSSNS.podspec',
'AWSSQS.podspec',
'AWSLex.podspec',
'AWSPolly.podspec',
'AWSRekognition.podspec',
'AWSTranslate.podspec',
'AWSComprehend.podspec',
'AWSTranscribe.podspec',
'AWSAuthCore.podspec',
'AWSUserPoolsSignIn.podspec',
'AWSFacebookSignIn.podspec',
'AWSGoogleSignIn.podspec',
'AWSAuthUI.podspec',
'AWSAuth.podspec',
'AWSMobileClient.podspec',
'AWSiOSSDKv2.podspec',
]
print (str(datetime.now()) + ': publishing cocoapods ...')
for package in podpackages:
print (str(datetime.now())+': publishing ' + package + ' ...')
process = Popen(["pod", 'trunk','push',package,'--allow-warnings'], stdout= PIPE, stderr= PIPE)
    wait_times = 0
while True:
try:
(output, err) = process.communicate(timeout = 10)
except subprocess.TimeoutExpired:
            wait_times = wait_times + 1
            # tell CircleCI I am still alive, don't kill me
            if wait_times % 30 == 0:
                print(str(datetime.now()) + ": I am still alive")
            if wait_times > 600:
                print(str(datetime.now()) + ": time out")
                quit(1)
continue
break
exit_code = process.wait()
    if exit_code != 0:
if "Unable to accept duplicate entry for:" in str(output):
print (str(datetime.now()) +": " + package +" is already published")
else:
print(output)
print(err, exit_code )
print(str(datetime.now()) + " Failed to publish " + package)
            quit(exit_code)
print (str(datetime.now())+': published ' + package)
|
the-stack_106_17966
|
# Copyright 2018 Owkin, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import requests
from substra.sdk import exceptions, assets, utils
logger = logging.getLogger(__name__)
DEFAULT_RETRY_TIMEOUT = 5 * 60
class Client():
"""REST Client to communicate with Substra server."""
def __init__(self, config=None):
self._headers = {}
self._default_kwargs = {}
self._base_url = None
if config:
self.set_config(config)
def set_config(self, config):
"""Reset internal attributes from config."""
# get default requests keyword arguments from config
kwargs = {}
if config['auth']:
user, password = config['auth']['user'], config['auth']['password']
kwargs['auth'] = (user, password)
if config['insecure']:
kwargs['verify'] = False
# get default HTTP headers from config
headers = {'Accept': 'application/json;version={}'.format(config['version'])}
self._headers = headers
self._default_kwargs = kwargs
self._base_url = config['url'][:-1] if config['url'].endswith('/') else config['url']
def _request(self, request_name, url, **request_kwargs):
"""Base request helper."""
if request_name == 'get':
fn = requests.get
elif request_name == 'post':
fn = requests.post
else:
raise NotImplementedError
# override default request arguments with input arguments
kwargs = dict(self._default_kwargs)
kwargs.update(request_kwargs)
# do HTTP request and catch generic exceptions
try:
r = fn(url, headers=self._headers, **kwargs)
r.raise_for_status()
except requests.exceptions.ConnectionError as e:
raise exceptions.ConnectionError.from_request_exception(e)
except requests.exceptions.Timeout as e:
raise exceptions.Timeout.from_request_exception(e)
except requests.exceptions.HTTPError as e:
logger.error(f"Requests error status {e.response.status_code}: {e.response.text}")
if e.response.status_code == 400:
raise exceptions.InvalidRequest.from_request_exception(e)
if e.response.status_code == 401:
raise exceptions.AuthenticationError.from_request_exception(e)
if e.response.status_code == 403:
raise exceptions.AuthorizationError.from_request_exception(e)
if e.response.status_code == 404:
raise exceptions.NotFound.from_request_exception(e)
if e.response.status_code == 408:
raise exceptions.RequestTimeout.from_request_exception(e)
if e.response.status_code == 409:
raise exceptions.AlreadyExists.from_request_exception(e)
if e.response.status_code == 500:
raise exceptions.InternalServerError.from_request_exception(e)
raise exceptions.HTTPError.from_request_exception(e)
return r
def request(self, request_name, asset_name, path=None, json_response=True,
**request_kwargs):
"""Base request."""
path = path or ''
url = f"{self._base_url}/{assets.to_server_name(asset_name)}/{path}"
if not url.endswith("/"):
url = url + "/" # server requires a suffix /
response = self._request(
request_name,
url,
**request_kwargs,
)
if not json_response:
return response
try:
return response.json()
except ValueError as e:
msg = f"Cannot parse response to JSON: {e}"
raise exceptions.InvalidResponse(response, msg)
def get(self, name, key):
"""Get asset by key."""
return self.request(
'get',
name,
path=f"{key}",
)
def list(self, name, filters=None):
"""List assets by filters."""
request_kwargs = {}
if filters:
request_kwargs['params'] = utils.parse_filters(filters)
items = self.request(
'get',
name,
**request_kwargs,
)
# when filtering 'complex' assets the server responds with a list per filter
# item, these list of list must then be flatten
if isinstance(items, list) and all([isinstance(i, list) for i in items]):
items = utils.flatten(items)
return items
def add(self, name, retry_timeout=DEFAULT_RETRY_TIMEOUT, exist_ok=False,
**request_kwargs):
"""Add asset.
In case of timeout, block till resource is created.
If `exist_ok` is true, `AlreadyExists` exceptions will be ignored and the
existing asset will be returned.
"""
try:
return self.request(
'post',
name,
**request_kwargs,
)
except exceptions.RequestTimeout as e:
logger.warning(
'Request timeout, blocking till asset is created')
key = e.pkhash
is_many = isinstance(key, list) # timeout on many objects is not handled
if not retry_timeout or is_many:
raise e
retry = utils.retry_on_exception(
exceptions=(exceptions.NotFound),
timeout=float(retry_timeout),
)
return retry(self.get)(name, key)
except exceptions.AlreadyExists as e:
if not exist_ok:
raise
key = e.pkhash
is_many = isinstance(key, list)
if is_many:
logger.warning("Many assets not compatible with 'exist_ok' option")
raise
logger.warning(f"{name} already exists: key='{key}'")
return self.get(name, key)
def get_data(self, address, **request_kwargs):
"""Get asset data."""
return self._request(
'get',
address,
**request_kwargs,
)
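# Hypothetical usage sketch (not part of the original module; the config keys
# mirror what set_config() reads above, and the asset names are assumptions,
# not taken from substra documentation):
#
# client = Client({
#     'url': 'https://substra.example.org',
#     'version': '0.0',
#     'auth': {'user': 'alice', 'password': 'secret'},
#     'insecure': False,
# })
# algos = client.list('algo', filters=None)
# algo = client.get('algo', 'some_key')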
|
the-stack_106_17968
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# %% Load basic libraries
import configparser
import os
import sys
cur_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
#conf_path = os.path.join(cur_dir, 'config.ini')
conf_path = os.path.abspath(r'F:\gitee\knowledge\Python\yys_script\src\conf\config.ini')
# %% Open the config file with utf-8 encoding, otherwise Chinese text is garbled
config = configparser.RawConfigParser()
config.read(conf_path, encoding='utf-8')
# %% sections options
sections = config.sections() # [str]
general_options = config.options('general') # [str]
print(sections, general_options)
# %% Test getting values
print(config.get('general', 'title'))
print(config.getint('yuhun', 'players'))
# %% Test setting values
config.set('yuhun', 'players', '3')
print(config.getint('yuhun', 'players'))
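# %% Note (added illustration): config.set() only changes the in-memory parser;
# to persist the change one would also write the file back, e.g.
# with open(conf_path, 'w', encoding='utf-8') as f:
#     config.write(f)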
|
the-stack_106_17969
|
import argparse
import math
import os
import subprocess
import time
import traceback
from datetime import datetime
import tensorflow as tf
from datasets.datafeeder import DataFeeder
from hparams import hparams, hparams_debug_string
from models import create_model
from text import sequence_to_text
from util import audio, infolog, plot, ValueWindow
log = infolog.log
def get_git_commit():
subprocess.check_output(['git', 'diff-index', '--quiet', 'HEAD']) # Verify client is clean
commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()[:10]
log('Git commit: %s' % commit)
return commit
def add_stats(model):
with tf.variable_scope('stats') as scope:
tf.summary.histogram('linear_outputs', model.linear_outputs)
tf.summary.histogram('linear_targets', model.linear_targets)
tf.summary.histogram('mel_outputs', model.mel_outputs)
tf.summary.histogram('mel_targets', model.mel_targets)
tf.summary.scalar('loss_mel', model.mel_loss)
tf.summary.scalar('loss_linear', model.linear_loss)
tf.summary.scalar('learning_rate', model.learning_rate)
tf.summary.scalar('loss', model.loss)
gradient_norms = [tf.norm(grad) for grad in model.gradients]
tf.summary.histogram('gradient_norm', gradient_norms)
tf.summary.scalar('max_gradient_norm', tf.reduce_max(gradient_norms))
return tf.summary.merge_all()
def time_string():
return datetime.now().strftime('%Y-%m-%d %H:%M')
def train(log_dir, args):
commit = get_git_commit() if args.git else 'None'
checkpoint_path = os.path.join(log_dir, 'model.ckpt')
input_path = os.path.join(args.base_dir, args.input)
log('Checkpoint path: %s' % checkpoint_path)
log('Loading training data from: %s' % input_path)
log('Using model: %s' % args.model)
log(hparams_debug_string())
# Set up DataFeeder:
coord = tf.train.Coordinator()
with tf.variable_scope('datafeeder') as scope:
feeder = DataFeeder(coord, input_path, hparams)
# Set up model:
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('model') as scope:
model = create_model(args.model, hparams)
model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.linear_targets)
model.add_loss()
model.add_optimizer(global_step)
stats = add_stats(model)
# Bookkeeping:
step = 0
time_window = ValueWindow(100)
loss_window = ValueWindow(100)
saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=2)
# Train!
with tf.Session() as sess:
try:
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
sess.run(tf.global_variables_initializer())
if args.restore_step:
# Restore from a checkpoint if the user requested it.
restore_path = '%s-%d' % (checkpoint_path, args.restore_step)
saver.restore(sess, restore_path)
log('Resuming from checkpoint: %s at commit: %s' % (restore_path, commit), slack=True)
else:
log('Starting new training run at commit: %s' % commit, slack=True)
feeder.start_in_session(sess)
while not coord.should_stop():
start_time = time.time()
step, loss, opt = sess.run([global_step, model.loss, model.optimize])
time_window.append(time.time() - start_time)
loss_window.append(loss)
message = 'Step %-7d [%.03f sec/step, loss=%.05f, avg_loss=%.05f]' % (
step, time_window.average, loss, loss_window.average)
log(message, slack=(step % args.checkpoint_interval == 0))
if loss > 100 or math.isnan(loss):
log('Loss exploded to %.05f at step %d!' % (loss, step), slack=True)
raise Exception('Loss Exploded')
if step % args.summary_interval == 0:
log('Writing summary at step: %d' % step)
summary_writer.add_summary(sess.run(stats), step)
if step % args.checkpoint_interval == 0:
log('Saving checkpoint to: %s-%d' % (checkpoint_path, step))
saver.save(sess, checkpoint_path, global_step=step)
log('Saving audio and alignment...')
input_seq, spectrogram, alignment = sess.run([
model.inputs[0], model.linear_outputs[0], model.alignments[0]])
waveform = audio.inv_spectrogram(spectrogram.T)
audio.save_wav(waveform, os.path.join(log_dir, 'step-%d-audio.wav' % step))
plot.plot_alignment(alignment, os.path.join(log_dir, 'step-%d-align.png' % step),
info='%s, %s, %s, step=%d, loss=%.5f' % (
args.model, commit, time_string(), step, loss))
log('Input: %s' % sequence_to_text(input_seq))
except Exception as e:
log('Exiting due to exception: %s' % e, slack=True)
traceback.print_exc()
coord.request_stop(e)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
parser.add_argument('--input', default='training/train.txt')
parser.add_argument('--model', default='tacotron')
parser.add_argument('--name', help='Name of the run. Used for logging. Defaults to model name.')
parser.add_argument('--hparams', default='',
help='Hyperparameter overrides as a comma-separated list of name=value pairs')
parser.add_argument('--restore_step', type=int, help='Global step to restore from checkpoint.')
parser.add_argument('--summary_interval', type=int, default=100,
help='Steps between running summary ops.')
parser.add_argument('--checkpoint_interval', type=int, default=1000,
help='Steps between writing checkpoints.')
parser.add_argument('--slack_url', help='Slack webhook URL to get periodic reports.')
parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
parser.add_argument('--git', action='store_true', help='If set, verify that the client is clean.')
args = parser.parse_args()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
run_name = args.name or args.model
log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
os.makedirs(log_dir, exist_ok=True)
infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
hparams.parse(args.hparams)
train(log_dir, args)
if __name__ == '__main__':
main()
|
the-stack_106_17970
|
import gomaps, time
if __name__ == "__main__":
t0 = time.process_time()
results = gomaps.maps_search("Tops Diner, NJ")
print(results)
values = results[0].get_values()
for val in values.values():
print(val)
assert val != None and val != {}, "Gomaps results missing values!"
results = gomaps.maps_search("Tops Diner, NJ", fields=["coords", "rating"])
print(results.values)
assert len(results.values) == 2, "Gomaps fields feature failed!"
t1 = time.process_time()
total = t1 - t0
print(f"\nTimestamp 1: {t0} secs\nTimestamp 2: {t1} secs")
print("Module Time Elapsed:", total, "seconds")
|
the-stack_106_17971
|
# This program takes an image using L1_camera, applies filters with openCV, and returns
# a color target if located in the image. The target parameters are (x,y,radius).
# This program requires that opencv2 is installed for python3.
"""
***************************************************************************
HEAVILY MODIFIED FROM ORIGINAL
***************************************************************************
Can be used to interactively find and tune HSV colour-range values.
"""
# Import internal programs:
import L1_camera as cam
# Import external programs:
import cv2 # computer vision
import numpy as np # for handling matrices
import time # for keeping time
# Define global parameters
color_range = ((0,100,180),(25,145,255)) # This color range defines the color target
yote = 1
cont = 0
prev = None
def autoYeet(color_range,yote = 1,cont = 0,prev=None):
h1,h2,s1,s2,v1,v2 = 0,0,0,0,0,0
print('----------------')
print('curr vals: ',color_range)
print('----------------')
ans = prev
if not cont:
print('val to test (hmin,smin,vmin,hmax,smax,vmax): ')
ans = input()
cont = 1
print('up or down?(u,d,n,back): ')
tmp = input()
if tmp == 'back':
cont = 0
return color_range,yote,cont,ans
if tmp == 'yeet' or ans =='yeet':
yote = 0
return color_range, yote,cont,ans
if tmp =='n' or tmp =='':
return color_range,yote,cont,ans
h1 = color_range[0][0]
s1 = color_range[0][1]
v1 = color_range[0][2]
h2 = color_range[1][0]
s2 = color_range[1][1]
v2 = color_range[1][2]
# what do you wanna tell joe byron right now?
# wassup baby, take me out to dinner *wink*
# AYO?!
if ans == 'hmin':
if tmp == 'u':
h1 = h1 + 10
elif tmp =='d':
h1 = h1 - 10
if ans == 'smin':
if tmp == 'u':
s1 = s1 + 10
elif tmp =='d':
s1 = s1 - 10
if ans == 'vmin':
if tmp == 'u':
v1 = v1 + 10
elif tmp =='d':
v1 = v1 - 10
if ans == 'hmax':
if tmp == 'u':
h2 = h2 + 10
elif tmp =='d':
h2 = h2 - 10
if ans == 'smax':
if tmp == 'u':
s2 = s2 + 10
elif tmp =='d':
s2 = s2 - 10
if ans == 'vmax':
if tmp == 'u':
v2 = v2 + 10
elif tmp =='d':
v2 = v2 - 10
color_range = ((h1,s1,v1),(h2,s2,v2))
print("new vals: ",color_range)
return color_range,yote,cont,ans
def testing(color_range,yote):
print('----------------')
print('curr vals: ',color_range)
print('----------------')
h1 = input('h min: ')
if h1 == 'yeet':
yote = 0
return color_range,yote
if h1 == '':
return color_range,yote
else:
h1 = int(h1)
s1 = int(input('s min: '))
v1 = int(input('v min: '))
print()
h2 = int(input('h max: '))
s2 = int(input('s max: '))
v2 = int(input('v max: '))
return ((h1,s1,v1),(h2,s2,v2)),yote
def colorTarget(color_range=((0, 0, 0), (255, 255, 255))): # function defaults to open range if no range is provided
image = cam.newImage()
    # NOTE: `filter` below is the Python builtin (this function takes no such
    # parameter), so the comparison is always False and the HSV branch always
    # runs; the check looks like a leftover from an earlier version.
    if filter == 'RGB':
        image_hsv = image.copy()
    else:
        image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)  # convert to hsv colorspace
thresh = cv2.inRange(image_hsv, color_range[0], color_range[1])
kernel = np.ones((5, 5), np.uint8) # apply a blur function
mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel) # Apply blur
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) # Apply blur 2nd iteration
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2] # generates number of contiguous "1" pixels
'''********* Used for testing ***********'''
#cv2.imshow('mask',mask)
#cv2.imshow('orig', image)
#cv2.waitKey(20)
'''**************************************'''
if len(cnts) > 0: # begin processing if there are "1" pixels discovered
c = max(cnts, key=cv2.contourArea) # return the largest target area
((x, y), radius) = cv2.minEnclosingCircle(c) # get properties of circle around shape
targ = np.array([int(x), int(y), # return x, y, radius, of target
round(radius, 1)])
return targ
else:
return np.array([None, None, 0])
def getAngle(x): # check deviation of target from center
if x is not None:
ratio = x / 240 # divide by pixels in width
offset = -2*(ratio - 0.5) # offset. Now, positive = right, negative = left
offset_x = round(offset,2) # perform rounding
return (offset_x)
else:
return None
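# Worked examples for getAngle (added for illustration, assuming a 240-pixel-wide
# frame as the divisor above implies):
#   x = 120 -> ratio 0.5 -> offset  0.0  (target centered)
#   x = 240 -> ratio 1.0 -> offset -1.0
#   x = 0   -> ratio 0.0 -> offset +1.0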
# THIS SECTION ONLY RUNS IF THE PROGRAM IS CALLED DIRECTLY
if __name__ == "__main__":
while True:
'''if yote:
print(color_range[0][0])
color_range,yote,cont,prev=autoYeet(color_range,yote,cont,prev)
#color_range,yote = testing(color_range,yote)'''
target = colorTarget(color_range) # generate a target
print(target)
x = target[0]
if x is None:
print("no target located.")
else:
x_range = getAngle(x)
print("Target x location: ", x_range)
time.sleep(0.1) # short delay
|
the-stack_106_17972
|
from dataclasses import dataclass, field
from typing import List
from xsdata.models.datatype import XmlPeriod
__NAMESPACE__ = "NISTSchema-SV-IV-list-gDay-maxLength-1-NS"
@dataclass
class NistschemaSvIvListGDayMaxLength1:
class Meta:
name = "NISTSchema-SV-IV-list-gDay-maxLength-1"
namespace = "NISTSchema-SV-IV-list-gDay-maxLength-1-NS"
value: List[XmlPeriod] = field(
default_factory=list,
metadata={
"max_length": 5,
"tokens": True,
}
)
|
the-stack_106_17973
|
#!/usr/bin/env python3
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
__copyright__ = ('Copyright Amazon.com, Inc. or its affiliates. '
'All Rights Reserved.')
__version__ = '2.7.1'
__license__ = 'MIT-0'
__author__ = 'Akihiro Nakajima'
__url__ = 'https://github.com/aws-samples/siem-on-amazon-opensearch-service'
import os
from aws_cdk import core
from mysiem.aes_siem_stack import MyAesSiemStack
app = core.App()
MyAesSiemStack(app, "aes-siem",
description=f'SIEM on Amazon OpenSearch Service v{__version__}',
env=core.Environment(
account=os.environ['CDK_DEFAULT_ACCOUNT'],
region=os.environ['CDK_DEFAULT_REGION']))
app.synth()
|
the-stack_106_17974
|
from django.db import models
from Crypto.Util import number
from django.contrib.auth.models import AbstractUser
from django.utils import timezone
import random
# Create your models here.
def get_semester():
now = timezone.now()
if now.month in (9,10,11,12,1,2):
# Fall semester
return '{}-{}-1'.format(now.year, now.year+1)
else:
return '{}-{}-2'.format(now.year-1, now.year)
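# Illustrative behaviour of get_semester() (added comment; the values follow from
# the month check above):
#   October 2021 -> '2021-2022-1'  (fall semester)
#   April 2022   -> '2021-2022-2'  (spring semester)
#   January 2022 -> '2022-2023-1'  (note: Jan/Feb reuse the current calendar year,
#                                   which may or may not be the intended convention)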
class TeachingClass(models.Model):
classno = models.CharField(max_length=10, unique=True)
school = models.CharField('学院', max_length=10, blank=True)
def __str__(self):
return self.classno
class PublicKey(models.Model):
a = models.CharField(max_length=1500, blank=True)
b = models.CharField(max_length=1500, blank=True)
c = models.CharField(max_length=1500, blank=True)
n = models.CharField(max_length=1500, blank=True)
g = models.CharField(max_length=1500, blank=True)
h = models.CharField(max_length=1500, blank=True)
p = models.CharField(max_length=1500, blank=True)
le = 1026
ls = 4096
ln = 2048
teaching_class = models.ForeignKey(TeachingClass, models.CASCADE)
semester = models.CharField("学期", max_length=20)
def get_int(self, name):
if isinstance(name, (list, tuple)):
ret = []
for i in name:
if i in ('a', 'b', 'c', 'n', 'g', 'h', 'p'):
ret.append(int(self.__getattribute__(i)))
return ret
if name in ('a', 'b', 'c', 'n', 'g', 'h', 'p'):
return int(self.__getattribute__(name))
else:
return None
def init_key(self):
p = number.getStrongPrime(self.ln//2)
q = number.getStrongPrime(self.ln//2)
n = p*q
randlis = [random.randrange(0, 1<<1024) for _ in range(4)]
rand2lis = list(map(lambda x: pow(x, 2, n) ,randlis))
h = rand2lis[3]
r = random.randrange(100)
g = pow(h, r, n)
self.n = str(n)
self.a = str(rand2lis[0])
self.b = str(rand2lis[1])
self.c = str(rand2lis[2])
self.h = str(rand2lis[3])
self.p = str(p)
self.g = str(g)
@classmethod
def create(cls, teaching_class, semester=None):
if not semester:
semester = get_semester()
obj = cls(
teaching_class = teaching_class,
semester = semester
)
obj.init_key()
return obj
def __str__(self):
return '{}_{}'.format(self.teaching_class, self.semester)
def renew(self):
if get_semester() == self.semester:
return None
return self.create(self.teaching_class)
class AipUser(AbstractUser):
teaching_class = models.ForeignKey(
TeachingClass, on_delete=models.SET_NULL,
blank=True, null=True
)
is_signed = models.BooleanField(default=False)
|
the-stack_106_17977
|
# SPDX-FileCopyrightText: 2020 The Magma Authors.
# SPDX-FileCopyrightText: 2022 Open Networking Foundation <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
import unittest
import unittest.mock
import metrics_pb2
from common import metrics_export
from orc8r.protos import metricsd_pb2
from prometheus_client import (
CollectorRegistry,
Counter,
Gauge,
Histogram,
Summary,
)
class Service303MetricTests(unittest.TestCase):
"""
Tests for the Service303 metrics interface
"""
def setUp(self):
self.registry = CollectorRegistry()
self.maxDiff = None
def test_counter(self):
"""Test that we can track counters in Service303"""
# Add a counter with a label to the regisry
c = Counter(
'process_max_fds', 'A counter', ['result'],
registry=self.registry,
)
# Create two series for value1 and value2
c.labels('success').inc(1.23)
c.labels('failure').inc(2.34)
# Build proto outputs
counter1 = metrics_pb2.Counter(value=1.23)
counter2 = metrics_pb2.Counter(value=2.34)
metric1 = metrics_pb2.Metric(
counter=counter1,
timestamp_ms=1234000,
)
metric2 = metrics_pb2.Metric(
counter=counter2,
timestamp_ms=1234000,
)
family = metrics_pb2.MetricFamily(
name=str(metricsd_pb2.process_max_fds),
type=metrics_pb2.COUNTER,
)
metric1.label.add(
name=str(metricsd_pb2.result),
value='success',
)
metric2.label.add(
name=str(metricsd_pb2.result),
value='failure',
)
family.metric.extend([metric1, metric2])
with unittest.mock.patch('time.time') as mock_time:
mock_time.side_effect = lambda: 1234
self.assertCountEqual(
list(metrics_export.get_metrics(self.registry))[0].metric,
family.metric,
)
def test_gauge(self):
"""Test that we can track gauges in Service303"""
# Add a gauge with a label to the regisry
c = Gauge(
'process_max_fds', 'A gauge', ['result'],
registry=self.registry,
)
# Create two series for value1 and value2
c.labels('success').inc(1.23)
c.labels('failure').inc(2.34)
# Build proto outputs
gauge1 = metrics_pb2.Gauge(value=1.23)
gauge2 = metrics_pb2.Gauge(value=2.34)
metric1 = metrics_pb2.Metric(
gauge=gauge1,
timestamp_ms=1234000,
)
metric2 = metrics_pb2.Metric(
gauge=gauge2,
timestamp_ms=1234000,
)
family = metrics_pb2.MetricFamily(
name=str(metricsd_pb2.process_max_fds),
type=metrics_pb2.GAUGE,
)
metric1.label.add(
name=str(metricsd_pb2.result),
value='success',
)
metric2.label.add(
name=str(metricsd_pb2.result),
value='failure',
)
family.metric.extend([metric1, metric2])
with unittest.mock.patch('time.time') as mock_time:
mock_time.side_effect = lambda: 1234
self.assertCountEqual(
list(metrics_export.get_metrics(self.registry))[0].metric,
family.metric,
)
def test_summary(self):
"""Test that we can track summaries in Service303"""
# Add a summary with a label to the regisry
c = Summary(
'process_max_fds', 'A summary', [
'result',
], registry=self.registry,
)
c.labels('success').observe(1.23)
c.labels('failure').observe(2.34)
# Build proto outputs
summary1 = metrics_pb2.Summary(sample_count=1, sample_sum=1.23)
summary2 = metrics_pb2.Summary(sample_count=1, sample_sum=2.34)
metric1 = metrics_pb2.Metric(
summary=summary1,
timestamp_ms=1234000,
)
metric2 = metrics_pb2.Metric(
summary=summary2,
timestamp_ms=1234000,
)
family = metrics_pb2.MetricFamily(
name=str(metricsd_pb2.process_max_fds),
type=metrics_pb2.SUMMARY,
)
metric1.label.add(
name=str(metricsd_pb2.result),
value='success',
)
metric2.label.add(
name=str(metricsd_pb2.result),
value='failure',
)
family.metric.extend([metric1, metric2])
with unittest.mock.patch('time.time') as mock_time:
mock_time.side_effect = lambda: 1234
self.assertCountEqual(
list(metrics_export.get_metrics(self.registry))[0].metric,
family.metric,
)
def test_histogram(self):
"""Test that we can track histogram in Service303"""
# Add a histogram with a label to the regisry
c = Histogram(
'process_max_fds', 'A summary', ['result'],
registry=self.registry, buckets=[0, 2, float('inf')],
)
c.labels('success').observe(1.23)
c.labels('failure').observe(2.34)
# Build proto outputs
histogram1 = metrics_pb2.Histogram(sample_count=1, sample_sum=1.23)
histogram1.bucket.add(upper_bound=0, cumulative_count=0)
histogram1.bucket.add(upper_bound=2, cumulative_count=1)
histogram1.bucket.add(upper_bound=float('inf'), cumulative_count=1)
histogram2 = metrics_pb2.Histogram(sample_count=1, sample_sum=2.34)
histogram2.bucket.add(upper_bound=0, cumulative_count=0)
histogram2.bucket.add(upper_bound=2, cumulative_count=0)
histogram2.bucket.add(upper_bound=float('inf'), cumulative_count=1)
metric1 = metrics_pb2.Metric(
histogram=histogram1,
timestamp_ms=1234000,
)
metric2 = metrics_pb2.Metric(
histogram=histogram2,
timestamp_ms=1234000,
)
family = metrics_pb2.MetricFamily(
name=str(metricsd_pb2.process_max_fds),
type=metrics_pb2.HISTOGRAM,
)
metric1.label.add(
name=str(metricsd_pb2.result),
value='success',
)
metric2.label.add(
name=str(metricsd_pb2.result),
value='failure',
)
family.metric.extend([metric1, metric2])
with unittest.mock.patch('time.time') as mock_time:
mock_time.side_effect = lambda: 1234
self.assertCountEqual(
list(metrics_export.get_metrics(self.registry))[0].metric,
family.metric,
)
def test_converted_enums(self):
""" Test that metric names and labels are auto converted """
# enum values (from metricsd.proto):
# mme_new_association => 500, result => 0
c = Counter(
'mme_new_association', 'A counter', ['result'],
registry=self.registry,
)
c.labels('success').inc(1.23)
metric_family = list(metrics_export.get_metrics(self.registry))[0]
self.assertEqual(
metric_family.name,
str(metricsd_pb2.mme_new_association),
)
metric_labels = metric_family.metric[0].label
        # Order not guaranteed
self.assertEqual(metric_labels[0].name, str(metricsd_pb2.result))
self.assertEqual(metric_labels[0].value, 'success')
if __name__ == "__main__":
unittest.main()
|
the-stack_106_17978
|
#!/usr/bin/env python
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import os
from botocore.vendored import requests
from awscli.compat import six
from awscli.compat import compat_open
logger = logging.getLogger(__name__)
# These are special cased arguments that do _not_ get the
# special param file processing. This is typically because it
# refers to an actual URI of some sort and we don't want to actually
# download the content (i.e TemplateURL in cloudformation).
PARAMFILE_DISABLED = set([
'apigateway.put-integration.uri',
'cloudformation.create-stack.template-url',
'cloudformation.update-stack.template-url',
'cloudformation.validate-template.template-url',
'cloudformation.estimate-template-cost.template-url',
'cloudformation.create-stack.stack-policy-url',
'cloudformation.update-stack.stack-policy-url',
'cloudformation.set-stack-policy.stack-policy-url',
'cloudformation.update-stack.stack-policy-during-update-url',
# We will want to change the event name to ``s3`` as opposed to
# custom in the near future along with ``s3`` to ``s3api``.
'custom.cp.website-redirect',
'custom.mv.website-redirect',
'custom.sync.website-redirect',
'iam.create-open-id-connect-provider.url',
'machinelearning.predict.predict-endpoint',
'sqs.add-permission.queue-url',
'sqs.change-message-visibility.queue-url',
'sqs.change-message-visibility-batch.queue-url',
'sqs.delete-message.queue-url',
'sqs.delete-message-batch.queue-url',
'sqs.delete-queue.queue-url',
'sqs.get-queue-attributes.queue-url',
'sqs.list-dead-letter-source-queues.queue-url',
'sqs.receive-message.queue-url',
'sqs.remove-permission.queue-url',
'sqs.send-message.queue-url',
'sqs.send-message-batch.queue-url',
'sqs.set-queue-attributes.queue-url',
'sqs.purge-queue.queue-url',
's3.copy-object.website-redirect-location',
's3.create-multipart-upload.website-redirect-location',
's3.put-object.website-redirect-location',
# Double check that this has been renamed!
'sns.subscribe.notification-endpoint',
])
class ResourceLoadingError(Exception):
pass
def get_paramfile(path):
"""Load parameter based on a resource URI.
It is possible to pass parameters to operations by referring
to files or URI's. If such a reference is detected, this
function attempts to retrieve the data from the file or URI
and returns it. If there are any errors or if the ``path``
does not appear to refer to a file or URI, a ``None`` is
returned.
:type path: str
:param path: The resource URI, e.g. file://foo.txt. This value
may also be a non resource URI, in which case ``None`` is returned.
:return: The loaded value associated with the resource URI.
If the provided ``path`` is not a resource URI, then a
value of ``None`` is returned.
"""
data = None
if isinstance(path, six.string_types):
for prefix, function_spec in PREFIX_MAP.items():
if path.startswith(prefix):
function, kwargs = function_spec
data = function(prefix, path, **kwargs)
return data
def get_file(prefix, path, mode):
file_path = os.path.expandvars(os.path.expanduser(path[len(prefix):]))
try:
with compat_open(file_path, mode) as f:
return f.read()
except UnicodeDecodeError:
raise ResourceLoadingError(
'Unable to load paramfile (%s), text contents could '
'not be decoded. If this is a binary file, please use the '
'fileb:// prefix instead of the file:// prefix.' % file_path)
except (OSError, IOError) as e:
raise ResourceLoadingError('Unable to load paramfile %s: %s' % (
path, e))
def get_uri(prefix, uri):
try:
r = requests.get(uri)
if r.status_code == 200:
return r.text
else:
raise ResourceLoadingError(
"received non 200 status code of %s" % (
r.status_code))
except Exception as e:
raise ResourceLoadingError('Unable to retrieve %s: %s' % (uri, e))
PREFIX_MAP = {
'file://': (get_file, {'mode': 'r'}),
'fileb://': (get_file, {'mode': 'rb'}),
'http://': (get_uri, {}),
'https://': (get_uri, {}),
}
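# Illustrative sketch (not part of the original awscli module): how get_paramfile
# resolves a file:// URI. The temporary file created below is hypothetical.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write('hello from a paramfile')
        tmp_path = tmp.name
    # A recognised prefix dispatches through PREFIX_MAP; anything else returns None.
    print(get_paramfile('file://' + tmp_path))  # -> 'hello from a paramfile'
    print(get_paramfile('not-a-resource-uri'))  # -> None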
|
the-stack_106_17979
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Variant 10. For a list of real-valued elements, compute:
# 1) the index of the minimum-absolute-value element of the list;
# 2) the sum of the absolute values of the elements located after the first negative element.
# Compress the list by removing every element whose value lies in the interval [a, b].
# Fill the elements freed at the end of the list with zeros.
import sys
if __name__ == '__main__':
a = list(map(float, input().split()))
if not a:
        print('The given list is empty', file=sys.stderr)
exit(1)
    a_den = None
    for i, j in enumerate(a):
        if j < 0:
            a_den = i
            break
    if a_den is None:
        print('The list contains no negative element', file=sys.stderr)
        exit(1)
    new_a = a[a_den+1:]
b = [abs(i) for i in new_a]
s = sum(b)
print(s)
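# A possible sketch (not part of the original solution) of the remaining parts of
# the assignment: the index of the minimum-absolute-value element, and compressing
# the list by dropping values inside [a_low, b_high] and padding the tail with
# zeros. The bound names a_low and b_high are hypothetical.
def min_abs_index(values):
    """Return the index of the element with the smallest absolute value."""
    return min(range(len(values)), key=lambda k: abs(values[k]))
def compress(values, a_low, b_high):
    """Remove elements whose value lies in [a_low, b_high]; pad with zeros."""
    kept = [v for v in values if not (a_low <= v <= b_high)]
    return kept + [0.0] * (len(values) - len(kept))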
|
the-stack_106_17980
|
'''
This module implements :class:`AnalogSignal`, an array of analog signals.
:class:`AnalogSignal` inherits from :class:`basesignal.BaseSignal` which
derives from :class:`BaseNeo`, and from :class:`quantities.Quantity` which
in turn inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
'''
import logging
try:
import scipy.signal
except ImportError as err:
HAVE_SCIPY = False
else:
HAVE_SCIPY = True
import numpy as np
import quantities as pq
from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
from neo.core.dataobject import DataObject
from copy import copy, deepcopy
from neo.core.basesignal import BaseSignal
logger = logging.getLogger("Neo")
def _get_sampling_rate(sampling_rate, sampling_period):
'''
Gets the sampling_rate from either the sampling_period or the
sampling_rate, or makes sure they match if both are specified
'''
if sampling_period is None:
if sampling_rate is None:
raise ValueError("You must provide either the sampling rate or " + "sampling period")
elif sampling_rate is None:
sampling_rate = 1.0 / sampling_period
elif sampling_period != 1.0 / sampling_rate:
raise ValueError('The sampling_rate has to be 1/sampling_period')
if not hasattr(sampling_rate, 'units'):
raise TypeError("Sampling rate/sampling period must have units")
return sampling_rate
def _new_AnalogSignalArray(cls, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
sampling_rate=None, sampling_period=None, name=None, file_origin=None,
description=None, array_annotations=None, annotations=None,
channel_index=None, segment=None):
'''
A function to map AnalogSignal.__new__ to function that
does not do the unit checking. This is needed for pickle to work.
'''
obj = cls(signal=signal, units=units, dtype=dtype, copy=copy,
t_start=t_start, sampling_rate=sampling_rate,
sampling_period=sampling_period, name=name,
file_origin=file_origin, description=description,
array_annotations=array_annotations, **annotations)
obj.channel_index = channel_index
obj.segment = segment
return obj
class AnalogSignal(BaseSignal):
'''
Array of one or more continuous analog signals.
A representation of several continuous, analog signals that
have the same duration, sampling rate and start time.
Basically, it is a 2D array: dim 0 is time, dim 1 is
channel index
Inherits from :class:`quantities.Quantity`, which in turn inherits from
:class:`numpy.ndarray`.
*Usage*::
>>> from neo.core import AnalogSignal
>>> import quantities as pq
>>>
>>> sigarr = AnalogSignal([[1, 2, 3], [4, 5, 6]], units='V',
... sampling_rate=1*pq.Hz)
>>>
>>> sigarr
<AnalogSignal(array([[1, 2, 3],
            [4, 5, 6]]) * V, [0.0 s, 2.0 s], sampling rate: 1.0 Hz)>
>>> sigarr[:,1]
<AnalogSignal(array([2, 5]) * V, [0.0 s, 2.0 s],
sampling rate: 1.0 Hz)>
>>> sigarr[1, 1]
array(5) * V
*Required attributes/properties*:
:signal: (quantity array 2D, numpy array 2D, or list (data, channel))
The data itself.
:units: (quantity units) Required if the signal is a list or NumPy
array, not if it is a :class:`Quantity`
:t_start: (quantity scalar) Time when signal begins
:sampling_rate: *or* **sampling_period** (quantity scalar) Number of
samples per unit time or
interval between two samples.
If both are specified, they are
checked for consistency.
*Recommended attributes/properties*:
:name: (str) A label for the dataset.
:description: (str) Text description.
:file_origin: (str) Filesystem path or URL of the original data file.
*Optional attributes/properties*:
:dtype: (numpy dtype or str) Override the dtype of the signal array.
:copy: (bool) True by default.
:array_annotations: (dict) Dict mapping strings to numpy arrays containing annotations \
for all data points
Note: Any other additional arguments are assumed to be user-specific
metadata and stored in :attr:`annotations`.
*Properties available on this object*:
:sampling_rate: (quantity scalar) Number of samples per unit time.
(1/:attr:`sampling_period`)
:sampling_period: (quantity scalar) Interval between two samples.
            (1/:attr:`sampling_rate`)
:duration: (Quantity) Signal duration, read-only.
(size * :attr:`sampling_period`)
:t_stop: (quantity scalar) Time when signal ends, read-only.
(:attr:`t_start` + :attr:`duration`)
:times: (quantity 1D) The time points of each sample of the signal,
read-only.
(:attr:`t_start` + arange(:attr:`shape`[0])/:attr:`sampling_rate`)
:channel_index:
(deprecated) access to the channel_index attribute of the principal ChannelIndex
associated with this signal.
*Slicing*:
:class:`AnalogSignal` objects can be sliced. When taking a single
column (dimension 0, e.g. [0, :]) or a single element,
a :class:`~quantities.Quantity` is returned.
Otherwise an :class:`AnalogSignal` (actually a view) is
returned, with the same metadata, except that :attr:`t_start`
is changed if the start index along dimension 1 is greater than 1.
Note that slicing an :class:`AnalogSignal` may give a different
result to slicing the underlying NumPy array since signals
are always two-dimensional.
*Operations available on this object*:
== != + * /
'''
_single_parent_objects = ('Segment', 'ChannelIndex')
_single_parent_attrs = ('segment', 'channel_index')
_quantity_attr = 'signal'
_necessary_attrs = (('signal', pq.Quantity, 2),
('sampling_rate', pq.Quantity, 0),
('t_start', pq.Quantity, 0))
_recommended_attrs = BaseNeo._recommended_attrs
def __new__(cls, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
sampling_rate=None, sampling_period=None, name=None, file_origin=None,
description=None, array_annotations=None, **annotations):
'''
Constructs new :class:`AnalogSignal` from data.
This is called whenever a new class:`AnalogSignal` is created from
the constructor, but not when slicing.
__array_finalize__ is called on the new object.
'''
signal = cls._rescale(signal, units=units)
obj = pq.Quantity(signal, units=units, dtype=dtype, copy=copy).view(cls)
if obj.ndim == 1:
obj.shape = (-1, 1)
if t_start is None:
raise ValueError('t_start cannot be None')
obj._t_start = t_start
obj._sampling_rate = _get_sampling_rate(sampling_rate, sampling_period)
obj.segment = None
obj.channel_index = None
return obj
def __init__(self, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
sampling_rate=None, sampling_period=None, name=None, file_origin=None,
description=None, array_annotations=None, **annotations):
'''
Initializes a newly constructed :class:`AnalogSignal` instance.
'''
# This method is only called when constructing a new AnalogSignal,
# not when slicing or viewing. We use the same call signature
# as __new__ for documentation purposes. Anything not in the call
# signature is stored in annotations.
# Calls parent __init__, which grabs universally recommended
# attributes and sets up self.annotations
DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
array_annotations=array_annotations, **annotations)
def __reduce__(self):
'''
Map the __new__ function onto _new_AnalogSignalArray, so that pickle
works
'''
return _new_AnalogSignalArray, (self.__class__, np.array(self), self.units, self.dtype,
True, self.t_start, self.sampling_rate,
self.sampling_period, self.name, self.file_origin,
self.description, self.array_annotations,
self.annotations, self.channel_index, self.segment)
def _array_finalize_spec(self, obj):
'''
Set default values for attributes specific to :class:`AnalogSignal`.
Common attributes are defined in
:meth:`__array_finalize__` in :class:`basesignal.BaseSignal`),
which is called every time a new signal is created
and calls this method.
'''
self._t_start = getattr(obj, '_t_start', 0 * pq.s)
self._sampling_rate = getattr(obj, '_sampling_rate', None)
return obj
def __repr__(self):
'''
Returns a string representing the :class:`AnalogSignal`.
'''
return ('<%s(%s, [%s, %s], sampling rate: %s)>' % (self.__class__.__name__,
super().__repr__(),
self.t_start, self.t_stop,
self.sampling_rate))
def get_channel_index(self):
"""
"""
if self.channel_index:
return self.channel_index.index
else:
return None
def __getitem__(self, i):
'''
Get the item or slice :attr:`i`.
'''
if isinstance(i, (int, np.integer)): # a single point in time across all channels
obj = super().__getitem__(i)
obj = pq.Quantity(obj.magnitude, units=obj.units)
elif isinstance(i, tuple):
obj = super().__getitem__(i)
j, k = i
if isinstance(j, (int, np.integer)): # extract a quantity array
obj = pq.Quantity(obj.magnitude, units=obj.units)
else:
if isinstance(j, slice):
if j.start:
obj.t_start = (self.t_start + j.start * self.sampling_period)
if j.step:
obj.sampling_period *= j.step
elif isinstance(j, np.ndarray):
raise NotImplementedError(
"Arrays not yet supported") # in the general case, would need to return
# IrregularlySampledSignal(Array)
else:
raise TypeError("%s not supported" % type(j))
if isinstance(k, (int, np.integer)):
obj = obj.reshape(-1, 1)
if self.channel_index:
obj.channel_index = self.channel_index.__getitem__(k)
obj.array_annotate(**deepcopy(self.array_annotations_at_index(k)))
elif isinstance(i, slice):
obj = super().__getitem__(i)
if i.start:
obj.t_start = self.t_start + i.start * self.sampling_period
obj.array_annotations = deepcopy(self.array_annotations)
elif isinstance(i, np.ndarray):
# Indexing of an AnalogSignal is only consistent if the resulting number of
# samples is the same for each trace. The time axis for these samples is not
# guaranteed to be continuous, so returning a Quantity instead of an AnalogSignal here.
new_time_dims = np.sum(i, axis=0)
if len(new_time_dims) and all(new_time_dims == new_time_dims[0]):
obj = np.asarray(self).T.__getitem__(i.T)
obj = obj.T.reshape(self.shape[1], -1).T
obj = pq.Quantity(obj, units=self.units)
else:
raise IndexError("indexing of an AnalogSignals needs to keep the same number of "
"sample for each trace contained")
else:
raise IndexError("index should be an integer, tuple, slice or boolean numpy array")
return obj
def __setitem__(self, i, value):
"""
Set an item or slice defined by :attr:`i` to `value`.
"""
# because AnalogSignals are always at least two-dimensional,
# we need to handle the case where `i` is an integer
if isinstance(i, int):
i = slice(i, i + 1)
elif isinstance(i, tuple):
j, k = i
if isinstance(k, int):
i = (j, slice(k, k + 1))
return super().__setitem__(i, value)
# sampling_rate attribute is handled as a property so type checking can
# be done
@property
def sampling_rate(self):
'''
Number of samples per unit time.
(1/:attr:`sampling_period`)
'''
return self._sampling_rate
@sampling_rate.setter
def sampling_rate(self, rate):
'''
Setter for :attr:`sampling_rate`
'''
if rate is None:
raise ValueError('sampling_rate cannot be None')
elif not hasattr(rate, 'units'):
raise ValueError('sampling_rate must have units')
self._sampling_rate = rate
# sampling_period attribute is handled as a property on underlying rate
@property
def sampling_period(self):
'''
Interval between two samples.
(1/:attr:`sampling_rate`)
'''
return 1. / self.sampling_rate
@sampling_period.setter
def sampling_period(self, period):
'''
Setter for :attr:`sampling_period`
'''
if period is None:
raise ValueError('sampling_period cannot be None')
elif not hasattr(period, 'units'):
raise ValueError('sampling_period must have units')
self.sampling_rate = 1. / period
# t_start attribute is handled as a property so type checking can be done
@property
def t_start(self):
'''
Time when signal begins.
'''
return self._t_start
@t_start.setter
def t_start(self, start):
'''
Setter for :attr:`t_start`
'''
if start is None:
raise ValueError('t_start cannot be None')
self._t_start = start
@property
def duration(self):
'''
Signal duration
(:attr:`size` * :attr:`sampling_period`)
'''
return self.shape[0] / self.sampling_rate
@property
def t_stop(self):
'''
Time when signal ends.
(:attr:`t_start` + :attr:`duration`)
'''
return self.t_start + self.duration
@property
def times(self):
'''
The time points of each sample of the signal
(:attr:`t_start` + arange(:attr:`shape`)/:attr:`sampling_rate`)
'''
return self.t_start + np.arange(self.shape[0]) / self.sampling_rate
def __eq__(self, other):
'''
Equality test (==)
'''
if (isinstance(other, AnalogSignal) and (
self.t_start != other.t_start or self.sampling_rate != other.sampling_rate)):
return False
return super().__eq__(other)
def _check_consistency(self, other):
'''
Check if the attributes of another :class:`AnalogSignal`
are compatible with this one.
'''
if isinstance(other, AnalogSignal):
for attr in "t_start", "sampling_rate":
if getattr(self, attr) != getattr(other, attr):
raise ValueError(
"Inconsistent values of %s" % attr) # how to handle name and annotations?
def _repr_pretty_(self, pp, cycle):
'''
Handle pretty-printing the :class:`AnalogSignal`.
'''
pp.text("{cls} with {channels} channels of length {length}; "
"units {units}; datatype {dtype} ".format(cls=self.__class__.__name__,
channels=self.shape[1],
length=self.shape[0],
units=self.units.dimensionality.string,
dtype=self.dtype))
if self._has_repr_pretty_attrs_():
pp.breakable()
self._repr_pretty_attrs_(pp, cycle)
def _pp(line):
pp.breakable()
with pp.group(indent=1):
pp.text(line)
_pp("sampling rate: {}".format(self.sampling_rate))
_pp("time: {} to {}".format(self.t_start, self.t_stop))
def time_index(self, t):
"""Return the array index (or indices) corresponding to the time (or times) `t`"""
i = (t - self.t_start) * self.sampling_rate
        i = np.rint(i.simplified.magnitude).astype(int)
return i
def time_slice(self, t_start, t_stop):
'''
Creates a new AnalogSignal corresponding to the time slice of the
original AnalogSignal between times t_start, t_stop. Note, that for
numerical stability reasons if t_start does not fall exactly on
the time bins defined by the sampling_period it will be rounded to
the nearest sampling bin. The time bin for t_stop will be chosen to
make the duration of the resultant signal as close as possible to
t_stop - t_start. This means that for a given duration, the size
of the slice will always be the same.
'''
# checking start time and transforming to start index
if t_start is None:
i = 0
t_start = 0 * pq.s
else:
i = self.time_index(t_start)
# checking stop time and transforming to stop index
if t_stop is None:
j = len(self)
else:
delta = (t_stop - t_start) * self.sampling_rate
j = i + int(np.rint(delta.simplified.magnitude))
if (i < 0) or (j > len(self)):
raise ValueError('t_start, t_stop have to be within the analog \
signal duration')
# Time slicing should create a deep copy of the object
obj = deepcopy(self[i:j])
obj.t_start = self.t_start + i * self.sampling_period
return obj
def time_shift(self, t_shift):
"""
Shifts a :class:`AnalogSignal` to start at a new time.
Parameters:
-----------
t_shift: Quantity (time)
Amount of time by which to shift the :class:`AnalogSignal`.
Returns:
--------
new_sig: :class:`AnalogSignal`
New instance of a :class:`AnalogSignal` object starting at t_shift later than the
original :class:`AnalogSignal` (the original :class:`AnalogSignal` is not modified).
"""
new_sig = deepcopy(self)
new_sig.t_start = new_sig.t_start + t_shift
return new_sig
def splice(self, signal, copy=False):
"""
Replace part of the current signal by a new piece of signal.
The new piece of signal will overwrite part of the current signal
starting at the time given by the new piece's `t_start` attribute.
The signal to be spliced in must have the same physical dimensions,
sampling rate, and number of channels as the current signal and
fit within it.
If `copy` is False (the default), modify the current signal in place.
If `copy` is True, return a new signal and leave the current one untouched.
In this case, the new signal will not be linked to any parent objects.
"""
if signal.t_start < self.t_start:
raise ValueError("Cannot splice earlier than the start of the signal")
if signal.t_stop > self.t_stop:
raise ValueError("Splice extends beyond signal")
if signal.sampling_rate != self.sampling_rate:
raise ValueError("Sampling rates do not match")
i = self.time_index(signal.t_start)
j = i + signal.shape[0]
if copy:
new_signal = deepcopy(self)
new_signal.segment = None
new_signal.channel_index = None
new_signal[i:j, :] = signal
return new_signal
else:
self[i:j, :] = signal
return self
def downsample(self, downsampling_factor, **kwargs):
"""
Downsample the data of a signal.
This method reduces the number of samples of the AnalogSignal to a fraction of the
original number of samples, defined by `downsampling_factor`.
This method is a wrapper of scipy.signal.decimate and accepts the same set of keyword
arguments, except for specifying the axis of resampling, which is fixed to the first axis
here.
Parameters:
-----------
downsampling_factor: integer
Factor used for decimation of samples. Scipy recommends to call decimate multiple times
for downsampling factors higher than 13 when using IIR downsampling (default).
Returns:
--------
downsampled_signal: :class:`AnalogSignal`
New instance of a :class:`AnalogSignal` object containing the resampled data points.
The original :class:`AnalogSignal` is not modified.
Note:
-----
For resampling the signal with a fixed number of samples, see `resample` method.
"""
if not HAVE_SCIPY:
raise ImportError('Decimating requires availability of scipy.signal')
# Resampling is only permitted along the time axis (axis=0)
if 'axis' in kwargs:
kwargs.pop('axis')
downsampled_data = scipy.signal.decimate(self.magnitude, downsampling_factor, axis=0,
**kwargs)
downsampled_signal = self.duplicate_with_new_data(downsampled_data)
# since the number of channels stays the same, we can also copy array annotations here
downsampled_signal.array_annotations = self.array_annotations.copy()
downsampled_signal.sampling_rate = self.sampling_rate / downsampling_factor
return downsampled_signal
def resample(self, sample_count, **kwargs):
"""
Resample the data points of the signal.
This method interpolates the signal and returns a new signal with a fixed number of
samples defined by `sample_count`.
This method is a wrapper of scipy.signal.resample and accepts the same set of keyword
arguments, except for specifying the axis of resampling which is fixed to the first axis
        here, and the sample positions.
Parameters:
-----------
sample_count: integer
Number of desired samples. The resulting signal starts at the same sample as the
original and is sampled regularly.
Returns:
--------
resampled_signal: :class:`AnalogSignal`
New instance of a :class:`AnalogSignal` object containing the resampled data points.
The original :class:`AnalogSignal` is not modified.
Note:
-----
For reducing the number of samples to a fraction of the original, see `downsample` method
"""
if not HAVE_SCIPY:
raise ImportError('Resampling requires availability of scipy.signal')
# Resampling is only permitted along the time axis (axis=0)
if 'axis' in kwargs:
kwargs.pop('axis')
if 't' in kwargs:
kwargs.pop('t')
resampled_data, resampled_times = scipy.signal.resample(self.magnitude, sample_count,
t=self.times, axis=0, **kwargs)
resampled_signal = self.duplicate_with_new_data(resampled_data)
resampled_signal.sampling_rate = (sample_count / self.shape[0]) * self.sampling_rate
# since the number of channels stays the same, we can also copy array annotations here
resampled_signal.array_annotations = self.array_annotations.copy()
return resampled_signal
def rectify(self, **kwargs):
"""
Rectify the signal.
This method rectifies the signal by taking the absolute value.
This method is a wrapper of numpy.absolute() and accepts the same set of keyword
arguments.
Returns:
--------
resampled_signal: :class:`AnalogSignal`
New instance of a :class:`AnalogSignal` object containing the rectified data points.
The original :class:`AnalogSignal` is not modified.
"""
# Use numpy to get the absolute value of the signal
rectified_data = np.absolute(self.magnitude, **kwargs)
rectified_signal = self.duplicate_with_new_data(rectified_data)
# the sampling rate stays constant
rectified_signal.sampling_rate = self.sampling_rate
# since the number of channels stays the same, we can also copy array annotations here
rectified_signal.array_annotations = self.array_annotations.copy()
return rectified_signal
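# Illustrative usage sketch (not part of the neo source): build a signal and
# exercise time_slice and downsample as documented above. The data values are
# arbitrary, and downsample additionally requires scipy.
if __name__ == "__main__":
    sig = AnalogSignal(np.random.randn(1000, 4), units="mV",
                       sampling_rate=1000 * pq.Hz)
    chunk = sig.time_slice(0.1 * pq.s, 0.2 * pq.s)  # 100 samples, t_start = 0.1 s
    print(chunk.shape, chunk.t_start)
    if HAVE_SCIPY:
        smaller = sig.downsample(4)  # 250 samples at 250 Hz
        print(smaller.shape, smaller.sampling_rate)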
|
the-stack_106_17984
|
from typing import Callable, Tuple, Union
import numpy as np
from emukit.core.acquisition import Acquisition
from emukit.core.interfaces import IModel, IPriorHyperparameters
class IntegratedHyperParameterAcquisition(Acquisition):
"""
This acquisition class provides functionality for integrating any acquisition function over model hyper-parameters
"""
def __init__(
self,
model: Union[IModel, IPriorHyperparameters],
acquisition_generator: Callable,
n_samples: int = 10,
n_burnin: int = 100,
subsample_interval: int = 10,
step_size: float = 1e-1,
leapfrog_steps: int = 20,
):
"""
:param model: An emukit model that implements IPriorHyperparameters
:param acquisition_generator: Function that returns acquisition object when given the model as the only argument
:param n_samples: Number of hyper-parameter samples
        :param n_burnin: Number of initial burn-in samples that are discarded.
:param subsample_interval: Interval of subsampling from HMC samples.
:param step_size: Size of the gradient steps in the HMC sampler.
:param leapfrog_steps: Number of gradient steps before each Metropolis Hasting step.
"""
self.model = model
self.acquisition_generator = acquisition_generator
self.n_samples = n_samples
self.n_burnin = n_burnin
self.subsample_interval = subsample_interval
self.step_size = step_size
self.leapfrog_steps = leapfrog_steps
self.update_parameters()
acquisition = self.acquisition_generator(model)
self._has_gradients = acquisition.has_gradients
def evaluate(self, x: np.ndarray) -> np.ndarray:
"""
Evaluate acquisition by integrating over the hyper-parameters of the model
:param x: locations where the evaluation is done.
:return: Array with integrated acquisition value at all input locations
"""
acquisition_value = 0
for sample in self.samples:
self.model.fix_model_hyperparameters(sample)
acquisition = self.acquisition_generator(self.model)
acquisition_value += acquisition.evaluate(x)
return acquisition_value / self.n_samples
def evaluate_with_gradients(self, x: np.ndarray) -> Tuple:
"""
Computes the acquisition value and its derivative integrating over the hyper-parameters of the model
:param x: locations where the evaluation with gradients is done.
:return: tuple containing the integrated expected improvement at the points x and its gradient.
"""
if x.ndim == 1:
x = x[None, :]
acquisition_value = 0
d_acquisition_dx = 0
for sample in self.samples:
self.model.fix_model_hyperparameters(sample)
acquisition = self.acquisition_generator(self.model)
improvement_sample, d_improvement_dx_sample = acquisition.evaluate_with_gradients(x)
acquisition_value += improvement_sample
d_acquisition_dx += d_improvement_dx_sample
return acquisition_value / self.n_samples, d_acquisition_dx / self.n_samples
def update_parameters(self):
self.samples = self.model.generate_hyperparameters_samples(
self.n_samples, self.n_burnin, self.subsample_interval, self.step_size, self.leapfrog_steps
)
@property
def has_gradients(self) -> bool:
"""Returns that this acquisition has gradients"""
return self._has_gradients
def update_batches(self, x_batch, lipschitz_constant, f_min):
acquisition = self.acquisition_generator(self.model)
acquisition.update_batches(x_batch, lipschitz_constant, f_min)
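# Illustrative sketch (not part of the emukit source): a minimal toy model and
# acquisition showing how the integrated acquisition averages over the
# hyper-parameter samples. _ToyModel and _ToyAcquisition are hypothetical
# stand-ins, not real emukit components.
if __name__ == "__main__":
    class _ToyModel:
        def __init__(self):
            self.scale = 1.0
        def generate_hyperparameters_samples(self, n_samples, n_burnin,
                                             subsample_interval, step_size,
                                             leapfrog_steps):
            # Pretend these rows came from an HMC sampler.
            return np.linspace(0.5, 1.5, n_samples).reshape(-1, 1)
        def fix_model_hyperparameters(self, sample):
            self.scale = float(sample[0])
    class _ToyAcquisition(Acquisition):
        def __init__(self, model):
            self.model = model
        def evaluate(self, x):
            return self.model.scale * np.sum(x ** 2, axis=1, keepdims=True)
        @property
        def has_gradients(self):
            return False
    integrated = IntegratedHyperParameterAcquisition(
        _ToyModel(), acquisition_generator=_ToyAcquisition, n_samples=5
    )
    print(integrated.evaluate(np.array([[0.5], [1.0]])))  # averaged over 5 samples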
|
the-stack_106_17986
|
import pytest
import logging
from os import path
from logging.config import dictConfig
CURRENT_DIR = path.dirname(__file__)
def full_path(file: str) -> str:
return path.join(CURRENT_DIR, file)
@pytest.fixture(scope='session', autouse=True)
def set_base_test_logger():
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
}
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'default',
'stream': 'ext://sys.stdout'
},
},
'loggers': {
'console': {
'handlers': ['console'],
'propagate': False}
},
'root': {
'handlers': ['console'],
'level': 'INFO'
}
}
dictConfig(LOGGING)
|
the-stack_106_17988
|
import logging
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["odziez.herokuapp.com"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = "static"
default_acl = "public-read"
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = "media"
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = "config.settings.production.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="Odziez <[email protected]>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[Odziez]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ["gunicorn"] # noqa F405
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
MIDDLEWARE.insert(1, "whitenoise.middleware.WhiteNoiseMiddleware") # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=None, # Send no events from log messages
)
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[sentry_logging, DjangoIntegration()])
# Your stuff...
# ------------------------------------------------------------------------------
DEFAULT_TO_EMAIL = env(
"DJANGO_DEFAULT_TO_EMAIL", default="Odziez <[email protected]>"
)
# FAKE_MANUFACTURER_EMAIL = env(
# "FAKE_MANUFACTURER_EMAL",
#)
#FAKE_SUPERVISOR_EMAIL = env(
# "FAKE_SUPERVISOR_EMAIL",
#)
|
the-stack_106_17994
|
##
# @filename : main.py
# @brief : 2.9inch e-paper display (B) demo
# @author : Yehui from Waveshare
#
# Copyright (C) Waveshare July 24 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import epd2in9b
import Image
import ImageFont
import ImageDraw
#import imagedata
COLORED = 1
UNCOLORED = 0
def main():
epd = epd2in9b.EPD()
epd.init()
# clear the frame buffer
    frame_black = [0xFF] * (epd.width * epd.height // 8)
    frame_red = [0xFF] * (epd.width * epd.height // 8)
# For simplicity, the arguments are explicit numerical coordinates
epd.draw_rectangle(frame_black, 10, 80, 50, 140, COLORED);
epd.draw_line(frame_black, 10, 80, 50, 140, COLORED);
epd.draw_line(frame_black, 50, 80, 10, 140, COLORED);
epd.draw_circle(frame_black, 90, 110, 30, COLORED);
epd.draw_filled_rectangle(frame_red, 10, 180, 50, 240, COLORED);
epd.draw_filled_rectangle(frame_red, 0, 6, 128, 26, COLORED);
epd.draw_filled_circle(frame_red, 90, 210, 30, COLORED);
# write strings to the buffer
font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMono.ttf', 16)
epd.draw_string_at(frame_black, 4, 30, "e-Paper Demo", font, COLORED)
epd.draw_string_at(frame_red, 6, 10, "Hello world!", font, UNCOLORED)
# display the frames
epd.display_frame(frame_black, frame_red)
# display images
frame_black = epd.get_frame_buffer(Image.open('black.bmp'))
frame_red = epd.get_frame_buffer(Image.open('red.bmp'))
epd.display_frame(frame_black, frame_red)
# You can get frame buffer from an image or import the buffer directly:
#epd.display_frame(imagedata.IMAGE_BLACK, imagedata.IMAGE_RED)
if __name__ == '__main__':
main()
|
the-stack_106_17996
|
# Standard library imports
import sys
# Third party imports
import serial
# Adafruit package imports
from adafruit_fingerprint import AdafruitFingerprint
from adafruit_fingerprint.responses import *
def main():
# Attempt to connect to serial port
try:
port = '/dev/ttyUSB0' # USB TTL converter port
baud_rate = '57600'
serial_port = serial.Serial(port, baud_rate)
except Exception as e:
print(e)
sys.exit()
# Initialize sensor library with serial port connection
finger = AdafruitFingerprint(port=serial_port)
response = finger.vfy_pwd()
if response is not FINGERPRINT_PASSWORD_OK:
print('Did not find fingerprint sensor :(')
sys.exit()
print('Found Fingerprint Sensor!\n')
while True:
print('\nPlease type in the ID # (from 1 to 255) you want to delete...\n')
id = read_number()
print(f'Deleting ID #{id}\n')
if delete(finger=finger, page_id=id, num=1):
print(f'Fingerprint at ID #{id} has been successfully deleted.')
def read_number():
num = 0
while num < 1 or num > 255:
try:
num = int(input())
except ValueError:
print('Please provide an integer')
else:
if num < 1 or num > 255:
print('Please provide an integer in the above range')
return num
def delete(finger, page_id, num):
response = -1
response = finger.delete_char(page_id=page_id, num=num)
if response is FINGERPRINT_OK:
print('Deleted')
sys.stdout.flush()
return page_id
elif response is FINGERPRINT_PACKETRECEIVER:
print('Communication error')
elif response is FINGERPRINT_TEMPLATEDELETEFAIL:
print('Could not delete')
elif response is FINGERPRINT_BADLOCATION:
print('Could not delete in that location')
elif response is FINGERPRINT_FLASHER:
print('Error writing to flash')
else:
print('Unknown Error')
return False
__all__ = ['delete']
if __name__ == '__main__':
main()
|
the-stack_106_17997
|
"""Test runner runs a TFJob test."""
import argparse
import datetime
import httplib
import logging
import json
import os
import time
import uuid
from kubernetes import client as k8s_client
from kubernetes.client import rest
from google.cloud import storage # pylint: disable=no-name-in-module
from py import test_util
from py import util
from py import tf_job_client
def wait_for_delete(client,
namespace,
name,
timeout=datetime.timedelta(minutes=5),
polling_interval=datetime.timedelta(seconds=30),
status_callback=None):
"""Wait for the specified job to be deleted.
Args:
client: K8s api client.
namespace: namespace for the job.
name: Name of the job.
timeout: How long to wait for the job.
polling_interval: How often to poll for the status of the job.
status_callback: (Optional): Callable. If supplied this callable is
invoked after we poll the job. Callable takes a single argument which
is the job.
"""
crd_api = k8s_client.CustomObjectsApi(client)
end_time = datetime.datetime.now() + timeout
while True:
try:
results = crd_api.get_namespaced_custom_object(
tf_job_client.TF_JOB_GROUP, tf_job_client.TF_JOB_VERSION, namespace,
tf_job_client.TF_JOB_PLURAL, name)
except rest.ApiException as e:
if e.status == httplib.NOT_FOUND:
return
raise
if status_callback:
status_callback(results)
if datetime.datetime.now() + polling_interval > end_time:
raise util.TimeoutError(
"Timeout waiting for job {0} in namespace {1} to be deleted.".format(
name, namespace))
time.sleep(polling_interval.seconds)
def get_labels(name, runtime_id, replica_type=None, replica_index=None):
"""Return labels.
"""
labels = {
"kubeflow.org": "",
"tf_job_name": name,
"runtime_id": runtime_id,
}
if replica_type:
labels["job_type"] = replica_type
if replica_index:
labels["task_index"] = replica_index
return labels
def to_selector(labels):
parts = []
for k, v in labels.iteritems():
parts.append("{0}={1}".format(k, v))
return ",".join(parts)
def list_pods(client, namespace, label_selector):
core = k8s_client.CoreV1Api(client)
try:
pods = core.list_namespaced_pod(namespace, label_selector=label_selector)
return pods
except rest.ApiException as e:
message = ""
if e.message:
message = e.message
if e.body:
try:
body = json.loads(e.body)
except ValueError:
# There was a problem parsing the body of the response as json.
logging.error(
("Exception when calling DefaultApi->"
"apis_fqdn_v1_namespaces_namespace_resource_post. body: %s"), e.body)
raise
message = body.get("message")
logging.error(("Exception when calling DefaultApi->"
"apis_fqdn_v1_namespaces_namespace_resource_post: %s"),
message)
raise e
def run_test(args): # pylint: disable=too-many-branches,too-many-statements
"""Run a test."""
gcs_client = storage.Client(project=args.project)
project = args.project
cluster_name = args.cluster
zone = args.zone
# TODO(jlewi): When using GKE we should copy the .kube config and any other
# files to the test directory. We should then set the environment variable
# KUBECONFIG to point at that file. This should prevent us from having
# to rerun util.configure_kubectl on each step. Instead we could run it once
# as part of GKE cluster creation and store the config in the NFS directory.
# This would make the handling of credentials
# and KUBECONFIG more consistent between GKE and minikube and eventually
# this could be extended to other K8s deployments.
if cluster_name:
util.configure_kubectl(project, zone, cluster_name)
util.load_kube_config()
api_client = k8s_client.ApiClient()
salt = uuid.uuid4().hex[0:4]
# Create a new environment for this run
env = "test-env-{0}".format(salt)
util.run(["ks", "env", "add", env], cwd=args.app_dir)
name = None
namespace = None
for pair in args.params.split(","):
k, v = pair.split("=", 1)
if k == "name":
name = v
if k == "namespace":
namespace = v
util.run(
["ks", "param", "set", "--env=" + env, args.component, k, v],
cwd=args.app_dir)
if not name:
raise ValueError("name must be provided as a parameter.")
t = test_util.TestCase()
t.class_name = "tfjob_test"
t.name = os.path.basename(name)
if not namespace:
raise ValueError("namespace must be provided as a parameter.")
start = time.time()
try:
# We repeat the test multiple times.
# This ensures that if we delete the job we can create a new job with the
# same name.
# TODO(jlewi): We should make this an argument.
num_trials = 2
for trial in range(num_trials):
logging.info("Trial %s", trial)
util.run(["ks", "apply", env, "-c", args.component], cwd=args.app_dir)
logging.info("Created job %s in namespaces %s", name, namespace)
results = tf_job_client.wait_for_job(
api_client, namespace, name, status_callback=tf_job_client.log_status)
if results.get("status", {}).get("state", {}).lower() != "succeeded":
t.failure = "Trial {0} Job {1} in namespace {2} in state {3}".format(
trial, name, namespace,
results.get("status", {}).get("state", None))
logging.error(t.failure)
break
runtime_id = results.get("spec", {}).get("RuntimeId")
logging.info("Trial %s Job %s in namespace %s runtime ID %s", trial, name,
namespace, runtime_id)
# TODO(jlewi): We should check that pods were created for each replica
pod_labels = get_labels(name, runtime_id)
pod_selector = to_selector(pod_labels)
pods = list_pods(api_client, namespace, pod_selector)
logging.info("Trial %s selector: %s matched %s pods", trial, pod_selector,
len(pods.items))
if not pods.items:
t.failure = ("Trial {0} Job {1} in namespace {2} no pods found for "
" selector {3}").format(trial, name, namespace,
pod_selector)
logging.error(t.failure)
break
tf_job_client.delete_tf_job(api_client, namespace, name)
logging.info("Waiting for job %s in namespaces %s to be deleted.", name, namespace)
wait_for_delete(
api_client, namespace, name, status_callback=tf_job_client.log_status)
# Verify the pods have been deleted. tf_job_client uses foreground
# deletion so there shouldn't be any resources for the job left
# once the job is gone.
pods = list_pods(api_client, namespace, pod_selector)
logging.info("Trial %s selector: %s matched %s pods", trial, pod_selector,
len(pods.items))
if pods.items:
t.failure = ("Trial {0} Job {1} in namespace {2} pods found for "
" selector {3}; pods\n{4}").format(trial, name, namespace,
pod_selector, pods)
logging.error(t.failure)
break
logging.info("Trial %s all pods deleted.", trial)
# TODO(jlewi):
# Here are some validation checks to run:
# 1. Check that all resources are garbage collected.
# TODO(jlewi): Add an option to add chaos and randomly kill various resources?
# TODO(jlewi): Are there other generic validation checks we should
# run.
except util.TimeoutError:
t.failure = "Timeout waiting for {0} in namespace {1} to finish.".format(
name, namespace)
logging.error(t.failure)
except Exception as e: # pylint: disable-msg=broad-except
# TODO(jlewi): I'm observing flakes where the exception has message "status"
# in an effort to try to nail down this exception we print out more
# information about the exception.
logging.error("There was a problem running the job; Exception %s", e)
logging.error("There was a problem running the job; Exception "
"message: %s", e.message)
logging.error("Exception type: %s", e.__class__)
logging.error("Exception args: %s", e.args)
    # We want to catch all exceptions because we want to mark the test as failed.
t.failure = ("Exception occured; type {0} message {1}".format(
e.__class__, e.message))
finally:
t.time = time.time() - start
if args.junit_path:
test_util.create_junit_xml_file([t], args.junit_path, gcs_client)
def add_common_args(parser):
"""Add a set of common parser arguments."""
parser.add_argument(
"--project", default=None, type=str, help=("The project to use."))
parser.add_argument(
"--cluster", default=None, type=str, help=("The name of the cluster."))
parser.add_argument(
"--app_dir",
default=None,
type=str,
help="Directory containing the ksonnet app.")
parser.add_argument(
"--component",
default=None,
type=str,
help="The ksonnet component of the job to run.")
parser.add_argument(
"--params",
default=None,
type=str,
help="Comma separated list of key value pairs to set on the component.")
parser.add_argument(
"--zone",
default="us-east1-d",
type=str,
help=("The zone for the cluster."))
parser.add_argument(
"--junit_path",
default="",
type=str,
help="Where to write the junit xml file with the results.")
def build_parser():
# create the top-level parser
parser = argparse.ArgumentParser(description="Run a TFJob test.")
subparsers = parser.add_subparsers()
parser_test = subparsers.add_parser("test", help="Run a tfjob test.")
add_common_args(parser_test)
parser_test.set_defaults(func=run_test)
return parser
def main(): # pylint: disable=too-many-locals
logging.getLogger().setLevel(logging.INFO) # pylint: disable=too-many-locals
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
util.maybe_activate_service_account()
parser = build_parser()
# parse the args and call whatever function was selected
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
|
the-stack_106_17998
|
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmdet.core import bbox2roi, matrix_nms, multi_apply
from ..builder import HEADS, build_loss
INF = 1e8
from scipy import ndimage
def points_nms(heat, kernel=2):
# kernel must be 2
hmax = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=1)
keep = (hmax[:, :, :-1, :-1] == heat).float()
return heat * keep
def dice_loss(input, target):
input = input.contiguous().view(input.size()[0], -1)
target = target.contiguous().view(target.size()[0], -1).float()
a = torch.sum(input * target, 1)
b = torch.sum(input * input, 1) + 0.001
c = torch.sum(target * target, 1) + 0.001
d = (2 * a) / (b + c)
return 1 - d
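# Illustrative sanity check (not part of the original SOLO code): dice_loss on
# dummy masks. Perfect overlap gives a loss near 0, disjoint masks give ~1.
if __name__ == "__main__":
    _pred = torch.tensor([[[1.0, 1.0], [0.0, 0.0]]])    # shape (N, H, W)
    _target = torch.tensor([[[1.0, 1.0], [0.0, 0.0]]])
    print(dice_loss(_pred, _target))        # close to 0 (up to the 0.001 smoothing)
    print(dice_loss(_pred, 1.0 - _target))  # close to 1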
@HEADS.register_module
class SOLOHead(nn.Module):
def __init__(
self,
num_classes,
in_channels,
seg_feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
base_edge_list=(16, 32, 64, 128, 256),
scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)),
sigma=0.4,
num_grids=None,
cate_down_pos=0,
with_deform=False,
loss_ins=None,
loss_cate=None,
conv_cfg=None,
norm_cfg=None,
):
super(SOLOHead, self).__init__()
self.num_classes = num_classes
self.seg_num_grids = num_grids
self.cate_out_channels = self.num_classes - 1
self.in_channels = in_channels
self.seg_feat_channels = seg_feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.sigma = sigma
self.cate_down_pos = cate_down_pos
self.base_edge_list = base_edge_list
self.scale_ranges = scale_ranges
self.with_deform = with_deform
self.loss_cate = build_loss(loss_cate)
self.ins_loss_weight = loss_ins["loss_weight"]
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self._init_layers()
def _init_layers(self):
norm_cfg = dict(type="GN", num_groups=32, requires_grad=True)
self.ins_convs = nn.ModuleList()
self.cate_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels + 2 if i == 0 else self.seg_feat_channels
self.ins_convs.append(
ConvModule(
chn,
self.seg_feat_channels,
3,
stride=1,
padding=1,
norm_cfg=norm_cfg,
bias=norm_cfg is None,
)
)
chn = self.in_channels if i == 0 else self.seg_feat_channels
self.cate_convs.append(
ConvModule(
chn,
self.seg_feat_channels,
3,
stride=1,
padding=1,
norm_cfg=norm_cfg,
bias=norm_cfg is None,
)
)
self.solo_ins_list = nn.ModuleList()
for seg_num_grid in self.seg_num_grids:
self.solo_ins_list.append(
nn.Conv2d(self.seg_feat_channels, seg_num_grid ** 2, 1)
)
self.solo_cate = nn.Conv2d(
self.seg_feat_channels, self.cate_out_channels, 3, padding=1
)
def init_weights(self):
for m in self.ins_convs:
normal_init(m.conv, std=0.01)
for m in self.cate_convs:
normal_init(m.conv, std=0.01)
bias_ins = bias_init_with_prob(0.01)
for m in self.solo_ins_list:
normal_init(m, std=0.01, bias=bias_ins)
bias_cate = bias_init_with_prob(0.01)
normal_init(self.solo_cate, std=0.01, bias=bias_cate)
def forward(self, feats, eval=False):
new_feats = self.split_feats(feats)
featmap_sizes = [featmap.size()[-2:] for featmap in new_feats]
upsampled_size = (featmap_sizes[0][0] * 2, featmap_sizes[0][1] * 2)
ins_pred, cate_pred = multi_apply(
self.forward_single,
new_feats,
list(range(len(self.seg_num_grids))),
eval=eval,
upsampled_size=upsampled_size,
)
return ins_pred, cate_pred
def split_feats(self, feats):
return (
F.interpolate(feats[0], scale_factor=0.5, mode="bilinear"),
feats[1],
feats[2],
feats[3],
F.interpolate(feats[4], size=feats[3].shape[-2:], mode="bilinear"),
)
def forward_single(self, x, idx, eval=False, upsampled_size=None):
ins_feat = x
cate_feat = x
# ins branch
# concat coord
x_range = torch.linspace(-1, 1, ins_feat.shape[-1], device=ins_feat.device)
y_range = torch.linspace(-1, 1, ins_feat.shape[-2], device=ins_feat.device)
y, x = torch.meshgrid(y_range, x_range)
y = y.expand([ins_feat.shape[0], 1, -1, -1])
x = x.expand([ins_feat.shape[0], 1, -1, -1])
coord_feat = torch.cat([x, y], 1)
ins_feat = torch.cat([ins_feat, coord_feat], 1)
for i, ins_layer in enumerate(self.ins_convs):
ins_feat = ins_layer(ins_feat)
ins_feat = F.interpolate(ins_feat, scale_factor=2, mode="bilinear")
ins_pred = self.solo_ins_list[idx](ins_feat)
# cate branch
# conv step by step
        for i, cate_layer in enumerate(self.cate_convs):  # runs stacked_convs times
if i == self.cate_down_pos:
seg_num_grid = self.seg_num_grids[idx]
cate_feat = F.interpolate(
cate_feat, size=seg_num_grid, mode="bilinear"
) # align
cate_feat = cate_layer(cate_feat)
cate_pred = self.solo_cate(cate_feat)
if eval:
ins_pred = F.interpolate(
ins_pred.sigmoid(), size=upsampled_size, mode="bilinear"
)
cate_pred = points_nms(cate_pred.sigmoid(), kernel=2).permute(0, 2, 3, 1)
return ins_pred, cate_pred
def loss(
self,
ins_preds,
cate_preds,
gt_bbox_list,
gt_label_list,
gt_mask_list,
img_metas,
cfg,
gt_bboxes_ignore=None,
):
featmap_sizes = [featmap.size()[-2:] for featmap in ins_preds]
ins_label_list, cate_label_list, ins_ind_label_list = multi_apply(
self.solo_target_single,
gt_bbox_list,
gt_label_list,
gt_mask_list,
featmap_sizes=featmap_sizes,
)
# ins
ins_labels = [
torch.cat(
[
ins_labels_level_img[ins_ind_labels_level_img, ...]
for ins_labels_level_img, ins_ind_labels_level_img in zip(
ins_labels_level, ins_ind_labels_level
)
],
0,
)
for ins_labels_level, ins_ind_labels_level in zip(
zip(*ins_label_list), zip(*ins_ind_label_list)
)
]
ins_preds = [
torch.cat(
[
ins_preds_level_img[ins_ind_labels_level_img, ...]
for ins_preds_level_img, ins_ind_labels_level_img in zip(
ins_preds_level, ins_ind_labels_level
)
],
0,
)
for ins_preds_level, ins_ind_labels_level in zip(
ins_preds, zip(*ins_ind_label_list)
)
]
ins_ind_labels = [
torch.cat(
[
ins_ind_labels_level_img.flatten()
for ins_ind_labels_level_img in ins_ind_labels_level
]
)
for ins_ind_labels_level in zip(*ins_ind_label_list)
]
flatten_ins_ind_labels = torch.cat(ins_ind_labels)
num_ins = flatten_ins_ind_labels.sum()
# dice loss
loss_ins = []
for input, target in zip(ins_preds, ins_labels):
if input.size()[0] == 0:
continue
input = torch.sigmoid(input)
loss_ins.append(dice_loss(input, target))
loss_ins = torch.cat(loss_ins).mean()
loss_ins = loss_ins * self.ins_loss_weight
# cate
cate_labels = [
torch.cat(
[
cate_labels_level_img.flatten()
for cate_labels_level_img in cate_labels_level
]
)
for cate_labels_level in zip(*cate_label_list)
]
flatten_cate_labels = torch.cat(cate_labels)
cate_preds = [
cate_pred.permute(0, 2, 3, 1).reshape(-1, self.cate_out_channels)
for cate_pred in cate_preds
]
flatten_cate_preds = torch.cat(cate_preds)
loss_cate = self.loss_cate(
flatten_cate_preds, flatten_cate_labels, avg_factor=num_ins + 1
)
return dict(loss_ins=loss_ins, loss_cate=loss_cate)
def solo_target_single(
self, gt_bboxes_raw, gt_labels_raw, gt_masks_raw, featmap_sizes=None
):
device = gt_labels_raw[0].device
# ins
gt_areas = torch.sqrt(
(gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0])
* (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1])
)
ins_label_list = []
cate_label_list = []
ins_ind_label_list = []
for (lower_bound, upper_bound), stride, featmap_size, num_grid in zip(
self.scale_ranges, self.strides, featmap_sizes, self.seg_num_grids
):
ins_label = torch.zeros(
[num_grid ** 2, featmap_size[0], featmap_size[1]],
dtype=torch.uint8,
device=device,
)
cate_label = torch.zeros(
[num_grid, num_grid], dtype=torch.int64, device=device
)
ins_ind_label = torch.zeros(
[num_grid ** 2], dtype=torch.bool, device=device
)
hit_indices = (
((gt_areas >= lower_bound) & (gt_areas <= upper_bound))
.nonzero(as_tuple=False)
.flatten()
)
if len(hit_indices) == 0:
ins_label_list.append(ins_label)
cate_label_list.append(cate_label)
ins_ind_label_list.append(ins_ind_label)
continue
gt_bboxes = gt_bboxes_raw[hit_indices]
gt_labels = gt_labels_raw[hit_indices]
gt_masks = gt_masks_raw[hit_indices.cpu().numpy(), ...]
half_ws = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * self.sigma
half_hs = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) * self.sigma
output_stride = stride / 2
for seg_mask, gt_label, half_h, half_w in zip(
gt_masks, gt_labels, half_hs, half_ws
):
if seg_mask.sum() < 10:
continue
# mass center
upsampled_size = (featmap_sizes[0][0] * 4, featmap_sizes[0][1] * 4)
center_h, center_w = ndimage.measurements.center_of_mass(seg_mask)
coord_w = int((center_w / upsampled_size[1]) // (1.0 / num_grid))
coord_h = int((center_h / upsampled_size[0]) // (1.0 / num_grid))
# left, top, right, down
top_box = max(
0,
int(((center_h - half_h) / upsampled_size[0]) // (1.0 / num_grid)),
)
down_box = min(
num_grid - 1,
int(((center_h + half_h) / upsampled_size[0]) // (1.0 / num_grid)),
)
left_box = max(
0,
int(((center_w - half_w) / upsampled_size[1]) // (1.0 / num_grid)),
)
right_box = min(
num_grid - 1,
int(((center_w + half_w) / upsampled_size[1]) // (1.0 / num_grid)),
)
top = max(top_box, coord_h - 1)
down = min(down_box, coord_h + 1)
left = max(coord_w - 1, left_box)
right = min(right_box, coord_w + 1)
cate_label[top : (down + 1), left : (right + 1)] = gt_label
# ins
seg_mask = mmcv.imrescale(seg_mask, scale=1.0 / output_stride)
seg_mask = torch.Tensor(seg_mask)
for i in range(top, down + 1):
for j in range(left, right + 1):
label = int(i * num_grid + j)
ins_label[
label, : seg_mask.shape[0], : seg_mask.shape[1]
] = seg_mask
ins_ind_label[label] = True
ins_label_list.append(ins_label)
cate_label_list.append(cate_label)
ins_ind_label_list.append(ins_ind_label)
return ins_label_list, cate_label_list, ins_ind_label_list
def get_seg(self, seg_preds, cate_preds, img_metas, cfg, rescale=None):
assert len(seg_preds) == len(cate_preds)
num_levels = len(cate_preds)
featmap_size = seg_preds[0].size()[-2:]
result_list = []
for img_id in range(len(img_metas)):
cate_pred_list = [
cate_preds[i][img_id].view(-1, self.cate_out_channels).detach()
for i in range(num_levels)
]
seg_pred_list = [seg_preds[i][img_id].detach() for i in range(num_levels)]
img_shape = img_metas[img_id]["img_shape"]
scale_factor = img_metas[img_id]["scale_factor"]
ori_shape = img_metas[img_id]["ori_shape"]
cate_pred_list = torch.cat(cate_pred_list, dim=0)
seg_pred_list = torch.cat(seg_pred_list, dim=0)
result = self.get_seg_single(
cate_pred_list,
seg_pred_list,
featmap_size,
img_shape,
ori_shape,
scale_factor,
cfg,
rescale,
)
result_list.append(result)
return result_list
def get_seg_single(
self,
cate_preds,
seg_preds,
featmap_size,
img_shape,
ori_shape,
scale_factor,
cfg,
rescale=False,
debug=False,
):
assert len(cate_preds) == len(seg_preds)
# overall info.
h, w, _ = img_shape
upsampled_size_out = (featmap_size[0] * 4, featmap_size[1] * 4)
# process.
inds = cate_preds > cfg.score_thr
# category scores.
cate_scores = cate_preds[inds]
if len(cate_scores) == 0:
return None
# category labels.
inds = inds.nonzero(as_tuple=False)
cate_labels = inds[:, 1]
# strides.
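        # size_trans holds the cumulative number of grid cells per level, so each
        # flattened prediction index can be mapped back to the stride of its level.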
size_trans = cate_labels.new_tensor(self.seg_num_grids).pow(2).cumsum(0)
strides = cate_scores.new_ones(size_trans[-1])
n_stage = len(self.seg_num_grids)
strides[: size_trans[0]] *= self.strides[0]
for ind_ in range(1, n_stage):
strides[size_trans[ind_ - 1] : size_trans[ind_]] *= self.strides[ind_]
strides = strides[inds[:, 0]]
# masks.
seg_preds = seg_preds[inds[:, 0]]
seg_masks = seg_preds > cfg.mask_thr
sum_masks = seg_masks.sum((1, 2)).float()
# filter.
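        # Discard masks whose pixel area does not exceed the stride of the level
        # they were predicted at (removes tiny, unreliable masks).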
keep = sum_masks > strides
if keep.sum() == 0:
return None
seg_masks = seg_masks[keep, ...]
seg_preds = seg_preds[keep, ...]
sum_masks = sum_masks[keep]
cate_scores = cate_scores[keep]
cate_labels = cate_labels[keep]
# mask scoring.
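        # Re-weight each category score by the mean soft-mask confidence inside
        # the binarised mask ("maskness").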
seg_scores = (seg_preds * seg_masks.float()).sum((1, 2)) / sum_masks
cate_scores *= seg_scores
# sort and keep top nms_pre
sort_inds = torch.argsort(cate_scores, descending=True)
if len(sort_inds) > cfg.nms_pre:
sort_inds = sort_inds[: cfg.nms_pre]
seg_masks = seg_masks[sort_inds, :, :]
seg_preds = seg_preds[sort_inds, :, :]
sum_masks = sum_masks[sort_inds]
cate_scores = cate_scores[sort_inds]
cate_labels = cate_labels[sort_inds]
# Matrix NMS
cate_scores = matrix_nms(
seg_masks,
cate_labels,
cate_scores,
kernel=cfg.kernel,
sigma=cfg.sigma,
sum_masks=sum_masks,
)
# filter.
keep = cate_scores >= cfg.update_thr
if keep.sum() == 0:
return None
seg_preds = seg_preds[keep, :, :]
cate_scores = cate_scores[keep]
cate_labels = cate_labels[keep]
# sort and keep top_k
sort_inds = torch.argsort(cate_scores, descending=True)
if len(sort_inds) > cfg.max_per_img:
sort_inds = sort_inds[: cfg.max_per_img]
seg_preds = seg_preds[sort_inds, :, :]
cate_scores = cate_scores[sort_inds]
cate_labels = cate_labels[sort_inds]
seg_preds = F.interpolate(
seg_preds.unsqueeze(0), size=upsampled_size_out, mode="bilinear"
)[:, :, :h, :w]
seg_masks = F.interpolate(
seg_preds, size=ori_shape[:2], mode="bilinear"
).squeeze(0)
seg_masks = seg_masks > cfg.mask_thr
return seg_masks, cate_labels, cate_scores
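# --- Hedged usage sketch (not part of the original file) ---------------------
# get_seg_single above reads its post-processing thresholds from an
# attribute-style `cfg` object. The sketch below lists the attribute names the
# method actually accesses; the concrete values are assumptions chosen only for
# illustration and should be taken from the model's real test_cfg.
from types import SimpleNamespace
example_test_cfg = SimpleNamespace(
    score_thr=0.1,      # category-score threshold applied before gathering masks
    mask_thr=0.5,       # binarisation threshold for the predicted soft masks
    nms_pre=500,        # number of top-scoring candidates kept before Matrix NMS
    kernel="gaussian",  # Matrix NMS decay kernel
    sigma=2.0,          # Matrix NMS sigma
    update_thr=0.05,    # score threshold applied after Matrix NMS
    max_per_img=100,    # maximum number of instances returned per image
)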
|
the-stack_106_17999
|
#!/usr/bin/python
#
# this script attempts to turn doc comment attributes (#[doc = "..."])
# into sugared-doc-comments (/** ... */ and /// ...)
#
# it sugarises all .rs/.rc files underneath the working directory
#
import sys, os, fnmatch, re
DOC_PATTERN = '^(?P<indent>[\\t ]*)#\\[(\\s*)doc(\\s*)=' + \
'(\\s*)"(?P<text>(\\"|[^"])*?)"(\\s*)\\]' + \
'(?P<semi>;)?'
ESCAPES = [("\\'", "'"),
('\\"', '"'),
("\\n", "\n"),
("\\r", "\r"),
("\\t", "\t")]
def unescape(s):
for (find, repl) in ESCAPES:
s = s.replace(find, repl)
return s
def block_trim(s):
lns = s.splitlines()
# remove leading/trailing whitespace-lines
while lns and not lns[0].strip():
lns = lns[1:]
while lns and not lns[-1].strip():
lns = lns[:-1]
# remove leading horizontal whitespace
    n = sys.maxsize
    for ln in lns:
        if ln.strip():
            n = min(n, len(re.search(r'^\s*', ln).group()))
    if n != sys.maxsize:
        lns = [ln[n:] for ln in lns]
# strip trailing whitespace
lns = [ln.rstrip() for ln in lns]
return lns
def replace_doc(m):
indent = m.group('indent')
text = block_trim(unescape(m.group('text')))
if len(text) > 1:
inner = '!' if m.group('semi') else '*'
starify = lambda s: indent + ' *' + (' ' + s if s else '')
text = '\n'.join(map(starify, text))
repl = indent + '/*' + inner + '\n' + text + '\n' + indent + ' */'
    else:
        inner = '!' if m.group('semi') else '/'
        # an empty doc attribute (e.g. #[doc = ""]) yields an empty sugared comment
        repl = indent + '//' + inner + (' ' + text[0] if text else '')
return repl
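# --- Hedged example (not part of the original script) ------------------------
# A minimal, manually-invoked demonstration of the rewrite on an in-memory
# string; the sample doc attribute is invented for illustration. With the
# default input below, _demo() returns '/// Adds two to its argument.'
def _demo(sample='#[doc = "Adds two to its argument."]'):
    return re.sub(re.compile(DOC_PATTERN, re.MULTILINE | re.DOTALL),
                  replace_doc, sample)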
def sugarise_file(path):
    with open(path) as f:
        s = f.read()
    r = re.compile(DOC_PATTERN, re.MULTILINE | re.DOTALL)
    ns = re.sub(r, replace_doc, s)
    if s != ns:
        with open(path, 'w') as f:
            f.write(ns)
for (dirpath, dirnames, filenames) in os.walk('.'):
for name in fnmatch.filter(filenames, '*.r[sc]'):
sugarise_file(os.path.join(dirpath, name))
|
the-stack_106_18000
|
"""
Contains tests to test the Status class, which is responsible for
storing a NAGIOS exit status and corresponding message
"""
import pytest
from pynagios.status import Status
class TestStatus(object):
def test_status_comparison(self):
"""
Tests __cmp__ operator of Status class
"""
        a = Status('OK', 0)
        b = Status('OK', 0)
        assert a == b
        assert a is not b
        assert Status('Test', 0) < Status('Test', 1)
        assert Status('Test', 1) > Status('Test', 0)
|
the-stack_106_18001
|
# clean_lambda_functions.py
# Package Imports
import boto3
from botocore.exceptions import ClientError
# Module Imports
import helpers
# Cleaner Settings
RESOURCE_NAME = "Lambda Function"
WHITELIST_NAME = "lambda_functions"
BOTO3_NAME = "lambda"
BOTO3_LIST_FUNCTION = "list_functions"
def clean_lambda_functions() -> list:
"""Main ordering for cleaning lambda functions.
Returns:
A list of all terminated functions
"""
helpers.starting_clean_print(RESOURCE_NAME)
lambda_client = boto3.client(BOTO3_NAME)
functions = get_functions(lambda_client)
terminated_functions = delete_functions(lambda_client, functions)
helpers.finished_clean_print(RESOURCE_NAME, terminated_functions)
return terminated_functions
def get_functions(lambda_client) -> list:
"""Gets all the lambda functions in an account.
Args:
lambda_client: A lambda boto3 client.
Returns:
A list of all lambda functions in the account.
"""
function_list = []
paginator = lambda_client.get_paginator(BOTO3_LIST_FUNCTION)
pages = paginator.paginate()
for page in pages:
function_list = function_list + page["Functions"]
return function_list
def delete_functions(lambda_client, function_list) -> list:
    """Deletes all lambda functions in the function_list parameter.
    Args:
        lambda_client: A lambda boto3 client
        function_list: A list of lambda functions you want deleted.
    Returns:
        A list of the deleted function names; error strings are appended for
        functions that could not be deleted.
"""
terminated_functions = []
for lambda_function in function_list:
function_name = lambda_function["FunctionName"]
if helpers.check_in_whitelist(function_name, WHITELIST_NAME):
continue
try:
lambda_client.delete_function(
FunctionName=function_name
)
except ClientError as error:
error_string = "{0} on {1} - {2}".format(error, RESOURCE_NAME,
function_name)
print(error_string)
terminated_functions.append(error_string)
continue
terminated_functions.append(lambda_function["FunctionName"])
return terminated_functions
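# --- Hedged usage sketch (not part of the original module) -------------------
# clean_lambda_functions() is the module's entry point; the guard below is an
# illustrative way to run it directly and assumes AWS credentials and the
# helpers/whitelist configuration are already in place.
if __name__ == "__main__":
    deleted = clean_lambda_functions()
    print("Processed {0} function(s)".format(len(deleted)))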
|
the-stack_106_18002
|
import os
import shutil
import datetime as dt
import xlsxwriter as xl
# Spreadsheet columns holding the eight EMG channels (columns D..K).
alf = ("D", "E", "F", "G", "H", "I", "J", "K")
nameWorkDir = "./datasets/"
nameDataFile = "./data.xlsx"
if not os.path.exists(nameWorkDir):
    os.mkdir(nameWorkDir, 0o777)
def createWorkBook():
    """Create a fresh workbook with a header row and reset the row counter."""
    dis = {
        "A1": "date time",
        "B1": "time seconds",
        "C1": "pose arm",
        "D1": "EMG1",
        "E1": "EMG2",
        "F1": "EMG3",
        "G1": "EMG4",
        "H1": "EMG5",
        "I1": "EMG6",
        "J1": "EMG7",
        "K1": "EMG8",
    }
    global count
    count = 2
    workBook = xl.Workbook(nameWorkDir + nameDataFile)
    workSheet = workBook.add_worksheet()
    for cell in dis:
        workSheet.write(cell, dis[cell])
    return workBook, workSheet
workBook, workSheet = createWorkBook()
def addEmg(timeDate, timeSeconds, poseArm, emg):
    """Append one sample (timestamp, elapsed seconds, pose label, eight EMG values) as a row."""
    global count
    workSheet.write("A" + str(count), str(timeDate))
    workSheet.write("B" + str(count), str(timeSeconds))
    workSheet.write("C" + str(count), poseArm)
    for i in range(0, 8):
        workSheet.write(alf[i] + str(count), str(emg[i]))
    count += 1
def saveTable():
    """Close the workbook, archive it under a timestamped name and start a new one."""
    global workBook
    global workSheet
    if not os.path.exists(nameWorkDir):
        os.mkdir(nameWorkDir, 0o777)
    # Timestamp without colons so the archived file name is valid on every platform.
    newNameDataFile = dt.datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f") + ".xlsx"
    workBook.close()
    shutil.move(nameWorkDir + nameDataFile, nameWorkDir + newNameDataFile)
    workBook, workSheet = createWorkBook()
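# --- Hedged usage sketch (not part of the original script) -------------------
# A minimal illustration of how addEmg and saveTable are meant to be called;
# the pose label and EMG values below are invented for the example.
if __name__ == "__main__":
    addEmg(dt.datetime.now(), 0.0, "fist", (12, 40, 7, 3, 55, 18, 9, 22))
    saveTable()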
|
the-stack_106_18003
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the function "cube_manipulation.sort_coord_in_cube".
"""
import unittest
import iris
import numpy as np
from iris.coords import AuxCoord
from iris.tests import IrisTest
from improver.utilities.cube_manipulation import sort_coord_in_cube
from improver.utilities.warnings_handler import ManageWarnings
from ...set_up_test_cubes import set_up_variable_cube
class Test_sort_coord_in_cube(IrisTest):
"""Class to test the sort_coord_in_cube function."""
def setUp(self):
"""Set up ascending and descending cubes"""
self.ascending_height_points = np.array(
[5., 10., 20.], dtype=np.float32)
self.descending_height_points = np.flip(self.ascending_height_points)
self.data = np.array(
[np.ones((3, 3)), 2*np.ones((3, 3)), 3*np.ones((3, 3))],
dtype=np.float32)
self.ascending_cube = set_up_variable_cube(self.data)
self.ascending_cube.coord("realization").rename("height")
self.ascending_cube.coord("height").points = (
self.ascending_height_points)
self.ascending_cube.coord("height").units = "m"
self.descending_cube = self.ascending_cube.copy()
self.descending_cube.coord("height").points = (
self.descending_height_points)
def test_ascending_then_ascending(self):
"""Test that the sorting successfully sorts the cube based
on the points within the given coordinate. The points in the resulting
cube should now be in ascending order."""
expected_data = self.data
coord_name = "height"
result = sort_coord_in_cube(self.ascending_cube, coord_name)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(self.ascending_cube.coord_dims(coord_name),
result.coord_dims(coord_name))
self.assertArrayAlmostEqual(
self.ascending_height_points,
result.coord(coord_name).points)
self.assertArrayAlmostEqual(result.data, expected_data)
def test_auxcoord(self):
"""Test that the above sorting is successful when an AuxCoord is
used."""
expected_data = self.data
coord_name = "height_aux"
height_coord = self.ascending_cube.coord('height')
height_coord_index, = self.ascending_cube.coord_dims('height')
new_coord = AuxCoord(height_coord.points, long_name=coord_name)
self.ascending_cube.add_aux_coord(new_coord, height_coord_index)
result = sort_coord_in_cube(self.ascending_cube, coord_name)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(self.ascending_cube.coord_dims(coord_name),
result.coord_dims(coord_name))
self.assertArrayAlmostEqual(
self.ascending_height_points,
result.coord(coord_name).points)
self.assertArrayAlmostEqual(result.data, expected_data)
def test_ascending_then_descending(self):
"""Test that the sorting successfully sorts the cube based
on the points within the given coordinate. The points in the resulting
cube should now be in descending order."""
expected_data = np.flip(self.data)
coord_name = "height"
result = sort_coord_in_cube(
self.ascending_cube, coord_name, descending=True)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(self.descending_cube.coord_dims(coord_name),
result.coord_dims(coord_name))
self.assertArrayAlmostEqual(
self.descending_height_points, result.coord(coord_name).points)
self.assertArrayAlmostEqual(result.data, expected_data)
def test_descending_then_ascending(self):
"""Test that the sorting successfully sorts the cube based
on the points within the given coordinate. The points in the resulting
cube should now be in ascending order."""
expected_data = np.flip(self.data)
coord_name = "height"
result = sort_coord_in_cube(self.descending_cube, coord_name)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(self.ascending_cube.coord_dims(coord_name),
result.coord_dims(coord_name))
self.assertArrayAlmostEqual(
self.ascending_height_points, result.coord(coord_name).points)
self.assertArrayAlmostEqual(result.data, expected_data)
def test_descending_then_descending(self):
"""Test that the sorting successfully sorts the cube based
on the points within the given coordinate. The points in the resulting
cube should now be in descending order."""
expected_data = self.data
coord_name = "height"
result = sort_coord_in_cube(
self.descending_cube, coord_name, descending=True)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(self.descending_cube.coord_dims(coord_name),
result.coord_dims(coord_name))
self.assertArrayAlmostEqual(
self.descending_height_points, result.coord(coord_name).points)
self.assertArrayAlmostEqual(result.data, expected_data)
def test_latitude(self):
"""Test that the sorting successfully sorts the cube based
on the points within the given coordinate (latitude).
The points in the resulting cube should now be in descending order."""
expected_data = np.array(
[[[1.00, 1.00, 1.00],
[1.00, 1.00, 1.00],
[6.00, 1.00, 1.00]],
[[2.00, 2.00, 2.00],
[2.00, 2.00, 2.00],
[6.00, 2.00, 2.00]],
[[3.00, 3.00, 3.00],
[3.00, 3.00, 3.00],
[6.00, 3.00, 3.00]]])
self.ascending_cube.data[:, 0, 0] = 6.0
expected_points = np.flip(self.ascending_cube.coord("latitude").points)
coord_name = "latitude"
result = sort_coord_in_cube(
self.ascending_cube, coord_name, descending=True)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(
self.ascending_cube.coord_dims(coord_name),
result.coord_dims(coord_name))
self.assertArrayAlmostEqual(
expected_points, result.coord(coord_name).points)
self.assertArrayAlmostEqual(result.data, expected_data)
@ManageWarnings(record=True)
def test_warn_raised_for_circular_coordinate(self, warning_list=None):
"""Test that a warning is successfully raised when circular
coordinates are sorted."""
self.ascending_cube.data[:, 0, 0] = 6.0
coord_name = "latitude"
self.ascending_cube.coord(coord_name).circular = True
result = sort_coord_in_cube(
self.ascending_cube, coord_name, descending=True)
self.assertTrue(any(item.category == UserWarning
for item in warning_list))
warning_msg = "The latitude coordinate is circular."
self.assertTrue(any(warning_msg in str(item)
for item in warning_list))
self.assertIsInstance(result, iris.cube.Cube)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_18004
|
"""
Copyright 2016-2017 Ellation, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from mock import call, Mock, patch
# For local application imports, context_paths must be first despite lexicon ordering
import context_paths
from ef_config import EFConfig
from ef_template_resolver import EFTemplateResolver
from ef_conf_utils import get_account_alias
TEST_PROFILE = get_account_alias("test")
TEST_REGION = EFConfig.DEFAULT_REGION
TEST_ENV = "test"
TEST_SERVICE = "none"
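# PARAMS below exercises the default -> environment overlay behaviour tested in
# this module: values under the current environment key (TEST_ENV) override
# those under "default", while context symbols such as {{ENV}} always come from
# the resolver itself and are never overridden by the parameter file.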
PARAMS = """{
"params":{
"default":{
"one": "default one",
"two": "default two",
"o": "o",
"ne": "ne",
"/_-.": "slashunderscoredashdot",
".": "dot",
"my-thing": "my-hyphen-thing"
},
"alpha":{
"blah": "unused",
"two": "alpha two",
"one": "alpha one"
},
""" +\
"\"" + TEST_ENV + "\"" + """:{
"one": "testenv one",
"two": "testenv two",
"ENV": "myenvironmentshouldnotoverride"
},
"staging": {
"one": "staging one"
}
}
}
"""
ILLEGAL_COMMA_PARAMS = """{
"params":{
"legal_key_name": "valid_value",
"illegal,key_name": "valid value"
}
}
"""
class TestEFTemplateResolver(unittest.TestCase):
"""Tests for `ef_template_resolver.py`."""
def setUp(self):
"""
Setup function that is run before every test
Returns:
None
"""
mock_cloud_formation_client = Mock(name="Mock CloudFormation Client")
mock_cloud_front_client = Mock(name="Mock CloudFront Client")
mock_ec2_client = Mock(name="Mock EC2 Client")
mock_iam_client = Mock(name="Mock IAM Client")
mock_iam_client.get_user.return_value = {"User": {"Arn": "::::111111111:"}}
mock_iam_client.list_account_aliases.return_value = {"AccountAliases": ["alphaaccount"]}
mock_kms_client = Mock(name="Mock KMS Client")
mock_lambda_client = Mock(name="Mock Lambda Client")
mock_route_53_client = Mock(name="Mock Route 53 Client")
mock_s3_client = Mock(name="Mock S3 Client")
mock_sts_client = Mock(name="Mock STS Client")
mock_waf_client = Mock(name="Mock WAF Client")
mock_session = Mock(name="Mock Client")
self.test_params_json = os.path.join(os.path.dirname(__file__), '../test_data/parameters/test.cnf.parameters.json')
self.test_params_yaml = os.path.join(os.path.dirname(__file__), '../test_data/parameters/test.cnf.parameters.yml')
self._clients = {
"cloudformation": mock_cloud_formation_client,
"cloudfront": mock_cloud_front_client,
"ec2": mock_ec2_client,
"iam": mock_iam_client,
"kms": mock_kms_client,
"lambda": mock_lambda_client,
"route53": mock_route_53_client,
"s3": mock_s3_client,
"sts": mock_sts_client,
"waf": mock_waf_client,
"SESSION": mock_session
}
def tearDown(self):
"""
Teardown function that is run after every test.
Returns:
None
"""
pass
@patch('ef_template_resolver.create_aws_clients')
def test_resolution(self, mock_create_aws):
"""Do context symbols resolve correctly"""
mock_create_aws.return_value = self._clients
test_string = "{{one}}|{{two}}|{{/_-.}}|{{ENV}}"
resolver = EFTemplateResolver(profile=TEST_PROFILE, env=TEST_ENV, region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), "testenv one|testenv two|slashunderscoredashdot|test")
@patch('ef_template_resolver.create_aws_clients')
def test_leading_dot(self, mock_create_aws):
"""Do symbols with a leading dot render correctly"""
mock_create_aws.return_value = self._clients
test_string = "{{.one}}"
resolver = EFTemplateResolver(profile=TEST_PROFILE, env=TEST_ENV, region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), "testenv one")
@patch('ef_template_resolver.create_aws_clients')
def test_leading_dot_context(self, mock_create_aws):
"""Do context symbols with a leading dot render correctly"""
mock_create_aws.return_value = self._clients
test_string = "{{.ENV}}"
resolver = EFTemplateResolver(profile=TEST_PROFILE, env=TEST_ENV, region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), TEST_ENV)
@patch('ef_template_resolver.create_aws_clients')
def test_newline_literal(self, mock_create_aws):
"""Do newline literals get converted to newlines"""
mock_create_aws.return_value = self._clients
test_string = "foo\nbar"
resolver = EFTemplateResolver(profile=TEST_PROFILE, env=TEST_ENV, region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), "foo\nbar")
@patch('ef_template_resolver.create_aws_clients')
def test_newline_literal_against_raw(self, mock_create_aws):
"""Another check to make sure newline literals are not mistakenly written as r'\n'"""
mock_create_aws.return_value = self._clients
test_string = "foo\nbar"
resolver = EFTemplateResolver(profile=TEST_PROFILE, env=TEST_ENV, region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertNotEqual(resolver.render(), r'foo\nbar')
@patch('ef_template_resolver.create_aws_clients')
def test_embedded_symbols(self, mock_create_aws):
"""Does a symbol built from other symbols resolve correctly"""
mock_create_aws.return_value = self._clients
test_string = "{{{{o}}{{ne}}}}"
resolver = EFTemplateResolver(profile=TEST_PROFILE, env=TEST_ENV, region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), "testenv one")
@patch('ef_template_resolver.create_aws_clients')
def test_unresolved_symbols(self, mock_create_aws):
"""Are unresolved symbols stored and reported, and non-symbols ignored"""
mock_create_aws.return_value = self._clients
test_string = "{{cannot_resolve}}{{not a symbo}}{{notasymbol?}}{{cannot_resolve}}"
resolver = EFTemplateResolver(profile=TEST_PROFILE, env=TEST_ENV, region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.unresolved_symbols(), set(["cannot_resolve"]))
@patch('ef_template_resolver.create_aws_clients')
def test_hierarchical_overlays(self, mock_create_aws):
"""Is the hierarchy of default..env applied correctly"""
mock_create_aws.return_value = self._clients
test_string = "{{one}}|{{two}}|{{my-thing}}"
resolver = EFTemplateResolver(profile=TEST_PROFILE, env=TEST_ENV, region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), "testenv one|testenv two|my-hyphen-thing")
@patch('ef_template_resolver.create_aws_clients')
def test_context_vars_protected(self, mock_create_aws):
"""Context vars like {{ENV}} are not overridden even if present in template"""
mock_create_aws.return_value = self._clients
test_string = "{{ENV}}"
resolver = EFTemplateResolver(profile=TEST_PROFILE, env=TEST_ENV, region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), TEST_ENV)
@patch('ef_template_resolver.create_aws_clients')
def test_fully_qualified_env(self, mock_create_aws):
"""Does {{ENV_FULL}} resolve correctly"""
mock_create_aws.return_value = self._clients
# alpha0
test_string = "{{ENV_FULL}}"
resolver = EFTemplateResolver(profile=get_account_alias("alpha0"),
env="alpha0", region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), "alpha0")
    # test
resolver = EFTemplateResolver(profile=get_account_alias("test"),
env="test", region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), "test")
# mgmt.testaccount
resolver = EFTemplateResolver(profile=get_account_alias("mgmt.testaccount"),
env="mgmt.testaccount", region=TEST_REGION, service=TEST_SERVICE)
resolver.load(test_string, PARAMS)
self.assertEqual(resolver.render(), "mgmt.testaccount")
@patch('ef_template_resolver.create_aws_clients')
def test_load_json_file(self, mock_create_aws):
"""Does {{one}} resolve correctly from json parameters file"""
mock_create_aws.return_value = self._clients
test_string = "{{one}}"
resolver = EFTemplateResolver(profile=get_account_alias("alpha0"),
env="alpha0", region=TEST_REGION, service=TEST_SERVICE)
with open(self.test_params_json) as json_file:
resolver.load(test_string, json_file)
self.assertEqual(resolver.render(), "alpha one")
@patch('ef_template_resolver.create_aws_clients')
def test_load_yaml_file(self, mock_create_aws):
"""Does {{one}} resolve correctly from yaml parameters file"""
mock_create_aws.return_value = self._clients
test_string = "{{one}}"
resolver = EFTemplateResolver(profile=get_account_alias("alpha0"),
env="alpha0", region=TEST_REGION, service=TEST_SERVICE)
with open(self.test_params_yaml) as yaml_file:
resolver.load(test_string, yaml_file)
self.assertEqual(resolver.render(), "alpha one")
@patch('ef_template_resolver.create_aws_clients')
  def test_render_multiline_string_from_string(self, mock_create_aws):
    """Does {{multi}} resolve correctly as a multiline string from the json parameters file"""
mock_create_aws.return_value = self._clients
test_string = "{{multi}}"
resolver = EFTemplateResolver(profile=get_account_alias("test"),
env="test", region=TEST_REGION, service=TEST_SERVICE)
with open(self.test_params_json) as json_file:
resolver.load(test_string, json_file)
self.assertEqual(resolver.render(), "thisisareallylongstringthatcoversmultiple\nlinesfortestingmultilinestrings")
@patch('ef_template_resolver.create_aws_clients')
  def test_render_multiline_string_from_list(self, mock_create_aws):
    """Does {{multi2}} resolve correctly as a multiline string built from a list in the json parameters file"""
mock_create_aws.return_value = self._clients
test_string = "{{multi2}}"
resolver = EFTemplateResolver(profile=get_account_alias("test"),
env="test", region=TEST_REGION, service=TEST_SERVICE)
with open(self.test_params_json) as json_file:
resolver.load(test_string, json_file)
self.assertEqual(resolver.render(), "one\ntwo\nthree")
@patch('ef_template_resolver.create_aws_clients')
def test_render_multiline_string(self, mock_create_aws):
"""Does {{multi}} resolve correctly as a multiline string from yaml parameters file"""
mock_create_aws.return_value = self._clients
test_string = "{{multi}}"
resolver = EFTemplateResolver(profile=get_account_alias("test"),
env="test", region=TEST_REGION, service=TEST_SERVICE)
with open(self.test_params_yaml) as yaml_file:
resolver.load(test_string, yaml_file)
self.assertEqual(resolver.render(), "thisisareallylongstringthatcoversmultiple\nlinesfortestingmultilinestrings")
|