filename | text
stringlengths 13-19 | stringlengths 134-1.04M
---|---|
the-stack_106_28975 | import io
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def create_lr_sched(start_epoch, n_epochs, lr0=1e-3, lr_end=1e-9, warmup=True):
"""
start_epoch: epoch at which to start decaying
n_epochs: total number of epochs
lr0: initial learning rate
lr_end: final learning rate value
warmup: whether to gradually increase the learning rate over the first few epochs
return: learning rate scheduler function with the given parameters.
"""
def sched(epoch):
exp_range = np.log10(lr0/lr_end)
epoch_ratio = (epoch - start_epoch)/(n_epochs - start_epoch)
warmup_epochs = int(np.log10(lr0/lr_end))
if warmup and epoch < warmup_epochs:
lr = lr_end * (10 ** (epoch))
elif epoch < start_epoch:
lr = lr0
else:
lr = lr0 * 10**-(exp_range * epoch_ratio)
return lr
return sched
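# Usage sketch (an assumption for illustration, not from the original file): because
# sched takes only the epoch index, it can be wrapped in a Keras callback, e.g.:
#   lr_sched = create_lr_sched(start_epoch=10, n_epochs=100, lr0=1e-3, lr_end=1e-6)
#   lr_callback = tf.keras.callbacks.LearningRateScheduler(lr_sched)
#   model.fit(x_train, y_train, epochs=100, callbacks=[lr_callback])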
def export_embeddings(embeddings, path):
with open(path, 'w') as f:
text = '\n'.join(
'\t'.join(str(v) for v in e)
for e in embeddings)
f.write(text)
def export_vocabulary(path, vocab_size, word_index):
with open(path, 'w') as f:
# padding
f.writelines(['0\n'])
words = list(word_index.keys())
if '\n' in words:
index = words.index('\n')
words.remove('\n')
words.insert(index, '\\n')
f.write('\n'.join(words[:vocab_size]))
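# Illustrative sketch (assumed workflow; the tokenizer and layer index are hypothetical):
# the two exporters above write TSV files that can be loaded into the TensorBoard
# Embedding Projector, e.g.:
#   embedding_matrix = model.layers[0].get_weights()[0]
#   export_embeddings(embedding_matrix, 'vecs.tsv')
#   export_vocabulary('meta.tsv', vocab_size=10000, word_index=tokenizer.word_index)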
def plot_series(x, y, scale='log'):
fig = plt.figure()
sub = fig.add_subplot()
sub.set_yscale(scale)
sub.plot(x, y)
plt.show()
def plot_hist(history, key, path, with_val=True, sufix=''):
train_series = history.history[key]
epochs = range(len(train_series))
plt.xlabel('Epochs')
plt.ylabel(key)
plt.plot(epochs, train_series, color='blue')
if with_val:
val_series = history.history['val_' + key]
plt.plot(epochs, val_series, color='red')
plt.legend(['training', 'validation'])
else:
plt.legend(['training'])
return plt.show()
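# Example call (a sketch; assumes `history` comes from Keras model.fit with validation
# data; note that `path` is accepted but not used by the body above):
#   history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=20)
#   plot_hist(history, 'loss', path='loss.png')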
|
the-stack_106_28976 | from .channel import Channel
from .exceptions import SlackClientError
from .slackrequest import SlackRequest
from .user import User
from .util import SearchList, SearchDict
import json
import logging
import time
import random
from requests.packages.urllib3.util.url import parse_url
from ssl import SSLError
from websocket import create_connection
from websocket._exceptions import WebSocketConnectionClosedException
class Server(object):
"""
The Server object owns the websocket connection and all attached channel information.
"""
def __init__(self, token=None, connect=True, proxies=None, **kwargs):
# Slack app configs
self.token = token
# api configs
self.proxies = proxies
# HTTP Request handler
self.api_requester = SlackRequest(proxies=proxies)
# Workspace metadata
self.username = None
self.domain = None
self.login_data = None
self.users = SearchDict()
self.channels = SearchList()
# RTM configs
self.websocket = None
self.ws_url = None
self.connected = False
self.auto_reconnect = False
self.last_connected_at = 0
self.reconnect_count = 0
self.rtm_connect_retries = 0
# Connect to RTM on load
if connect:
self.rtm_connect()
def __eq__(self, compare_str):
if compare_str == self.domain or compare_str == self.token:
return True
else:
return False
def __hash__(self):
return hash(self.token)
def __str__(self):
"""
Example Output::
username : None
domain : None
websocket : None
users : []
login_data : None
api_requester : <slackclient.slackrequest.SlackRequest
channels : []
token : xoxb-asdlfkyadsofii7asdf734lkasdjfllakjba7zbu
connected : False
ws_url : None
"""
data = ""
for key in list(self.__dict__.keys()):
data += "{} : {}\n".format(key, str(self.__dict__[key])[:40])
return data
def __repr__(self):
return self.__str__()
def append_user_agent(self, name, version):
self.api_requester.append_user_agent(name, version)
def rtm_connect(self, reconnect=False, timeout=None, use_rtm_start=True, **kwargs):
"""
Connects to the RTM API - https://api.slack.com/rtm
If `auto_reconnect` is set to `True` when the SlackClient is initialized, this method
will be used to reconnect on websocket read failures, which indicate disconnection
:Args:
reconnect (boolean) Whether this method is being called to reconnect to RTM
timeout (int): Stop waiting for Web API response after this many seconds
use_rtm_start (boolean): `True` to connect using `rtm.start` or
`False` to connect using `rtm.connect`
https://api.slack.com/rtm#connecting_with_rtm.connect_vs._rtm.start
:Returns:
None
"""
# rtm.start returns user and channel info, rtm.connect does not.
connect_method = "rtm.start" if use_rtm_start else "rtm.connect"
# If the `auto_reconnect` param was passed, set the server's `auto_reconnect` attr
if "auto_reconnect" in kwargs:
self.auto_reconnect = kwargs["auto_reconnect"]
# If this is an auto reconnect, rate limit reconnect attempts
if self.auto_reconnect and reconnect:
# Raise a SlackConnectionError after 5 retries within 3 minutes
recon_count = self.reconnect_count
if recon_count == 5:
logging.error("RTM connection failed, reached max reconnects.")
raise SlackConnectionError(
"RTM connection failed, reached max reconnects."
)
# Wait to reconnect if the last reconnect was less than 3 minutes ago
if (time.time() - self.last_connected_at) < 180:
if recon_count > 0:
# Back off after the first attempt
backoff_offset_multiplier = random.randint(1, 4)
retry_timeout = (
backoff_offset_multiplier * recon_count * recon_count
)
logging.debug("Reconnecting in %d seconds", retry_timeout)
time.sleep(retry_timeout)
self.reconnect_count += 1
else:
self.reconnect_count = 0
reply = self.api_requester.do(
self.token, connect_method, post_data=kwargs, timeout=timeout
)
if reply.status_code != 200:
if self.rtm_connect_retries < 5 and reply.status_code == 429:
self.rtm_connect_retries += 1
retry_after = int(reply.headers.get("retry-after", 120))
logging.debug(
"HTTP 429: Rate limited. Retrying in %d seconds", retry_after
)
time.sleep(retry_after)
self.rtm_connect(
reconnect=reconnect,
timeout=timeout,
use_rtm_start=use_rtm_start,
**kwargs
)
else:
raise SlackConnectionError(
"RTM connection attempt was rate limited 5 times."
)
else:
self.rtm_connect_retries = 0
login_data = reply.json()
if login_data["ok"]:
self.ws_url = login_data["url"]
self.connect_slack_websocket(self.ws_url)
if not reconnect:
self.parse_slack_login_data(login_data, use_rtm_start)
else:
raise SlackLoginError(reply=reply)
def parse_slack_login_data(self, login_data, use_rtm_start):
self.login_data = login_data
self.domain = self.login_data["team"]["domain"]
self.username = self.login_data["self"]["name"]
# if the connection was made via rtm.start, update the server's state
if use_rtm_start:
self.parse_channel_data(login_data["channels"])
self.parse_channel_data(login_data["groups"])
self.parse_user_data(login_data["users"])
self.parse_channel_data(login_data["ims"])
def connect_slack_websocket(self, ws_url):
"""Uses http proxy if available"""
if self.proxies and "http" in self.proxies:
parts = parse_url(self.proxies["http"])
proxy_host, proxy_port = parts.host, parts.port
auth = parts.auth
proxy_auth = auth and auth.split(":")
else:
proxy_auth, proxy_port, proxy_host = None, None, None
try:
self.websocket = create_connection(
ws_url,
http_proxy_host=proxy_host,
http_proxy_port=proxy_port,
http_proxy_auth=proxy_auth,
)
self.connected = True
self.last_connected_at = time.time()
logging.debug("RTM connected")
self.websocket.sock.setblocking(0)
except Exception as e:
self.connected = False
raise SlackConnectionError(message=str(e))
def parse_channel_data(self, channel_data):
for channel in channel_data:
if "name" not in channel:
channel["name"] = channel["id"]
if "members" not in channel:
channel["members"] = []
self.attach_channel(channel["name"], channel["id"], channel["members"])
def parse_user_data(self, user_data):
for user in user_data:
if "tz" not in user:
user["tz"] = "unknown"
if "real_name" not in user:
user["real_name"] = user["name"]
if "email" not in user["profile"]:
user["profile"]["email"] = ""
self.attach_user(
user["name"],
user["id"],
user["real_name"],
user["tz"],
user["profile"]["email"],
)
def send_to_websocket(self, data):
"""
Send a JSON message directly to the websocket. See
`RTM documentation <https://api.slack.com/rtm>`_ for allowed types.
:Args:
data (dict) the key/values to send the websocket.
"""
try:
data = json.dumps(data)
self.websocket.send(data)
except Exception:
self.rtm_connect(reconnect=True, use_rtm_start=False)
def rtm_send_message(self, channel, message, thread=None, reply_broadcast=None):
"""
Sends a message to a given channel.
:Args:
channel (str) - the string identifier for a channel or channel name (e.g. 'C1234ABC',
'bot-test' or '#bot-test')
message (str) - the string you'd like to send to the channel
thread (str or None) - the parent message ID, if sending to a
thread
reply_broadcast (bool) - if messaging a thread, whether to
also send the message back to the channel
:Returns:
None
"""
message_json = {"type": "message", "channel": channel, "text": message}
if thread is not None:
message_json["thread_ts"] = thread
if reply_broadcast:
message_json["reply_broadcast"] = True
self.send_to_websocket(message_json)
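# Usage sketch (an assumption for illustration; the token and channel ID are placeholders):
#   server = Server(token="xoxb-my-bot-token", connect=True)
#   server.rtm_send_message("C1234ABC", "build finished")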
def ping(self):
return self.send_to_websocket({"type": "ping"})
def websocket_safe_read(self):
"""
Returns data if available, otherwise ''. Newlines indicate multiple
messages
"""
data = ""
while True:
try:
data += "{0}\n".format(self.websocket.recv())
except SSLError as e:
if e.errno == 2:
# errno 2 occurs when trying to read or write data, but more
# data needs to be received on the underlying TCP transport
# before the request can be fulfilled.
#
# Python 2.7.9+ and Python 3.3+ give this its own exception,
# SSLWantReadError
return ""
raise
except WebSocketConnectionClosedException:
logging.debug("RTM disconnected")
self.connected = False
if self.auto_reconnect:
self.rtm_connect(reconnect=True, use_rtm_start=False)
else:
raise SlackConnectionError(
"Unable to send due to closed RTM websocket"
)
return data.rstrip()
def attach_user(self, name, user_id, real_name, tz, email):
self.users.update({user_id: User(self, name, user_id, real_name, tz, email)})
def attach_channel(self, name, channel_id, members=None):
if members is None:
members = []
if self.channels.find(channel_id) is None:
self.channels.append(Channel(self, name, channel_id, members))
def join_channel(self, name, timeout=None):
"""
Join a channel by name.
Note: this action is not allowed by bots; they must be invited to channels.
"""
response = self.api_call("channels.join", channel=name, timeout=timeout)
return response
def api_call(self, token, request="?", timeout=None, **kwargs):
"""
Call the Slack Web API as documented here: https://api.slack.com/web
:Args:
request (str): The API method to call. See here for a list: https://api.slack.com/methods
:Kwargs:
(optional) timeout: stop waiting for a response after a given number of seconds
(optional) kwargs: any arguments passed here will be bundled and sent to the api
requester as post_data
and will be passed along to the API.
Example::
sc.server.api_call(
"channels.setPurpose",
channel="CABC12345",
purpose="Writing some code!"
)
Returns:
str -- returns HTTP response text and headers as JSON.
Examples::
u'{"ok":true,"purpose":"Testing bots"}'
or
u'{"ok":false,"error":"channel_not_found"}'
See here for more information on responses: https://api.slack.com/web
"""
response = self.api_requester.do(token, request, kwargs, timeout=timeout)
response_json = {}
resp_text = response.text
if resp_text:
response_json = json.loads(resp_text)
response_json["headers"] = dict(response.headers)
return json.dumps(response_json)
# TODO: Move the error types defined below into the .exceptions namespace. This would be a semver
# major change because any clients already referencing these types in order to catch them
# specifically would need to deal with the symbol names changing.
class SlackConnectionError(SlackClientError):
def __init__(self, message="", reply=None):
super(SlackConnectionError, self).__init__(message)
self.reply = reply
class SlackLoginError(SlackClientError):
def __init__(self, message="", reply=None):
super(SlackLoginError, self).__init__(message)
self.reply = reply
|
the-stack_106_28977 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
# Do not import changelog_unittest.ChangeLogTest directly as that will cause it to be run again.
from webkitpy.common.checkout import changelog_unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.preparechangelog import PrepareChangeLog
class PrepareChangeLogTest(changelog_unittest.ChangeLogTest):
def test_resolve_existing_entry(self):
step = PrepareChangeLog(MockTool(), MockOptions())
headers = ["2013-01-18 Timothy Loh <[email protected]>\n\n",
"2013-01-20 Timothy Loh <[email protected]>\n\n",
u"2009-08-17 Tor Arne Vestb\xf8 <[email protected]>\n\n",
u"2009-08-18 Tor Arne Vestb\xf8 <[email protected]>\n\n",
"2013-01-18 Eric Seidel <[email protected]>\n\n",
"2013-01-20 Eric Seidel <[email protected]>\n\n",
]
bug_descs = [" prepare-Changelog should support updating the list of changed files\n",
" webkit-patch upload should support updating the list of changed files\n"]
bug_url = " https://bugs.webkit.org/show_bug.cgi?id=74358\n\n"
descriptions = ["", " A description of the changes.\n\n",
" A description.\n\n With some\n line breaks\n\n"]
changes = [
""" * Scripts/webkitpy/tool/steps/preparechangelog.py:
(PrepareChangeLog):
(PrepareChangeLog.run):\n\n""",
""" * Scripts/webkitpy/tool/steps/preparechangelog.py:
(PrepareChangeLog._resolve_existing_entry):
(PrepareChangeLog):
(PrepareChangeLog.run):\n\n""",
""" * Scripts/webkitpy/tool/steps/preparechangelog.py:
(PrepareChangeLog): Some annotations
(PrepareChangeLog.run):
More annotations\n\n""",
""" * Scripts/webkitpy/tool/steps/preparechangelog.py:
(PrepareChangeLog): Some annotations
(PrepareChangeLog.run):
More annotations
* Scripts/webkitpy/tool/steps/preparechangelog.py:
(PrepareChangeLog._resolve_existing_entry):
(PrepareChangeLog):
(PrepareChangeLog.run):\n\n""",
]
def make_entry(indices):
a, b, c, d = indices
return headers[a] + bug_descs[b] + bug_url + descriptions[c] + changes[d]
test_cases = [((0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0)),
((0, 0, 0, 0), (0, 0, 1, 0), (0, 0, 1, 0)),
((1, 0, 0, 0), (0, 0, 2, 0), (1, 0, 2, 0)),
((0, 1, 0, 0), (0, 0, 1, 0), (0, 1, 1, 0)),
((0, 0, 0, 1), (0, 0, 0, 0), (0, 0, 0, 1)),
((0, 0, 0, 0), (0, 0, 1, 1), (0, 0, 1, 0)),
((0, 0, 0, 0), (0, 0, 2, 2), (0, 0, 2, 2)),
((0, 0, 0, 1), (0, 0, 1, 2), (0, 0, 1, 3)),
((1, 1, 0, 1), (0, 0, 0, 2), (1, 1, 0, 3)),
((3, 0, 0, 0), (2, 0, 1, 0), (3, 0, 1, 0)),
((4, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0)),
((5, 0, 0, 0), (0, 0, 0, 0), (1, 0, 0, 0)),
((0, 0, 0, 0), (4, 0, 0, 0), (4, 0, 0, 0)),
((1, 0, 0, 0), (4, 0, 0, 0), (5, 0, 0, 0)),
]
for new, old, final in test_cases:
new_entry = make_entry(new)
old_entry = make_entry(old)
start_file = new_entry + old_entry + self._rolled_over_footer
final_entry = make_entry(final)
end_file = final_entry + self._rolled_over_footer
path = "ChangeLog"
step._tool.filesystem = MockFileSystem()
step._tool.filesystem.write_text_file(path, start_file)
step._resolve_existing_entry(path)
actual_output = step._tool.filesystem.read_text_file(path)
self.assertEquals(actual_output, end_file)
def test_ensure_bug_url(self):
capture = OutputCapture()
step = PrepareChangeLog(MockTool(), MockOptions())
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
changelog_path = "ChangeLog"
state = {
"bug_title": "Example title",
"bug_id": 1234,
"changelogs": [changelog_path],
}
step._tool.filesystem = MockFileSystem()
step._tool.filesystem.write_text_file(changelog_path, changelog_contents)
capture.assert_outputs(self, step._ensure_bug_url, [state])
actual_contents = step._tool.filesystem.read_text_file(changelog_path)
expected_message = "Example title\n http://example.com/1234"
expected_contents = changelog_contents.replace("Need a short description (OOPS!).\n Need the bug URL (OOPS!).", expected_message)
self.assertEqual(actual_contents, expected_contents)
|
the-stack_106_28978 | import pytest
from typing import Callable
import torch
from torch.optim import Optimizer, Adam, AdamW
from torch.optim.lr_scheduler import _LRScheduler, LambdaLR
from simple_model import args_from_dict, SimpleModel, random_dataloader
from common import distributed_test
from util import required_torch_version
import deepspeed
from deepspeed.ops.adam import FusedAdam
from deepspeed.runtime.lr_schedules import WARMUP_LR, WarmupLR
from deepspeed.runtime.config import ADAM_OPTIMIZER
from deepspeed.runtime.utils import see_memory_usage
@pytest.mark.parametrize('zero_stage,world_size', [(0, 1), (3, 1)])
def test_no_optim(zero_stage, world_size):
if zero_stage == 3 and not required_torch_version():
pytest.skip("zero-3 param offload requires at least torch 1.8")
ds_config = {
'train_batch_size': world_size,
'fp16': {
'enabled': True
},
'zero_optimization': {
"stage": zero_stage,
"offload_param": {
"device": "cpu"
}
}
}
# 20B test
#hidden_dim = 16 * 1024
hidden_dim = 4
@distributed_test(world_size=[world_size])
def _go(hidden_dim):
with deepspeed.zero.Init(enabled=zero_stage == 3, config_dict_or_path=ds_config):
model = SimpleModel(hidden_dim, nlayers=78)
print('total number of parameters:',
sum([p.numel() for p in model.parameters()]))
see_memory_usage('pre-init', force=True)
model, _, _, _ = deepspeed.initialize(model=model, config=ds_config)
see_memory_usage('post-init', force=True)
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.half)
print(f"optimizer={model.optimizer}")
for batch in data_loader:
model(batch[0], batch[1])
see_memory_usage('post-fwds', force=True)
_go(hidden_dim)
@pytest.mark.parametrize('optimizer_type', [None, Optimizer, Callable])
def test_client_optimizer(tmpdir, optimizer_type):
def _optimizer_callable(params) -> Optimizer:
return AdamW(params=params)
hidden_dim = 10
model = SimpleModel(hidden_dim)
config_dict = {'train_batch_size': 1}
if optimizer_type is None:
client_optimizer = None
config_dict['optimizer'] = {'type': ADAM_OPTIMIZER}
elif optimizer_type is Optimizer:
client_optimizer = Adam(model.parameters())
else:
client_optimizer = _optimizer_callable
args = args_from_dict(tmpdir, config_dict)
@distributed_test(world_size=[1])
def _test_client_optimizer(args, model, client_optimizer):
_, ds_optimizer, _, _ = deepspeed.initialize(args=args,
model=model,
model_parameters=list(model.parameters()),
optimizer=client_optimizer)
if client_optimizer is None:
assert isinstance(ds_optimizer, FusedAdam)
elif isinstance(client_optimizer, Optimizer):
assert ds_optimizer == client_optimizer
else:
assert isinstance(ds_optimizer, AdamW)
_test_client_optimizer(args=args, model=model, client_optimizer=client_optimizer)
@pytest.mark.parametrize('scheduler_type, optimizer_type',
[(None,
None),
(None,
Optimizer),
(None,
Callable),
(_LRScheduler,
None),
(_LRScheduler,
Optimizer),
(_LRScheduler,
Callable),
(Callable,
None),
(Callable,
Optimizer),
(Callable,
Callable)])
def test_client_lr_scheduler(tmpdir, scheduler_type, optimizer_type):
def _my_lambda(epoch):
return epoch // 10
def _optimizer_callable(params) -> Optimizer:
return torch.optim.AdamW(params=params)
def _lr_scheduler_callable(optimizer) -> _LRScheduler:
return LambdaLR(optimizer, _my_lambda)
hidden_dim = 10
model = SimpleModel(hidden_dim)
config_dict = {'train_batch_size': 1}
client_optimizer = None
client_scheduler = None
if optimizer_type is None:
config_dict['optimizer'] = {'type': ADAM_OPTIMIZER}
elif optimizer_type is Optimizer:
client_optimizer = torch.optim.Adam(model.parameters())
else:
client_optimizer = _optimizer_callable
if scheduler_type is None:
config_dict['scheduler'] = {'type': WARMUP_LR, 'params': {}}
elif scheduler_type == _LRScheduler:
if isinstance(client_optimizer, Optimizer):
client_scheduler = LambdaLR(client_optimizer, _my_lambda)
else:
# Verify invalid combination is correctly handled
client_scheduler = LambdaLR(torch.optim.Adam(model.parameters()), _my_lambda)
else:
client_scheduler = _lr_scheduler_callable
args = args_from_dict(tmpdir, config_dict)
@distributed_test(world_size=[1])
def _test_client_lr_scheduler(args, model, optimizer, lr_scheduler):
if isinstance(lr_scheduler,
_LRScheduler) and not isinstance(optimizer,
Optimizer):
with pytest.raises(AssertionError):
_, _, _, _ = deepspeed.initialize(args=args,
model=model,
model_parameters=list(model.parameters()),
optimizer=optimizer,
lr_scheduler=lr_scheduler)
else:
_, _, _, ds_lr_scheduler = deepspeed.initialize(args=args,
model=model,
model_parameters=list(model.parameters()),
optimizer=optimizer,
lr_scheduler=lr_scheduler)
if lr_scheduler is None:
assert isinstance(ds_lr_scheduler, WarmupLR)
elif isinstance(lr_scheduler, _LRScheduler):
assert ds_lr_scheduler == lr_scheduler
else:
assert isinstance(ds_lr_scheduler, LambdaLR)
_test_client_lr_scheduler(args=args,
model=model,
optimizer=client_optimizer,
lr_scheduler=client_scheduler)
|
the-stack_106_28979 | """Onnx Model Tools."""# coding=utf-8
#
# /************************************************************************************
# ***
# *** Copyright Dell 2021, All Rights Reserved.
# ***
# *** File Author: Dell, 2021-03-23 (Tuesday) 12:42:57 CST
# ***
# ************************************************************************************/
#
#
# nodes = onnx_model.graph.node
# dir(onnx_model)
# ['graph', 'ir_version', 'model_version', 'opset_import', 'producer_name', 'producer_version']
# dir(onnx_model.graph)
# ['initializer', 'input', 'name', 'node', 'output']
import argparse
import os
import sys
import torch
import onnx
from onnx import numpy_helper
import pdb
class GraphParser:
'''
make_graph(nodes,name,inputs,outputs,initializer=None,doc_string=None,value_info=[])
nodes: NodeProto list, e.g: [node1, node2, node3, ...]
name: String
inputs: ValueInfoProto list
outputs: ValueInfoProto list
initializer: TensorProto list
doc_string: String
value_info: ValueInfoProto list
'''
def __init__(self, graph):
self.graph = graph
self.used_vars = set()
self.var_type = {}
self.var_shape = {}
self.node_attr_type = {}
self.node_attr_ints = {}
self.const_type = {}
self.const_dims = {}
def parse(self):
# init used_vars
for o in self.graph.output:
# etype = o.type.tensor_type.elem_type
# ndims = o.type.tensor_type.shape
self.used_vars.add(o.name)
node_list = [n for n in self.graph.node]
need_checking = True
while need_checking:
need_checking = False
for n in node_list:
if self.node_used(n):
# Add all node.input to used_vars
for i in n.input:
self.used_vars.add(i)
need_checking = True
node_list.remove(n)
break
for i in self.graph.input:
# name = i.name
# etype = i.type.tensor_type.elem_type
# shape = i.type.tensor_type.shape
print("i -- ", i)
name, dtype, shape = self.tensor_value_info_parse(i)
self.var_type[name] = dtype
self.var_shape[name] = shape
print(name, dtype, shape)
for o in self.graph.output:
# name = graph_input[i].name
# etype = system_t_data_type_names[graph_input[i].type.tensor_type.elem_type]
# ndims = str(graph_input[i].type.tensor_type.shape).count("dim {")
print("o --- ", o)
name, dtype, shape = self.tensor_value_info_parse(o)
self.var_type[name] = dtype
self.var_shape[name] = shape
print(name, dtype, shape)
for w in self.graph.initializer:
# TensorProto
print(w.name, w.data_type, w.dims)
self.const_type[w.name] = w.data_type
self.const_dims[w.name] = w.dims
# w_step = 16
# w_name = "w_" + w.name.replace(".", "_") + "_buf"
# print(f"uint8_t {w_name}[] = {{ ")
# for i in range(0, len(w.raw_data), w_step):
# w_bytes = ', '.join(['0x%02x'%b for b in w.raw_data[i : i + w_step]])
# print(f" {w_bytes},")
# print(f"}};")
for n in self.graph.node:
print(n)
attr_type, attr_ints = self.node_attribute_parse(n)
self.node_attr_type[n.name] = attr_type
self.node_attr_ints[n.name] = attr_ints
def var_used(self, name):
return name in self.used_vars
def node_used(self, node):
for o in node.output:
if self.var_used(o):
return True
return False
def tensor_value_info_parse(self, t):
# make_tensor_value_info(name,elem_type,shape,doc_string="",shape_denotation=None) --> ValueInfoProto
name = t.name
etype = t.type.tensor_type.elem_type
shape = t.type.tensor_type.shape
return name, etype, shape
def node_attribute_parse(self, node):
# make_attribute(key,value,doc_string=None) --> AttributeProto
attr_ints = {}
attr_type = {}
for a in node.attribute:
if (len(a.ints)) == 0:
attr_ints[a.name] = a.i
else:
attr_ints[a.name] = a.ints
attr_type[a.name] = a.type
return attr_type, attr_ints
# https://pytorch.org/docs/stable/onnx.html
# https://github.com/onnx/onnx
# https://github.com/onnx/onnx/blob/master/docs/IR.md
# https://github.com/onnx/onnx/blob/master/onnx/onnx.proto3
system_c_data_type_names = {
0: "UNDEFINED",
1: "float",
2: "uint8_t",
3: "int8_t",
4: "uint16_t",
5: "int16_t",
6: "int32_t",
7: "int64_t",
8: "string",
9: "bool",
10: "FLOAT16",
11: "double",
12: "uint32_t",
13: "uint64_t",
14: "COMPLEX64",
15: "COMPLEX128",
16: "BFLOAT16"
}
system_t_data_type_names = {
0: "?", # "UNDEFINED",
1: "f", # "float",
2: "?", # "uint8_t",
3: "?", # "int8_t",
4: "?", # "uint16_t",
5: "?", # "int16_t",
6: "i", # "int32_t",
7: "?", # "int64_t",
8: "?", # "string",
9: "?", # "bool",
10: "?", # "FLOAT16",
11: "?", # "double",
12: "?", # "uint32_t",
13: "?", # "uint64_t",
14: "?", # "COMPLEX64",
15: "?", # "COMPLEX128",
16: "?", # "BFLOAT16"
}
system_node_operator_functions = {}
def register_attribute_functions(name, afunc):
system_node_operator_functions[name] = afunc
def MaxPool(node, attr_type, attr_ints):
ndim = len(attr_ints['kernel_shape'])
parameters = []
parameters += [str(i) for i in attr_ints['kernel_shape']]
parameters += [str(i) for i in attr_ints['strides']]
parameters += [str(i) for i in attr_ints['pads'][0:2]]
output = []
output.append("MaxPool{}d".format(ndim))
output.append("<")
output.append(", ".join(parameters))
output.append(">")
return "".join(output)
def Conv(node, attr_type, attr_ints):
ndim = len(attr_ints['kernel_shape'])
parameters = []
parameters += [str(i) for i in attr_ints['kernel_shape']]
parameters += [str(i) for i in attr_ints['strides']]
parameters += [str(i) for i in attr_ints['pads'][0:2]]
parameters += [str(i) for i in attr_ints['dilations']]
output = []
output.append("Conv{}d".format(ndim))
output.append("<")
output.append(", ".join(parameters))
output.append(">")
return "".join(output)
def AveragePool(node, attr_type, attr_ints):
ndim = len(attr_ints['kernel_shape'])
parameters = []
parameters += [str(i) for i in attr_ints['kernel_shape']]
parameters += [str(i) for i in attr_ints['strides']]
output = []
output.append("AveragePool{}d".format(ndim))
output.append("<")
output.append(", ".join(parameters))
output.append(">")
return "".join(output)
def get_forward_args(graph_input):
'''
t4::tensor2f AlexNetForward(const AlexNet& ctx, t4::tensor4f xinput_1)
'''
output = []
for i in range(len(graph_input)):
name = graph_input[i].name
etype = system_t_data_type_names[graph_input[i].type.tensor_type.elem_type]
ndims = str(graph_input[i].type.tensor_type.shape).count("dim {")
output.append("t4::tensor{}{} x{}".format(ndims, etype, name))
return ", ".join(output)
def get_forward_return(graph_output):
'''
t4::tensor2f AlexNetForward(const AlexNet& ctx, t4::tensor4f xinput_1)
'''
output = []
for i in range(len(graph_output)):
name = graph_output[i].name
etype = system_t_data_type_names[graph_output[i].type.tensor_type.elem_type]
ndims = str(graph_output[i].type.tensor_type.shape).count("dim {")
output.append("t4::tensor{}{}".format(ndims, etype))
return ", ".join(output)
def get_forward_declare(model, class_name):
'''
t4::tensor2f AlexNetForward(const AlexNet& ctx, t4::tensor4f xinput_1)
'''
output = "{} {}Forward(const {}& ctx, {})".format(
get_forward_return(model.graph.output),
class_name, class_name,
get_forward_args(model.graph.input))
return output
def get_node_operators(node):
# [name: "dilations"
# ints: [1, 1]
# type: INTS
# , name: "group"
# i: 1
# type: INT
# , name: "kernel_shape"
# ints: [11, 11]
# type: INTS
# , name: "strides"
# ints: [4, 4]
# type: INTS
# ]
# if node.op_type == 'Clip':
# pdb.set_trace()
if not node.op_type in system_node_operator_functions:
return node.op_type
attr_ints = {}
attr_type = {}
for a in node.attribute:
if (len(a.ints)) == 0:
attr_ints[a.name] = a.i
else:
attr_ints[a.name] = a.ints
attr_type[a.name] = a.type
# attr_ints
# {'dilations': [1, 1], 'group': 1, 'kernel_shape': [3, 3], 'pads': [1, 1, 1, 1], 'strides': [1, 1]}
output = system_node_operator_functions[node.op_type](node, attr_type, attr_ints)
return output
def create_head_file(onnx_model, class_name):
'''
struct AlexNet
{
t4::tensor4f features_0_weight;
...
t4::tensor1f classifier_6_bias;
};
'''
# weights = model.graph.initializer
print("Create file {}.h ...".format(class_name))
# Include header file
output = []
output.append('#include "tensor4.h"')
output.append("")
# Define class structure
output.append("struct " + class_name + " {")
for w in onnx_model.graph.initializer:
output.append(" t4::tensor{}f {};".format(len(w.dims), w.name))
pdb.set_trace()
output.append("};")
output.append("")
output.append("{} {}Load(const char* filename);\n".format(class_name, class_name))
output.append("")
# Define forward function
output.append(get_forward_declare(onnx_model, class_name) + ";")
# with open(class_name + ".h", "w") as source_h:
# source_h.write("\n".join(output))
print("--------------------------------------------------------")
print("\n".join(output))
print("--------------------------------------------------------")
print("OK.")
def create_bin_file(onnx_model, class_name):
print("Create file {}.bin ...".format(class_name))
print("OK.")
def create_cpp_file(onnx_model, class_name):
'''
#include "AlexNet.h"
AlexNet AlexNetLoad(const char* filename)
{
AlexNet ctx;
t4::model_dict dict = t4::load(filename);
dict.load(ctx.features_0_weight, "features.0.weight", 64, 3, 11, 11);
...
return ctx;
}
'''
print("Create file {}.cpp ...".format(class_name))
output = []
output.append('#include "{}.h"'.format(class_name))
output.append("")
output.append("{} {}Load(const char *filename)".format(class_name, class_name))
# {
output.append("{")
output.append(" {} ctx;".format(class_name))
output.append(" t4::model_dict dict = t4::load(filename);")
for w in onnx_model.graph.initializer:
wstr = ", ".join([str(e) for e in w.dims])
output.append(' dict.load(ctx.{}, "{}", {});'.format(w.name, w.name, wstr))
output.append(" return ctx;")
# }
output.append("}")
output.append("")
output.append("")
'''
t4::tensor2f AlexNetForward(const AlexNet& ctx, t4::tensor4f xinput_1)
{
t4::tensor4f x17 = t4::Conv2d<11, 11, 4, 4, 2, 2, 1, 1>(xinput_1, ctx.features_0_weight, ctx.features_0_bias); //features.0
t4::release(xinput_1);
return x43;
}
'''
weights = {}
for w in onnx_model.graph.initializer:
weights[w.name] = w.dims
output.append(get_forward_declare(onnx_model, class_name))
# {
output.append("{")
for node in onnx_model.graph.node:
node_input = [("ctr." + e) if e in weights else ("x" + e) for e in node.input]
node_output = [("ctr." + e) if e in weights else ("x" + e) for e in node.output]
node_function = get_node_operators(node)
output.append(" auto {} = t4::{}({})".format(
", ".join(node_output), node_function, ", ".join(node_input)) + ";")
output.append("")
output.append(" return {};".format(", ".join(node_output)))
#}
output.append("}")
# with open(class_name + ".cpp", "w") as source_cpp:
# source_cpp.write("\n".join(output))
print("--------------------------------------------------------")
print("\n".join(output))
print("--------------------------------------------------------")
print("OK.")
if __name__ == '__main__':
"""Onnx tools ..."""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-m', '--model', help="onnx model file", default='test.onnx')
parser.add_argument('-n', '--network', help="network name", default='XAlexNet')
args = parser.parse_args()
if not os.path.exists(args.model):
print("Onnx model does not exist, stop.")
sys.exit(-1)
register_attribute_functions("Conv", Conv)
register_attribute_functions("MaxPool", MaxPool)
register_attribute_functions("AveragePool", AveragePool)
model = onnx.load(args.model)
onnx_parser = GraphParser(model.graph)
onnx_parser.parse()
pdb.set_trace()
if os.path.exists("{}.h".format(args.network)):
print("File {}.h exist, stop.".format(args.network))
sys.exit(-1)
else:
create_head_file(model, args.network)
create_bin_file(model, args.network)
if os.path.exists("{}.cpp".format(args.network)):
print("File {}.cpp exist, stop.".format(args.network))
sys.exit(-1)
else:
create_cpp_file(model, args.network)
|
the-stack_106_28980 | from simula_random import simula_random
from simula_2 import simula2
def run():
"""Main function to run simulation"""
stock_tickers = ['LPP', 'INVESTORMS', 'PEKAO', 'KGHM', 'INGBSK','KETY','WAWEL','BZWBK','PZU','STALPROD','PKOBP','CCC', 'BOGDANKA','PKNORLEN','BUDIMEX','PGE', 'PULAWY',
'AMICA','HANDLOWY','CYFRPLSAT','LOTOS','INTERCARS','EUROCASH','AMREST','ASSECOPOL','COMARCH','CEZ', 'ENEA','KERNEL','CIECH','ZPUE','NEUCA','ELBUDOWA',
'APATOR','GROCLIN','ASTARTA','PEP', 'ORBIS']
# UI_021.txt Generali Akcje Malych i Srednich Spolek
tfi_tickers = ['UI_021']
tfi_location = "/mnt/d/databases/tfi/omegafun"
total_trans = 0
total_balance = 0.0
cnt_win = 0
cnt_loose = 0
for ticker in tfi_tickers:
(ct, tb, cw, cl) = simula_random(ticker, tfi_location)
total_trans += ct
total_balance += tb
cnt_win += cw
cnt_loose += cl
print("TOTAL: transactions=%d, total balance=%.2f, wins=%d, loose=%d" % (total_trans, total_balance, cnt_win, cnt_loose))
if __name__ == '__main__':
run()
|
the-stack_106_28981 | #-------------------------------------------------------------------------------
# Name: CONNECT 4 GAME
# Purpose: PROJECT FOR GAME DEVELOPMENT IN OOP
#
# Author: GROUP CATACUTAN, PASCUAL, LAURENT, VENERACION
#
# Created: 30/10/2019
# Copyright: (c) XENON_XEIN_XENLY 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
import pygame as pg
from pygame.locals import *
from connect_4_images import * # BLIT, RECT, SCREEN
from connect_4_scene_switch import *
class LAURENT():
def __init__(self):
self.BACK = False
self.BLUE_CURSOR = False
self.SHOW_CURSOR_BACK = False
self.__FADE = False
self.counter = 0
def cursor_toggle(self):
if self.SHOW_CURSOR_BACK:
image._CURSOR_BACK() #RED BACK CURSOR
if self.BLUE_CURSOR:
image._CURSOR_MAIN() #BLUE CURSOR
def SHOW_FADE_TO_LAURENT(self,n):
image.LAURENT.set_alpha(n)
BLIT(image.LAURENT,image.ORIGIN)
def laurent_event_handler(self):
mouse_pos = get_MOUSEPOS()
if -1000 < mouse_pos[0] < 284:
self.BACK = True
self.BLUE_CURSOR = False
self.SHOW_CURSOR_BACK = True
else:
self.SHOW_CURSOR_BACK = False
self.BLUE_CURSOR = True
self.BACK = False
for event in pg.event.get():
if event.type == QUIT:
scenes.create_scene('EXIT DIALOG')
elif event.type == KEYDOWN:
if event.key == K_f: #---------------- f
game_window.toggle_fullscreen()
elif event.key == K_m:
sounds.toggle_mute('CREDITS')
elif event.type == MOUSEBUTTONDOWN:
click = CLICK()
if click[0]:
if self.BACK:
scenes.from_outside = False
self.__FADE = True
scenes.from_inside = True
scenes.create_scene('CREDITS')
self.BACK = False
def start_LAURENT(self):
self.counter = 0
while scenes.scene == 'LAURENT':
if self.counter < 300:
self.counter +=6
self.SHOW_FADE_TO_LAURENT(self.counter)
self.laurent_event_handler()
self.cursor_toggle()
if self.__FADE:
fade_out.start_fade_out()
self.__FADE = False
UPDATE()
laurent = LAURENT()
if __name__ == '__main__':
scenes.scene = 'LAURENT'
laurent = LAURENT()
laurent.start_LAURENT() |
the-stack_106_28983 | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to deal with result cache."""
from __future__ import print_function
import glob
import hashlib
import os
import pickle
import re
import tempfile
import json
from cros_utils import command_executer
from cros_utils import misc
from image_checksummer import ImageChecksummer
import results_report
import test_flag
SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
RESULTS_FILE = 'results.txt'
MACHINE_FILE = 'machine.txt'
AUTOTEST_TARBALL = 'autotest.tbz2'
PERF_RESULTS_FILE = 'perf-results.txt'
CACHE_KEYS_FILE = 'cache_keys.txt'
class Result(object):
"""Class for holding the results of a single test run.
This class manages what exactly is stored inside the cache without knowing
what the key of the cache is. For runs with perf, it stores perf.data,
perf.report, etc. The key generation is handled by the ResultsCache class.
"""
def __init__(self, logger, label, log_level, machine, cmd_exec=None):
self.chromeos_root = label.chromeos_root
self._logger = logger
self.ce = cmd_exec or command_executer.GetCommandExecuter(
self._logger, log_level=log_level)
self.temp_dir = None
self.label = label
self.results_dir = None
self.log_level = log_level
self.machine = machine
self.perf_data_files = []
self.perf_report_files = []
self.results_file = []
self.chrome_version = ''
self.err = None
self.chroot_results_dir = ''
self.test_name = ''
self.keyvals = None
self.board = None
self.suite = None
self.retval = None
self.out = None
def CopyFilesTo(self, dest_dir, files_to_copy):
file_index = 0
for file_to_copy in files_to_copy:
if not os.path.isdir(dest_dir):
command = 'mkdir -p %s' % dest_dir
self.ce.RunCommand(command)
dest_file = os.path.join(
dest_dir, ('%s.%s' % (os.path.basename(file_to_copy), file_index)))
ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
if ret:
raise IOError('Could not copy results file: %s' % file_to_copy)
def CopyResultsTo(self, dest_dir):
self.CopyFilesTo(dest_dir, self.perf_data_files)
self.CopyFilesTo(dest_dir, self.perf_report_files)
if len(self.perf_data_files) or len(self.perf_report_files):
self._logger.LogOutput('Perf results files stored in %s.' % dest_dir)
def GetNewKeyvals(self, keyvals_dict):
# Initialize 'units' dictionary.
units_dict = {}
for k in keyvals_dict:
units_dict[k] = ''
results_files = self.GetDataMeasurementsFiles()
for f in results_files:
# Make sure we can find the results file
if os.path.exists(f):
data_filename = f
else:
# Otherwise get the base filename and create the correct
# path for it.
_, f_base = misc.GetRoot(f)
data_filename = os.path.join(self.chromeos_root, 'chroot/tmp',
self.temp_dir, f_base)
if data_filename.find('.json') > 0:
raw_dict = dict()
if os.path.exists(data_filename):
with open(data_filename, 'r') as data_file:
raw_dict = json.load(data_file)
if 'charts' in raw_dict:
raw_dict = raw_dict['charts']
for k1 in raw_dict:
field_dict = raw_dict[k1]
for k2 in field_dict:
result_dict = field_dict[k2]
key = k1 + '__' + k2
if 'value' in result_dict:
keyvals_dict[key] = result_dict['value']
elif 'values' in result_dict:
values = result_dict['values']
if ('type' in result_dict and
result_dict['type'] == 'list_of_scalar_values' and values and
values != 'null'):
keyvals_dict[key] = sum(values) / float(len(values))
else:
keyvals_dict[key] = values
units_dict[key] = result_dict['units']
else:
if os.path.exists(data_filename):
with open(data_filename, 'r') as data_file:
lines = data_file.readlines()
for line in lines:
tmp_dict = json.loads(line)
graph_name = tmp_dict['graph']
graph_str = (graph_name + '__') if graph_name else ''
key = graph_str + tmp_dict['description']
keyvals_dict[key] = tmp_dict['value']
units_dict[key] = tmp_dict['units']
return keyvals_dict, units_dict
def AppendTelemetryUnits(self, keyvals_dict, units_dict):
"""keyvals_dict is the dict of key-value used to generate Crosperf reports.
units_dict is a dictionary of the units for the return values in
keyvals_dict. We need to associate the units with the return values,
for Telemetry tests, so that we can include the units in the reports.
This function takes each value in keyvals_dict, finds the corresponding
unit in the units_dict, and replaces the old value with a list of the
old value and the units. This later gets properly parsed in the
ResultOrganizer class, for generating the reports.
"""
results_dict = {}
for k in keyvals_dict:
# We don't want these lines in our reports; they add no useful data.
if k == '' or k == 'telemetry_Crosperf':
continue
val = keyvals_dict[k]
units = units_dict[k]
new_val = [val, units]
results_dict[k] = new_val
return results_dict
def GetKeyvals(self):
results_in_chroot = os.path.join(self.chromeos_root, 'chroot', 'tmp')
if not self.temp_dir:
self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
command = 'cp -r {0}/* {1}'.format(self.results_dir, self.temp_dir)
self.ce.RunCommand(command, print_to_console=False)
command = ('python generate_test_report --no-color --csv %s' %
(os.path.join('/tmp', os.path.basename(self.temp_dir))))
_, out, _ = self.ce.ChrootRunCommandWOutput(
self.chromeos_root, command, print_to_console=False)
keyvals_dict = {}
tmp_dir_in_chroot = misc.GetInsideChrootPath(self.chromeos_root,
self.temp_dir)
for line in out.splitlines():
tokens = re.split('=|,', line)
key = tokens[-2]
if key.startswith(tmp_dir_in_chroot):
key = key[len(tmp_dir_in_chroot) + 1:]
value = tokens[-1]
keyvals_dict[key] = value
# Check to see if there is a perf_measurements file and get the
# data from it if so.
keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict)
if self.suite == 'telemetry_Crosperf':
# For telemetry_Crosperf results, append the units to the return
# results, for use in generating the reports.
keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict)
return keyvals_dict
def GetResultsDir(self):
mo = re.search(r'Results placed in (\S+)', self.out)
if mo:
result = mo.group(1)
return result
raise RuntimeError('Could not find results directory.')
def FindFilesInResultsDir(self, find_args):
if not self.results_dir:
return None
command = 'find %s %s' % (self.results_dir, find_args)
ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False)
if ret:
raise RuntimeError('Could not run find command!')
return out
def GetResultsFile(self):
return self.FindFilesInResultsDir('-name results-chart.json').splitlines()
def GetPerfDataFiles(self):
return self.FindFilesInResultsDir('-name perf.data').splitlines()
def GetPerfReportFiles(self):
return self.FindFilesInResultsDir('-name perf.data.report').splitlines()
def GetDataMeasurementsFiles(self):
result = self.FindFilesInResultsDir('-name perf_measurements').splitlines()
if not result:
result = \
self.FindFilesInResultsDir('-name results-chart.json').splitlines()
return result
def GeneratePerfReportFiles(self):
perf_report_files = []
for perf_data_file in self.perf_data_files:
# Generate a perf.report and store it side-by-side with the perf.data
# file.
chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
perf_data_file)
perf_report_file = '%s.report' % perf_data_file
if os.path.exists(perf_report_file):
raise RuntimeError(
'Perf report file already exists: %s' % perf_report_file)
chroot_perf_report_file = misc.GetInsideChrootPath(
self.chromeos_root, perf_report_file)
perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
perf_file = '/usr/sbin/perf'
if os.path.exists(perf_path):
perf_file = '/usr/bin/perf'
command = ('%s report '
'-n '
'--symfs /build/%s '
'--vmlinux /build/%s/usr/lib/debug/boot/vmlinux '
'--kallsyms /build/%s/boot/System.map-* '
'-i %s --stdio '
'> %s' % (perf_file, self.board, self.board, self.board,
chroot_perf_data_file, chroot_perf_report_file))
self.ce.ChrootRunCommand(self.chromeos_root, command)
# Add a keyval to the dictionary for the events captured.
perf_report_files.append(
misc.GetOutsideChrootPath(self.chromeos_root,
chroot_perf_report_file))
return perf_report_files
def GatherPerfResults(self):
report_id = 0
for perf_report_file in self.perf_report_files:
with open(perf_report_file, 'r') as f:
report_contents = f.read()
for group in re.findall(r'Events: (\S+) (\S+)', report_contents):
num_events = group[0]
event_name = group[1]
key = 'perf_%s_%s' % (report_id, event_name)
value = str(misc.UnitToNumber(num_events))
self.keyvals[key] = value
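# Illustrative note (assumed report contents): a header line such as "Events: 2M cycles"
# in the first report file produces a keyval under the key "perf_0_cycles", with the
# event count converted to a plain number via misc.UnitToNumber.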
def PopulateFromRun(self, out, err, retval, test, suite):
self.board = self.label.board
self.out = out
self.err = err
self.retval = retval
self.test_name = test
self.suite = suite
self.chroot_results_dir = self.GetResultsDir()
self.results_dir = misc.GetOutsideChrootPath(self.chromeos_root,
self.chroot_results_dir)
self.results_file = self.GetResultsFile()
self.perf_data_files = self.GetPerfDataFiles()
# Include all perf.report data in table.
self.perf_report_files = self.GeneratePerfReportFiles()
# TODO(asharif): Do something similar with perf stat.
# Grab keyvals from the directory.
self.ProcessResults()
def ProcessJsonResults(self):
# Open and parse the json results file generated by telemetry/test_that.
if not self.results_file:
raise IOError('No results file found.')
filename = self.results_file[0]
if not filename.endswith('.json'):
raise IOError('Attempt to call json on non-json file: %s' % filename)
if not os.path.exists(filename):
return {}
keyvals = {}
with open(filename, 'r') as f:
raw_dict = json.load(f)
if 'charts' in raw_dict:
raw_dict = raw_dict['charts']
for k, field_dict in raw_dict.iteritems():
for item in field_dict:
keyname = k + '__' + item
value_dict = field_dict[item]
if 'value' in value_dict:
result = value_dict['value']
elif 'values' in value_dict:
values = value_dict['values']
if not values:
continue
if ('type' in value_dict and
value_dict['type'] == 'list_of_scalar_values' and
values != 'null'):
result = sum(values) / float(len(values))
else:
result = values
units = value_dict['units']
new_value = [result, units]
keyvals[keyname] = new_value
return keyvals
def ProcessResults(self, use_cache=False):
# Note that this function doesn't know anything about whether there is a
# cache hit or miss. It should process results agnostic of the cache hit
# state.
if self.results_file and self.results_file[0].find(
'results-chart.json') != -1:
self.keyvals = self.ProcessJsonResults()
else:
if not use_cache:
print('\n ** WARNING **: Had to use deprecated output-method to '
'collect results.\n')
self.keyvals = self.GetKeyvals()
self.keyvals['retval'] = self.retval
# Generate report from all perf.data files.
# Now parse all perf report files and include them in keyvals.
self.GatherPerfResults()
def GetChromeVersionFromCache(self, cache_dir):
# Read chrome_version from keys file, if present.
chrome_version = ''
keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE)
if os.path.exists(keys_file):
with open(keys_file, 'r') as f:
lines = f.readlines()
for l in lines:
if l.startswith('Google Chrome '):
chrome_version = l
if chrome_version.endswith('\n'):
chrome_version = chrome_version[:-1]
break
return chrome_version
def PopulateFromCacheDir(self, cache_dir, test, suite):
self.test_name = test
self.suite = suite
# Read in everything from the cache directory.
with open(os.path.join(cache_dir, RESULTS_FILE), 'r') as f:
self.out = pickle.load(f)
self.err = pickle.load(f)
self.retval = pickle.load(f)
# Untar the tarball to a temporary directory
self.temp_dir = tempfile.mkdtemp(dir=os.path.join(self.chromeos_root,
'chroot', 'tmp'))
command = ('cd %s && tar xf %s' %
(self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
ret = self.ce.RunCommand(command, print_to_console=False)
if ret:
raise RuntimeError('Could not untar cached tarball')
self.results_dir = self.temp_dir
self.results_file = self.GetDataMeasurementsFiles()
self.perf_data_files = self.GetPerfDataFiles()
self.perf_report_files = self.GetPerfReportFiles()
self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
self.ProcessResults(use_cache=True)
def CleanUp(self, rm_chroot_tmp):
if rm_chroot_tmp and self.results_dir:
dirname, basename = misc.GetRoot(self.results_dir)
if basename.find('test_that_results_') != -1:
command = 'rm -rf %s' % self.results_dir
else:
command = 'rm -rf %s' % dirname
self.ce.RunCommand(command)
if self.temp_dir:
command = 'rm -rf %s' % self.temp_dir
self.ce.RunCommand(command)
def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
# Create the dir if it doesn't exist.
temp_dir = tempfile.mkdtemp()
# Store to the temp directory.
with open(os.path.join(temp_dir, RESULTS_FILE), 'w') as f:
pickle.dump(self.out, f)
pickle.dump(self.err, f)
pickle.dump(self.retval, f)
if not test_flag.GetTestMode():
with open(os.path.join(temp_dir, CACHE_KEYS_FILE), 'w') as f:
f.write('%s\n' % self.label.name)
f.write('%s\n' % self.label.chrome_version)
f.write('%s\n' % self.machine.checksum_string)
for k in key_list:
f.write(k)
f.write('\n')
if self.results_dir:
tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
command = ('cd %s && '
'tar '
'--exclude=var/spool '
'--exclude=var/log '
'-cjf %s .' % (self.results_dir, tarball))
ret = self.ce.RunCommand(command)
if ret:
raise RuntimeError("Couldn't store autotest output directory.")
# Store machine info.
# TODO(asharif): Make machine_manager a singleton, and don't pass it into
# this function.
with open(os.path.join(temp_dir, MACHINE_FILE), 'w') as f:
f.write(machine_manager.machine_checksum_string[self.label.name])
if os.path.exists(cache_dir):
command = 'rm -rf {0}'.format(cache_dir)
self.ce.RunCommand(command)
command = 'mkdir -p {0} && '.format(os.path.dirname(cache_dir))
command += 'chmod g+x {0} && '.format(temp_dir)
command += 'mv {0} {1}'.format(temp_dir, cache_dir)
ret = self.ce.RunCommand(command)
if ret:
command = 'rm -rf {0}'.format(temp_dir)
self.ce.RunCommand(command)
raise RuntimeError('Could not move dir %s to dir %s' % (temp_dir,
cache_dir))
@classmethod
def CreateFromRun(cls,
logger,
log_level,
label,
machine,
out,
err,
retval,
test,
suite='telemetry_Crosperf'):
if suite == 'telemetry':
result = TelemetryResult(logger, label, log_level, machine)
else:
result = cls(logger, label, log_level, machine)
result.PopulateFromRun(out, err, retval, test, suite)
return result
@classmethod
def CreateFromCacheHit(cls,
logger,
log_level,
label,
machine,
cache_dir,
test,
suite='telemetry_Crosperf'):
if suite == 'telemetry':
result = TelemetryResult(logger, label, log_level, machine)
else:
result = cls(logger, label, log_level, machine)
try:
result.PopulateFromCacheDir(cache_dir, test, suite)
except RuntimeError as e:
logger.LogError('Exception while using cache: %s' % e)
return None
return result
class TelemetryResult(Result):
"""Class to hold the results of a single Telemetry run."""
def __init__(self, logger, label, log_level, machine, cmd_exec=None):
super(TelemetryResult, self).__init__(logger, label, log_level, machine,
cmd_exec)
def PopulateFromRun(self, out, err, retval, test, suite):
self.out = out
self.err = err
self.retval = retval
self.ProcessResults()
# pylint: disable=arguments-differ
def ProcessResults(self):
# The output is:
# url,average_commit_time (ms),...
# www.google.com,33.4,21.2,...
# We need to convert to this format:
# {"www.google.com:average_commit_time (ms)": "33.4",
# "www.google.com:...": "21.2"}
# Added note: Occasionally the output comes back
# with "JSON.stringify(window.automation.GetResults())" on
# the first line, and then the rest of the output as
# described above.
lines = self.out.splitlines()
self.keyvals = {}
if lines:
if lines[0].startswith('JSON.stringify'):
lines = lines[1:]
if not lines:
return
labels = lines[0].split(',')
for line in lines[1:]:
fields = line.split(',')
if len(fields) != len(labels):
continue
for i in xrange(1, len(labels)):
key = '%s %s' % (fields[0], labels[i])
value = fields[i]
self.keyvals[key] = value
self.keyvals['retval'] = self.retval
def PopulateFromCacheDir(self, cache_dir, test, suite):
self.test_name = test
self.suite = suite
with open(os.path.join(cache_dir, RESULTS_FILE), 'r') as f:
self.out = pickle.load(f)
self.err = pickle.load(f)
self.retval = pickle.load(f)
self.chrome_version = \
super(TelemetryResult, self).GetChromeVersionFromCache(cache_dir)
self.ProcessResults()
class CacheConditions(object):
"""Various Cache condition values, for export."""
# Cache hit only if the result file exists.
CACHE_FILE_EXISTS = 0
# Cache hit if the checksum of cpuinfo and totalmem of
# the cached result and the new run match.
MACHINES_MATCH = 1
# Cache hit if the image checksum of the cached result and the new run match.
CHECKSUMS_MATCH = 2
# Cache hit only if the cached result was successful
RUN_SUCCEEDED = 3
# Never a cache hit.
FALSE = 4
# Cache hit if the image path matches the cached image path.
IMAGE_PATH_MATCH = 5
# Cache hit if the uuid of hard disk matches the cached one
SAME_MACHINE_MATCH = 6
class ResultsCache(object):
"""Class to handle the cache for storing/retrieving test run results.
This class manages the key of the cached runs without worrying about what
is exactly stored (value). The value generation is handled by the Results
class.
"""
CACHE_VERSION = 6
def __init__(self):
# Proper initialization happens in the Init function below.
self.chromeos_image = None
self.chromeos_root = None
self.test_name = None
self.iteration = None
self.test_args = None
self.profiler_args = None
self.board = None
self.cache_conditions = None
self.machine_manager = None
self.machine = None
self._logger = None
self.ce = None
self.label = None
self.share_cache = None
self.suite = None
self.log_level = None
self.show_all = None
self.run_local = None
def Init(self, chromeos_image, chromeos_root, test_name, iteration, test_args,
profiler_args, machine_manager, machine, board, cache_conditions,
logger_to_use, log_level, label, share_cache, suite,
show_all_results, run_local):
self.chromeos_image = chromeos_image
self.chromeos_root = chromeos_root
self.test_name = test_name
self.iteration = iteration
self.test_args = test_args
self.profiler_args = profiler_args
self.board = board
self.cache_conditions = cache_conditions
self.machine_manager = machine_manager
self.machine = machine
self._logger = logger_to_use
self.ce = command_executer.GetCommandExecuter(
self._logger, log_level=log_level)
self.label = label
self.share_cache = share_cache
self.suite = suite
self.log_level = log_level
self.show_all = show_all_results
self.run_local = run_local
def GetCacheDirForRead(self):
matching_dirs = []
for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)):
matching_dirs += glob.glob(glob_path)
if matching_dirs:
# Cache file found.
return matching_dirs[0]
return None
def GetCacheDirForWrite(self, get_keylist=False):
cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0]
if get_keylist:
args_str = '%s_%s_%s' % (self.test_args, self.profiler_args,
self.run_local)
version, image = results_report.ParseChromeosImage(
self.label.chromeos_image)
keylist = [
version, image, self.label.board, self.machine.name, self.test_name,
str(self.iteration), args_str
]
return cache_path, keylist
return cache_path
def FormCacheDir(self, list_of_strings):
cache_key = ' '.join(list_of_strings)
cache_dir = misc.GetFilenameFromString(cache_key)
if self.label.cache_dir:
cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir))
cache_path = [os.path.join(cache_home, cache_dir)]
else:
cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]
if len(self.share_cache):
for path in [x.strip() for x in self.share_cache.split(',')]:
if os.path.exists(path):
cache_path.append(os.path.join(path, cache_dir))
else:
self._logger.LogFatal('Unable to find shared cache: %s' % path)
return cache_path
def GetCacheKeyList(self, read):
if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
machine_checksum = '*'
else:
machine_checksum = self.machine_manager.machine_checksum[self.label.name]
if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
checksum = '*'
elif self.label.image_type == 'trybot':
checksum = hashlib.md5(self.label.chromeos_image).hexdigest()
elif self.label.image_type == 'official':
checksum = '*'
else:
checksum = ImageChecksummer().Checksum(self.label, self.log_level)
if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions:
image_path_checksum = '*'
else:
image_path_checksum = hashlib.md5(self.chromeos_image).hexdigest()
machine_id_checksum = ''
if read and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions:
machine_id_checksum = '*'
else:
if self.machine and self.machine.name in self.label.remote:
machine_id_checksum = self.machine.machine_id_checksum
else:
for machine in self.machine_manager.GetMachines(self.label):
if machine.name == self.label.remote[0]:
machine_id_checksum = machine.machine_id_checksum
break
temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
self.run_local)
test_args_checksum = hashlib.md5(temp_test_args).hexdigest()
return (image_path_checksum, self.test_name, str(self.iteration),
test_args_checksum, checksum, machine_checksum, machine_id_checksum,
str(self.CACHE_VERSION))
def ReadResult(self):
if CacheConditions.FALSE in self.cache_conditions:
cache_dir = self.GetCacheDirForWrite()
command = 'rm -rf %s' % (cache_dir,)
self.ce.RunCommand(command)
return None
cache_dir = self.GetCacheDirForRead()
if not cache_dir:
return None
if not os.path.isdir(cache_dir):
return None
if self.log_level == 'verbose':
self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
result = Result.CreateFromCacheHit(self._logger, self.log_level, self.label,
self.machine, cache_dir, self.test_name,
self.suite)
if not result:
return None
if (result.retval == 0 or
CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
return result
return None
def StoreResult(self, result):
cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True)
result.StoreToCacheDir(cache_dir, self.machine_manager, keylist)
class MockResultsCache(ResultsCache):
"""Class for mock testing, corresponding to ResultsCache class."""
def Init(self, *args):
pass
def ReadResult(self):
return None
def StoreResult(self, result):
pass
class MockResult(Result):
"""Class for mock testing, corresponding to Result class."""
def PopulateFromRun(self, out, err, retval, test, suite):
self.out = out
self.err = err
self.retval = retval
|
the-stack_106_28987 | from itertools import chain
from django import forms
from django.template.loader import render_to_string
from django.utils.encoding import force_str
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
class SortedCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
class Media:
js = (
'admin/js/jquery.init.js',
'sortedm2m/widget.js',
'sortedm2m/jquery-ui.js',
)
css = {'screen': (
'sortedm2m/widget.css',
)}
def build_attrs(self, attrs=None, **kwargs): # pylint: disable=arguments-differ
attrs = dict(attrs or {}, **kwargs)
attrs = super().build_attrs(attrs)
classes = attrs.setdefault('class', '').split()
classes.append('sortedm2m')
attrs['class'] = ' '.join(classes)
return attrs
def render(self, name, value, attrs=None, choices=(), renderer=None): # pylint: disable=arguments-differ
if value is None:
value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
# Normalize to strings
str_values = [force_str(v) for v in value]
selected = []
unselected = []
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
label_for = ' for="%s"' % conditional_escape(final_attrs['id'])
else:
label_for = ''
cb = forms.CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = force_str(option_value)
rendered_cb = cb.render(name, option_value)
option_label = conditional_escape(force_str(option_label))
item = {
'label_for': label_for,
'rendered_cb': rendered_cb,
'option_label': option_label,
'option_value': option_value
}
if option_value in str_values:
selected.append(item)
else:
unselected.append(item)
        # Reorder `selected` according to str_values, which lists the
        # `option_value`s in the order they should be shown on screen
ordered = []
for s in str_values:
for select in selected:
if s == select['option_value']:
ordered.append(select)
selected = ordered
html = render_to_string(
'sortedm2m/sorted_checkbox_select_multiple_widget.html',
{'selected': selected, 'unselected': unselected})
return mark_safe(html)
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if isinstance(value, (str,)):
return [v for v in value.split(',') if v]
return value
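# Illustrative example (hypothetical POST data): the widget submits the selected
# primary keys as one comma-separated string that value_from_datadict splits
# back into an ordered list, e.g. "3,1,2" -> ['3', '1', '2'].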
class SortedMultipleChoiceField(forms.ModelMultipleChoiceField):
widget = SortedCheckboxSelectMultiple
def clean(self, value):
queryset = super().clean(value)
if value is None or not hasattr(queryset, '__iter__'):
return queryset
key = self.to_field_name or 'pk'
objects = dict((force_str(getattr(o, key)), o) for o in queryset)
return [objects[force_str(val)] for val in value]
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = [force_str(value) for value in self.prepare_value(initial)]
data_set = [force_str(value) for value in data]
return data_set != initial_set
|
the-stack_106_28988 | import sys
import numpy as np
import pandas as pd
import multiprocessing
import time
from joblib import Parallel, delayed
from tqdm import tqdm
from .QCRSC import QCRSC
from .calc_rsd_dratio import calc_rsd_dratio
def batch(BatchTable, PeakTable, gamma='default', transform='log', parametric=True, zeroflag=True, remove_outliers=True, remove_batch=True):
# Make a copy
BatchTable = BatchTable.copy(deep=True)
PeakTable = PeakTable.copy(deep=True)
batch = BatchTable.Batch
bnum = np.unique(batch)
    # Default gamma_range
    if gamma == 'default':
        gamma = (0.5, 5, 0.2)
    # e.g. (0.5, 5, 0.2) expands to gamma values [0.5, 0.7, ..., 4.9]
    gamma_range = [x / 100.0 for x in range(int(gamma[0] * 100), int(gamma[1] * 100), int(gamma[2] * 100))]
if len(bnum) > 1:
raise ValueError("Samples in this Batch are labeled as multiple batches")
peak_list = PeakTable.Name
X = BatchTable[PeakTable.Name]
t = BatchTable.Order
qc = BatchTable.QCW
sam = BatchTable.Sample
    if zeroflag:
        X = X.replace(0, np.nan)
    if transform == 'log':
        X = np.log10(X)
G = np.empty(len(peak_list)) * np.nan
MPA = np.empty(len(peak_list)) * np.nan
Z = np.empty(X.shape)
Z[:] = np.nan
# try loop in parallel
time.sleep(0.5) # Sleep for 0.5 secs to finish printing
num_cores = multiprocessing.cpu_count()
try:
qcrsc_loop = Parallel(n_jobs=num_cores)(delayed(batch_loop_parallel)(i, peak_list, X, t, qc, gamma_range, remove_outliers, remove_batch) for i in tqdm(range(len(peak_list)), desc="Batch {}".format(bnum[0])))
# Append to list
for i in range(len(qcrsc_loop)):
Z[:, i] = qcrsc_loop[i][0]
G[i] = qcrsc_loop[i][1]
MPA[i] = qcrsc_loop[i][2]
    except Exception:
print("Error was raised so parallel won't be used.")
print("Temporary... printing each peak to figure out issue.")
for i in tqdm(range(len(peak_list)), desc="Batch {}".format(bnum[0])):
peak_temp = peak_list[i]
xx, _, _, _, gamma, mpa = QCRSC(X[peak_temp], t, qc, gamma_range, remove_outliers, remove_batch)
print("Peak {}".format(peak_temp))
Z[:, i] = xx
G[i] = gamma
MPA[i] = mpa
# Calculate stats (Pb -> export PeakTable)
Pb = PeakTable
qc_options = ['QCW', 'QCB', 'QCT']
# parametric
for i in qc_options:
RSDqc, RSDsam, Dratio = calc_rsd_dratio(Z, BatchTable[i], sam, transform, True)
Pb['RSD_{}'.format(i)] = RSDqc
Pb['DRatio_{}'.format(i)] = Dratio
# nonparametric
for i in qc_options:
RSDqc, RSDsam, Dratio = calc_rsd_dratio(Z, BatchTable[i], sam, transform, False)
Pb['RSD*_{}'.format(i)] = RSDqc
Pb['DRatio*_{}'.format(i)] = Dratio
# Db -> export DataTable
Db = BatchTable
    if transform == 'log':
Z = np.power(10, Z)
MPA = np.power(10, MPA)
for i in range(len(peak_list)):
peak = peak_list[i]
Db[peak] = Z[:, i]
# Additional PeakTable stats
Pb['MPA'] = MPA
Pb['Blank%Mean'] = np.nan # Calc Blank%Mean later
Pb['Gamma'] = G
return Db, Pb
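# Illustrative call sketch (hypothetical dataframes): BatchTable is expected to
# provide 'Batch', 'Order', 'QCW', 'QCB', 'QCT' and 'Sample' columns plus one
# column per peak listed in PeakTable.Name, e.g.
#   Db, Pb = batch(batch_df, peak_df, transform='log')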
def batch_loop_parallel(i, peak_list, X, t, qc, gamma_range, remove_outliers, remove_batch):
peak_temp = peak_list[i]
xx, _, _, _, gamma, mpa = QCRSC(X[peak_temp], t, qc, gamma_range, remove_outliers, remove_batch)
return [xx, gamma, mpa]
|
the-stack_106_28989 | #
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License 2.0;
# __author__ = 'api.jscudder (Jeff Scudder)'
import re
import unittest
import urllib.error
import urllib.parse
import urllib.request
import atom
import gdata.contacts.service
import gdata.test_config as conf
conf.options.register_option(conf.TEST_IMAGE_LOCATION_OPTION)
class ContactsServiceTest(unittest.TestCase):
def setUp(self):
self.gd_client = gdata.contacts.service.ContactsService()
conf.configure_service(self.gd_client, 'ContactsServiceTest', 'cp')
self.gd_client.email = conf.options.get_value('username')
def tearDown(self):
conf.close_service(self.gd_client)
def testGetContactsFeed(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_service_cache(self.gd_client, 'testGetContactsFeed')
feed = self.gd_client.GetContactsFeed()
self.assertTrue(isinstance(feed, gdata.contacts.ContactsFeed))
def testDefaultContactList(self):
self.assertEqual('default', self.gd_client.contact_list)
def testCustomContactList(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_service_cache(self.gd_client, 'testCustomContactList')
self.gd_client.contact_list = conf.options.get_value('username')
feed = self.gd_client.GetContactsFeed()
self.assertTrue(isinstance(feed, gdata.contacts.ContactsFeed))
def testGetFeedUriDefault(self):
self.gd_client.contact_list = 'domain.com'
self.assertEqual('/m8/feeds/contacts/domain.com/full',
self.gd_client.GetFeedUri())
def testGetFeedUriCustom(self):
uri = self.gd_client.GetFeedUri(kind='groups',
contact_list='example.com',
projection='base/batch',
scheme='https')
self.assertEqual(
'https://www.google.com/m8/feeds/groups/example.com/base/batch', uri)
def testCreateUpdateDeleteContactAndUpdatePhoto(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_service_cache(self.gd_client, 'testCreateUpdateDeleteContactAndUpdatePhoto')
DeleteTestContact(self.gd_client)
# Create a new entry
new_entry = gdata.contacts.ContactEntry()
new_entry.title = atom.Title(text='Elizabeth Bennet')
new_entry.content = atom.Content(text='Test Notes')
new_entry.email.append(gdata.contacts.Email(
rel='http://schemas.google.com/g/2005#work',
primary='true',
address='[email protected]'))
new_entry.phone_number.append(gdata.contacts.PhoneNumber(
rel='http://schemas.google.com/g/2005#work', text='(206)555-1212'))
new_entry.organization = gdata.contacts.Organization(
org_name=gdata.contacts.OrgName(text='TestCo.'),
rel='http://schemas.google.com/g/2005#work')
entry = self.gd_client.CreateContact(new_entry)
# Generate and parse the XML for the new entry.
self.assertEqual(entry.title.text, new_entry.title.text)
self.assertEqual(entry.content.text, 'Test Notes')
self.assertEqual(len(entry.email), 1)
self.assertEqual(entry.email[0].rel, new_entry.email[0].rel)
self.assertEqual(entry.email[0].address, '[email protected]')
self.assertEqual(len(entry.phone_number), 1)
self.assertEqual(entry.phone_number[0].rel,
new_entry.phone_number[0].rel)
self.assertEqual(entry.phone_number[0].text, '(206)555-1212')
self.assertEqual(entry.organization.org_name.text, 'TestCo.')
# Edit the entry.
entry.phone_number[0].text = '(555)555-1212'
updated = self.gd_client.UpdateContact(entry.GetEditLink().href, entry)
self.assertEqual(updated.content.text, 'Test Notes')
self.assertEqual(len(updated.phone_number), 1)
self.assertEqual(updated.phone_number[0].rel,
entry.phone_number[0].rel)
self.assertEqual(updated.phone_number[0].text, '(555)555-1212')
# Change the contact's photo.
updated_photo = self.gd_client.ChangePhoto(
conf.options.get_value('imgpath'), updated,
content_type='image/jpeg')
# Refetch the contact so that it has the new photo link
updated = self.gd_client.GetContact(updated.GetSelfLink().href)
self.assertTrue(updated.GetPhotoLink() is not None)
# Fetch the photo data.
hosted_image = self.gd_client.GetPhoto(updated)
self.assertTrue(hosted_image is not None)
# Delete the entry.
self.gd_client.DeleteContact(updated.GetEditLink().href)
def testCreateAndDeleteContactUsingBatch(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_service_cache(self.gd_client, 'testCreateAndDeleteContactUsingBatch')
# Get random data for creating contact
random_contact_number = 'notRandom12'
random_contact_title = 'Random Contact %s' % (
random_contact_number)
# Set contact data
contact = gdata.contacts.ContactEntry()
contact.title = atom.Title(text=random_contact_title)
contact.email = gdata.contacts.Email(
address='user%[email protected]' % random_contact_number,
primary='true',
rel=gdata.contacts.REL_WORK)
contact.content = atom.Content(text='Contact created by '
'gdata-python-client automated test '
'suite.')
# Form a batch request
batch_request = gdata.contacts.ContactsFeed()
batch_request.AddInsert(entry=contact)
# Execute the batch request to insert the contact.
default_batch_url = gdata.contacts.service.DEFAULT_BATCH_URL
batch_result = self.gd_client.ExecuteBatch(batch_request,
default_batch_url)
self.assertEqual(len(batch_result.entry), 1)
self.assertEqual(batch_result.entry[0].title.text,
random_contact_title)
self.assertEqual(batch_result.entry[0].batch_operation.type,
gdata.BATCH_INSERT)
self.assertEqual(batch_result.entry[0].batch_status.code,
'201')
expected_batch_url = re.compile('default').sub(
urllib.parse.quote(self.gd_client.email),
gdata.contacts.service.DEFAULT_BATCH_URL)
self.assertTrue(batch_result.GetBatchLink().href,
expected_batch_url)
# Create a batch request to delete the newly created entry.
batch_delete_request = gdata.contacts.ContactsFeed()
batch_delete_request.AddDelete(entry=batch_result.entry[0])
batch_delete_result = self.gd_client.ExecuteBatch(
batch_delete_request,
batch_result.GetBatchLink().href)
self.assertEqual(len(batch_delete_result.entry), 1)
self.assertEqual(batch_delete_result.entry[0].batch_operation.type,
gdata.BATCH_DELETE)
self.assertEqual(batch_result.entry[0].batch_status.code,
'201')
def testCleanUriNeedsCleaning(self):
self.assertEqual('/relative/uri', self.gd_client._CleanUri(
'http://www.google.com/relative/uri'))
def testCleanUriDoesNotNeedCleaning(self):
self.assertEqual('/relative/uri', self.gd_client._CleanUri(
'/relative/uri'))
class ContactsQueryTest(unittest.TestCase):
def testConvertToStringDefaultFeed(self):
query = gdata.contacts.service.ContactsQuery()
self.assertEqual(str(query), '/m8/feeds/contacts/default/full')
query.max_results = 10
self.assertEqual(query.ToUri(),
'/m8/feeds/contacts/default/full?max-results=10')
def testConvertToStringCustomFeed(self):
query = gdata.contacts.service.ContactsQuery('/custom/feed/uri')
self.assertEqual(str(query), '/custom/feed/uri')
query.max_results = '10'
self.assertEqual(query.ToUri(), '/custom/feed/uri?max-results=10')
def testGroupQueryParameter(self):
query = gdata.contacts.service.ContactsQuery()
query.group = 'http://google.com/m8/feeds/groups/liz%40gmail.com/full/270f'
self.assertEqual(query.ToUri(), '/m8/feeds/contacts/default/full'
'?group=http%3A%2F%2Fgoogle.com%2Fm8%2Ffeeds%2Fgroups'
'%2Fliz%2540gmail.com%2Ffull%2F270f')
class ContactsGroupsTest(unittest.TestCase):
def setUp(self):
self.gd_client = gdata.contacts.service.ContactsService()
conf.configure_service(self.gd_client, 'ContactsServiceTest', 'cp')
def tearDown(self):
conf.close_service(self.gd_client)
def testCreateUpdateDeleteGroup(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_service_cache(self.gd_client,
'testCreateUpdateDeleteGroup')
test_group = gdata.contacts.GroupEntry(title=atom.Title(
text='test group py'))
new_group = self.gd_client.CreateGroup(test_group)
self.assertTrue(isinstance(new_group, gdata.contacts.GroupEntry))
self.assertEqual(new_group.title.text, 'test group py')
# Change the group's title
new_group.title.text = 'new group name py'
updated_group = self.gd_client.UpdateGroup(new_group.GetEditLink().href,
new_group)
self.assertEqual(updated_group.title.text, new_group.title.text)
# Remove the group
self.gd_client.DeleteGroup(updated_group.GetEditLink().href)
# Utility methods.
def DeleteTestContact(client):
# Get test contact
feed = client.GetContactsFeed()
for entry in feed.entry:
if (entry.title.text == 'Elizabeth Bennet' and
entry.content.text == 'Test Notes' and
entry.email[0].address == '[email protected]'):
client.DeleteContact(entry.GetEditLink().href)
def suite():
return unittest.TestSuite((unittest.makeSuite(ContactsServiceTest, 'test'),
unittest.makeSuite(ContactsQueryTest, 'test'),
unittest.makeSuite(ContactsGroupsTest, 'test'),))
if __name__ == '__main__':
print('Contacts Tests\nNOTE: Please run these tests only with a test '
'account. The tests may delete or update your data.')
unittest.TextTestRunner().run(suite())
|
the-stack_106_28990 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from urllib.parse import parse_qs
import bottle
import common.auth as _auth
import common.helpers as util
from common.logging import logger
from models.context import Context, ContextModel
from models.round import RoundModel
from models.task import TaskModel
from .tasks import ensure_owner_or_admin
@bottle.get("/contexts/<tid:int>/<rid:int>")
@bottle.get("/contexts/<tid:int>/<rid:int>/min")
def getContext(tid, rid):
query_dict = parse_qs(bottle.request.query_string)
tags = _getTags(query_dict)
return _getContext(tid, rid, tags=tags)
@bottle.get("/contexts/<tid:int>/<rid:int>/uniform")
@_auth.requires_auth_or_turk
def getUniformContext(credentials, tid, rid):
query_dict = parse_qs(bottle.request.query_string)
tags = _getTags(query_dict)
return _getContext(tid, rid, "uniform", tags=tags)
@bottle.get("/contexts/<tid:int>/<rid:int>/least_fooled")
def getRandomMinLeastFooledContext(tid, rid):
query_dict = parse_qs(bottle.request.query_string)
tags = _getTags(query_dict)
return _getContext(tid, rid, "least_fooled", tags=tags)
@bottle.get("/contexts/<tid:int>/<rid:int>/validation_failed")
def getRandomValidationFailedContext(tid, rid):
query_dict = parse_qs(bottle.request.query_string)
tags = _getTags(query_dict)
return _getContext(tid, rid, "validation_failed", tags=tags)
def _getTags(query_dict):
tags = None
if "tags" in query_dict and len(query_dict["tags"]) > 0:
tags = query_dict["tags"][0].split("|")
return tags
def _getContext(tid, rid, method="min", tags=None):
rm = RoundModel()
round = rm.getByTidAndRid(tid, rid)
c = ContextModel()
if method == "uniform":
context = c.getRandom(round.id, n=1, tags=tags)
elif method == "min":
context = c.getRandomMin(round.id, n=1, tags=tags)
elif method == "least_fooled":
context = c.getRandomLeastFooled(round.id, n=1, tags=tags)
elif method == "validation_failed":
tm = TaskModel()
task = tm.get(tid)
context = c.getRandomValidationFailed(
round.id, task.num_matching_validations, n=1, tags=tags
)
if not context:
bottle.abort(500, f"No contexts available ({round.id})")
context = context[0].to_dict()
return util.json_encode(context)
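# Illustrative request sketch (hypothetical task/round ids and tags):
#   GET /contexts/1/2/uniform?tags=round2|hard
# is parsed by _getTags into ['round2', 'hard'] before a context is sampled.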
@bottle.post("/contexts/upload/<tid:int>/<rid:int>")
@_auth.requires_auth
def do_upload(credentials, tid, rid):
"""
Upload a contexts file for the current round
and save the contexts to the contexts table
:param credentials:
:return: success info
"""
ensure_owner_or_admin(tid, credentials["id"])
upload = bottle.request.files.get("file")
tm = TaskModel()
task = tm.get(tid)
try:
parsed_upload_data = [
util.json_decode(line)
for line in upload.file.read().decode("utf-8").splitlines()
]
for context_info in parsed_upload_data:
if (
"context" not in context_info
or "tag" not in context_info
or "metadata" not in context_info
or not task.verify_annotation(context_info["context"])
):
bottle.abort(400, "Invalid contexts file")
except Exception as ex:
logger.exception(ex)
bottle.abort(400, "Invalid contexts file")
rm = RoundModel()
round = rm.getByTidAndRid(tid, rid)
r_realid = round.id
contexts_to_add = []
for context_info in parsed_upload_data:
c = Context(
r_realid=r_realid,
context_json=util.json_encode(context_info["context"]),
metadata_json=util.json_encode(context_info["metadata"]),
tag=context_info["tag"],
)
contexts_to_add.append(c)
rm.dbs.bulk_save_objects(contexts_to_add)
rm.dbs.commit()
return util.json_encode({"success": "ok"})
|
the-stack_106_28991 | """
Given an ip address in dotted-decimal representation, determine the
binary representation. For example,
decimal_to_binary(255.0.0.5) returns 11111111.00000000.00000000.00000101
accepts string
returns string
"""
def decimal_to_binary_util(val):
"""
Convert 8-bit decimal number to binary representation
:type val: str
:rtype: str
"""
bits = [128, 64, 32, 16, 8, 4, 2, 1]
val = int(val)
binary_rep = ''
for bit in bits:
if val >= bit:
binary_rep += str(1)
val -= bit
else:
binary_rep += str(0)
return binary_rep
def decimal_to_binary_ip(ip):
"""
Convert dotted-decimal ip address to binary representation with help of decimal_to_binary_util
"""
values = ip.split('.')
binary_list = []
for val in values:
binary_list.append(decimal_to_binary_util(val))
return '.'.join(binary_list)
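# Minimal usage sketch matching the module docstring example:
if __name__ == '__main__':
    print(decimal_to_binary_ip('255.0.0.5'))  # 11111111.00000000.00000000.00000101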
|
the-stack_106_28994 | # pydicom_Tkinter.py
#
# Copyright (c) 2009 Daniel Nanz
# This file is released under the pydicom (http://code.google.com/p/pydicom/)
# license, see the file license.txt available at
# (http://code.google.com/p/pydicom/)
#
# revision history:
# Dec-08-2009: version 0.1
#
# 0.1: tested with pydicom version 0.9.3, Python version 2.6.2 (32-bit)
# under Windows XP Professional 2002, and Mac OS X 10.5.5,
# using numpy 1.3.0 and a small random selection of MRI and
# CT images.
'''
View DICOM images from pydicom
requires numpy: http://numpy.scipy.org/
Usage:
------
>>> import dicom # pydicom
>>> import dicom.contrib.pydicom_Tkinter as pydicom_Tkinter # this module
>>> df = dicom.read_file(filename)
>>> pydicom_Tkinter.show_image(df)
'''
import Tkinter
import tempfile
import os
have_numpy = True
try:
import numpy as np
except:
# will not work...
have_numpy = False
def get_PGM_bytedata_string(arr):
'''Given a 2D numpy array as input write gray-value image data in the PGM
format into a byte string and return it.
arr: single-byte unsigned int numpy array
note: Tkinter's PhotoImage object seems to accept only single-byte data
'''
if arr.dtype != np.uint8:
raise ValueError
if len(arr.shape) != 2:
raise ValueError
# array.shape is (#rows, #cols) tuple; PGM input needs this reversed
col_row_string = ' '.join(reversed(map(str, arr.shape)))
bytedata_string = '\n'.join(('P5',
col_row_string,
str(arr.max()),
arr.tostring()))
return bytedata_string
def get_PGM_from_numpy_arr(arr, window_center, window_width,
lut_min=0, lut_max=255):
'''real-valued numpy input -> PGM-image formatted byte string
arr: real-valued numpy array to display as grayscale image
window_center, window_width: to define max/min values to be mapped to the
lookup-table range. WC/WW scaling is done
according to DICOM-3 specifications.
lut_min, lut_max: min/max values of (PGM-) grayscale table: do not change
'''
if np.isreal(arr).sum() != arr.size:
raise ValueError
# currently only support 8-bit colors
if lut_max != 255:
raise ValueError
if arr.dtype != np.float64:
arr = arr.astype(np.float64)
# LUT-specific array scaling
# width >= 1 (DICOM standard)
window_width = max(1, window_width)
wc, ww = np.float64(window_center), np.float64(window_width)
lut_range = np.float64(lut_max) - lut_min
minval = wc - 0.5 - (ww - 1.0) / 2.0
maxval = wc - 0.5 + (ww - 1.0) / 2.0
min_mask = (minval >= arr)
to_scale = (arr > minval) & (arr < maxval)
max_mask = (arr >= maxval)
if min_mask.any():
arr[min_mask] = lut_min
if to_scale.any():
arr[to_scale] = ((arr[to_scale] - (wc - 0.5)) /
(ww - 1.0) + 0.5) * lut_range + lut_min
if max_mask.any():
arr[max_mask] = lut_max
# round to next integer values and convert to unsigned int
arr = np.rint(arr).astype(np.uint8)
# return PGM byte-data string
return get_PGM_bytedata_string(arr)
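# Worked example (window values chosen for illustration): with window_center=40
# and window_width=400 the DICOM-3 mapping above clips at
#   minval = 40 - 0.5 - (400 - 1) / 2.0 = -160.0
#   maxval = 40 - 0.5 + (400 - 1) / 2.0 = 239.0
# and linearly maps the values in between onto the 0..255 PGM grayscale range.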
def get_tkinter_photoimage_from_pydicom_image(data):
'''
Wrap data.pixel_array in a Tkinter PhotoImage instance,
after conversion into a PGM grayscale image.
This will fail if the "numpy" module is not installed in the attempt of
creating the data.pixel_array.
data: object returned from pydicom.read_file()
side effect: may leave a temporary .pgm file on disk
'''
# get numpy array as representation of image data
arr = data.pixel_array.astype(np.float64)
# pixel_array seems to be the original, non-rescaled array.
# If present, window center and width refer to rescaled array
# -> do rescaling if possible.
if ('RescaleIntercept' in data) and ('RescaleSlope' in data):
intercept = data.RescaleIntercept # single value
slope = data.RescaleSlope
arr = slope * arr + intercept
# get default window_center and window_width values
wc = (arr.max() + arr.min()) / 2.0
ww = arr.max() - arr.min() + 1.0
# overwrite with specific values from data, if available
if ('WindowCenter' in data) and ('WindowWidth' in data):
wc = data.WindowCenter
ww = data.WindowWidth
try:
wc = wc[0] # can be multiple values
except:
pass
try:
ww = ww[0]
except:
pass
# scale array to account for center, width and PGM grayscale range,
    # and wrap into a PGM-formatted (byte-) string
pgm = get_PGM_from_numpy_arr(arr, wc, ww)
# create a PhotoImage
# for as yet unidentified reasons the following fails for certain
# window center/width values:
# photo_image = Tkinter.PhotoImage(data=pgm, gamma=1.0)
# Error with Python 2.6.2 under Windows XP:
# (self.tk.call(('image', 'create', imgtype, name,) + options)
# _tkinter.TclError: truncated PPM data
# OsX: distorted images
# while all seems perfectly OK for other values of center/width or when
# the PGM is first written to a temporary file and read again
# write PGM file into temp dir
(os_id, abs_path) = tempfile.mkstemp(suffix='.pgm')
with open(abs_path, 'wb') as fd:
fd.write(pgm)
photo_image = Tkinter.PhotoImage(file=abs_path, gamma=1.0)
# close and remove temporary file on disk
# os.close is needed under windows for os.remove not to fail
try:
os.close(os_id)
os.remove(abs_path)
except:
pass # silently leave file on disk in temp-like directory
return photo_image
def show_image(data, block=True, master=None):
'''
Get minimal Tkinter GUI and display a pydicom data.pixel_array
data: object returned from pydicom.read_file()
block: if True run Tk mainloop() to show the image
master: use with block==False and an existing Tk widget as parent widget
side effects: may leave a temporary .pgm file on disk
'''
frame = Tkinter.Frame(master=master, background='#000')
if 'SeriesDescription' in data and 'InstanceNumber' in data:
title = ', '.join(('Ser: ' + data.SeriesDescription,
'Img: ' + str(data.InstanceNumber)))
else:
title = 'pydicom image'
frame.master.title(title)
photo_image = get_tkinter_photoimage_from_pydicom_image(data)
label = Tkinter.Label(frame, image=photo_image, background='#000')
# keep a reference to avoid disappearance upon garbage collection
label.photo_reference = photo_image
label.grid()
frame.grid()
if block:
frame.mainloop()
|
the-stack_106_28995 | # -*- coding: utf-8 -*-
from flask_login import LoginManager
from .views.tokens import ns
from .helpers.tokens import Tokens
class Authenticator(object):
def __init__(self, app=None, **kwargs):
self.app = app
self.tokens = None
self.login_manager = None
if app is not None:
self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
self.app = app
        # LDAP definition
self.tokens = Tokens(app)
self.login_manager = LoginManager(app=app)
self.login_manager.request_loader(self.tokens.load_user_from_request)
app.api.add_namespace(ns)
setattr(app, 'tokens', self.tokens)
setattr(app, 'authenticator', self)
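# Illustrative wiring sketch (hypothetical app factory): the extension follows
# the usual Flask init_app pattern and assumes the app already exposes an `api`
# object with add_namespace (e.g. Flask-RESTX), as used above.
#   authenticator = Authenticator()
#   authenticator.init_app(app)   # or Authenticator(app) for eager binding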
|
the-stack_106_28996 | """
# Filename: Benchmark.py
# Author: yangchaofan
# Last Modified: 2018-7-18 15:13
# Description: a simple example of multi-thread query
"""
# before you run this example, make sure that you have started up ghttp service (using bin/ghttp db_name port)
import threading
import sys
sys.path.append('../src')
import GstoreConnector
# variables definition
tnum = 3000
correctness = True
threads = []
result = [15, 0, 828, 27, 27, 5916]
sparql = []
sparql0 = "select ?x where\
{\
?x <ub:name> <FullProfessor0> .\
}"
sparql1 = "select distinct ?x where\
{\
?x <rdf:type> <ub:GraduateStudent>.\
?y <rdf:type> <ub:GraduateStudent>.\
?z <rdf:type> <ub:GraduateStudent>.\
?x <ub:memberOf> ?z.\
?z <ub:subOrganizationOf> ?y.\
?x <ub:undergaduateDegreeFrom> ?y.\
}"
sparql2 = "select distinct ?x where\
{\
?x <rdf:type> <ub:Course>.\
?x <ub:name> ?y.\
}"
sparql3 = "select ?x where\
{\
?x <rdf:type> <ub:UndergraduateStudent>.\
?y <ub:name> <Course1>.\
?x <ub:takesCourse> ?y.\
?z <ub:teacherOf> ?y.\
?z <ub:name> <FullProfessor1>.\
?z <ub:worksFor> ?w.\
?w <ub:name> <Department0>.\
}"
sparql4 = "select distinct ?x where\
{\
?x <rdf:type> <ub:UndergraduateStudent>.\
?y <ub:name> <Course1>.\
?x <ub:takesCourse> ?y.\
?z <ub:teacherOf> ?y.\
?z <ub:name> <FullProfessor1>.\
?z <ub:worksFor> ?w.\
?w <ub:name> <Department0>.\
}"
sparql5 = "select distinct ?x where\
{\
?x <rdf:type> <ub:UndergraduateStudent>.\
}"
# thread function
def Mythread(rnum, sparql, filename):
global correctness
# query
gc = GstoreConnector.GstoreConnector("172.31.222.94", 9000)
#gc.build("test", "data/lubm/lubm.nt", "root", "123456")
#gc.load("test", "root", "123456")
gc.fquery("root", "123456", "lubm", sparql, filename)
#res = gc.query("root", "123456", "test", sparql)
# read the file to a str
with open(filename, "r") as f:
res = f.read()
    # count the number of projected variables: '?' occurrences before the
    # first '{' of the SPARQL string
    m = 0
    for i in range(len(sparql)):
        if (sparql[i] == "?"):
            m = m + 1
        if (sparql[i] == "{"):
            break
    # count '{' in the JSON result and derive the number of result rows from
    # the brace counts (heuristic used by this benchmark)
    n = 0
    for i in range(len(res)):
        if (res[i] == "{"):
            n = n + 1
    Num = (n - 3) // (m + 1)
# compare the result
if (rnum != Num):
correctness = False
print("sparql: "+sparql)
print("Num: "+str(Num))
# create sparql
sparql.append(sparql0)
sparql.append(sparql1)
sparql.append(sparql2)
sparql.append(sparql3)
sparql.append(sparql4)
sparql.append(sparql5)
#create the threads
for i in range(tnum):
filename = "result/res" + str(i) + ".txt"
t = threading.Thread(target=Mythread, args=(result[i%6],sparql[i%6],filename,))
threads.append(t)
# start threads
for i in threads:
i.start()
# wait for the threads
for i in threads:
i.join()
if correctness:
    print("The answers are correct!")
else:
    print("The answers contain errors!")
print("Main thread exit")
|
the-stack_106_29000 | """
In order to implement the post_processor you need to follow these steps:
- Step 1 Configuration: In order to configure the post_processor you need just one document
processed through Document AI. Open the configuration GUI calling run_GUI.py from the command
line and set the 'path' flag: 'python run_GUI.py --path /your/path/to/DocumentAI/output',
or using the below 2 lines of code.
- Step 2: open the configuration file that it's created after you pressed save in the configuration GUI
- Step 3: run the post processor on one (a) or multiple (b) documents processed through Document AI
"""
from dai_post_processor import post_processor as pp
import json
import os
# Step 1
path = "/Users/Niolo/OneDrive - Alma Mater Studiorum Universitaฬ di Bologna/Work/DocumentAI/WB/3466828248682961428/0"
doc_ai = pp.open_doc_ai(path)
mywin = pp.configGUI(doc_ai)
mywin.start()
# Step 2
with open("data/config.json") as json_file:
config_dict = json.load(json_file)
# (a) Apply post-processor to single document
doc2 = pp.DocumentClass(doc_ai, config_dict)
a = doc2.create_json()
# (b) Apply post-processor to multiple documents
path = "/Users/Niolo/OneDrive - Alma Mater Studiorum Universitaฬ di Bologna/Work/DocumentAI/WB"
file_list = os.listdir(path)
file_list = [file for file in file_list if not file.startswith('.')]
for file in file_list:
try:
doc_ai, title = pp.open_doc_ai("{}/{}/0".format(path, file), return_title=True)
doc2 = pp.DocumentClass(doc_ai, config_dict)
a = doc2.create_json(file_name=title)
except Exception as e:
print(e)
|
the-stack_106_29003 | # coding=utf-8
# Copyright (c) 2014-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from time import time
from oslo_log import log as logging
from oslo_utils import importutils
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip.fdb_connector_ml2 \
import FDBConnectorML2
from f5_openstack_agent.lbaasv2.drivers.bigip.network_helper import \
NetworkHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.service_adapter import \
ServiceModelAdapter
from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import SystemHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.vcmp import VcmpManager
LOG = logging.getLogger(__name__)
def _get_tunnel_name(network):
# BIG-IP object name for a tunnel
tunnel_type = network['provider:network_type']
tunnel_id = network['provider:segmentation_id']
return 'tunnel-' + str(tunnel_type) + '-' + str(tunnel_id)
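# Worked example (illustrative values): a vxlan network with
# provider:segmentation_id 1008 is named 'tunnel-vxlan-1008' on the BIG-IP.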
def _get_tunnel_fake_mac(network, local_ip):
# create a fake mac for l2 records for tunnels
network_id = str(network['provider:segmentation_id']).rjust(4, '0')
mac_prefix = '02:' + network_id[:2] + ':' + network_id[2:4] + ':'
ip_parts = local_ip.split('.')
if len(ip_parts) > 3:
mac = [int(ip_parts[-3]),
int(ip_parts[-2]),
int(ip_parts[-1])]
else:
ip_parts = local_ip.split(':')
if len(ip_parts) > 3:
mac = [int('0x' + ip_parts[-3], 16),
int('0x' + ip_parts[-2], 16),
int('0x' + ip_parts[-1], 16)]
else:
mac = [random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return mac_prefix + ':'.join("%02x" % octet for octet in mac)
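# Worked example (illustrative values): segmentation_id 1008 with a VTEP
# local_ip of '10.30.30.2' yields the prefix '02:10:08:' plus the last three
# IPv4 octets in hex, i.e. '02:10:08:1e:1e:02'.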
def _get_vteps(network, vtep_source):
net_type = network['provider:network_type']
vtep_type = net_type + '_vteps'
return vtep_source.get(vtep_type, list())
class L2ServiceBuilder(object):
def __init__(self, driver, f5_global_routed_mode):
self.conf = driver.conf
self.driver = driver
self.f5_global_routed_mode = f5_global_routed_mode
self.vlan_binding = None
self.fdb_connector = None
self.interface_mapping = {}
self.tagging_mapping = {}
self.system_helper = SystemHelper()
self.network_helper = NetworkHelper()
self.service_adapter = ServiceModelAdapter(self.conf)
if not f5_global_routed_mode:
self.fdb_connector = FDBConnectorML2(self.conf)
if self.conf.vlan_binding_driver:
try:
self.vlan_binding = importutils.import_object(
self.conf.vlan_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import VLAN binding driver: %s'
% self.conf.vlan_binding_driver)
raise
# map format is phynet:interface:tagged
for maps in self.conf.f5_external_physical_mappings:
intmap = maps.split(':')
net_key = str(intmap[0]).strip()
if len(intmap) > 3:
net_key = net_key + ':' + str(intmap[3]).strip()
self.interface_mapping[net_key] = str(intmap[1]).strip()
self.tagging_mapping[net_key] = str(intmap[2]).strip()
LOG.debug('physical_network %s = interface %s, tagged %s'
% (net_key, intmap[1], intmap[2]))
def initialize_vcmp_manager(self):
'''Intialize the vCMP manager when the driver is ready.'''
self.vcmp_manager = VcmpManager(self.driver)
def post_init(self):
if self.vlan_binding:
LOG.debug(
'Getting BIG-IP device interface for VLAN Binding')
self.vlan_binding.register_bigip_interfaces()
def tunnel_sync(self, tunnel_ips):
if self.fdb_connector:
self.fdb_connector.advertise_tunnel_ips(tunnel_ips)
def set_tunnel_rpc(self, tunnel_rpc):
# Provide FDB Connector with ML2 RPC access
if self.fdb_connector:
self.fdb_connector.set_tunnel_rpc(tunnel_rpc)
def set_l2pop_rpc(self, l2pop_rpc):
# Provide FDB Connector with ML2 RPC access
if self.fdb_connector:
self.fdb_connector.set_l2pop_rpc(l2pop_rpc)
def set_context(self, context):
if self.fdb_connector:
self.fdb_connector.set_context(context)
def is_common_network(self, network):
"""Returns True if this belongs in the /Common folder
        This method returns True if the given network's configuration objects
        should be stored under the Common partition on the BIG-IP.
"""
return network['shared'] or \
self.conf.f5_common_networks or \
(network['id'] in self.conf.common_network_ids) or \
('router:external' in network and
network['router:external'] and
self.conf.f5_common_external_networks)
def get_vlan_name(self, network, hostname):
# Construct a consistent vlan name
net_key = network['provider:physical_network']
net_type = network['provider:network_type']
# look for host specific interface mapping
if net_key and net_key + ':' + hostname in self.interface_mapping:
interface = self.interface_mapping[net_key + ':' + hostname]
tagged = self.tagging_mapping[net_key + ':' + hostname]
elif net_key and net_key in self.interface_mapping:
interface = self.interface_mapping[net_key]
tagged = self.tagging_mapping[net_key]
else:
interface = self.interface_mapping['default']
tagged = self.tagging_mapping['default']
if tagged:
vlanid = network['provider:segmentation_id']
else:
vlanid = 0
if net_type == "flat":
interface_name = str(interface).replace(".", "-")
if (len(interface_name) > 15):
LOG.warn(
"Interface name is greater than 15 chars in length")
vlan_name = "flat-%s" % (interface_name)
else:
vlan_name = "vlan-%d" % (vlanid)
return vlan_name
def assure_bigip_network(self, bigip, network):
# Ensure bigip has configured network object
if not network:
LOG.error('assure_bigip_network: '
'Attempted to assure a network with no id..skipping.')
return
if network['id'] in bigip.assured_networks:
return
if network['id'] in self.conf.common_network_ids:
LOG.debug('assure_bigip_network: '
'Network is a common global network... skipping.')
return
LOG.debug("assure_bigip_network network: %s" % str(network))
start_time = time()
if self.is_common_network(network):
network_folder = 'Common'
else:
network_folder = self.service_adapter.get_folder_name(
network['tenant_id']
)
# setup all needed L2 network segments
if network['provider:network_type'] == 'flat':
network_name = self._assure_device_network_flat(
network, bigip, network_folder)
elif network['provider:network_type'] == 'vlan':
network_name = self._assure_device_network_vlan(
network, bigip, network_folder)
elif network['provider:network_type'] == 'vxlan':
network_name = self._assure_device_network_vxlan(
network, bigip, network_folder)
elif network['provider:network_type'] == 'gre':
network_name = self._assure_device_network_gre(
network, bigip, network_folder)
elif network['provider:network_type'] == 'opflex':
raise f5_ex.NetworkNotReady(
"Opflex network segment definition required")
else:
error_message = 'Unsupported network type %s.' \
% network['provider:network_type'] + \
' Cannot setup network.'
LOG.error(error_message)
raise f5_ex.InvalidNetworkType(error_message)
bigip.assured_networks[network['id']] = network_name
if time() - start_time > .001:
LOG.debug(" assure bigip network took %.5f secs" %
(time() - start_time))
def _assure_device_network_flat(self, network, bigip, network_folder):
# Ensure bigip has configured flat vlan (untagged)
vlan_name = ""
interface = self.interface_mapping['default']
vlanid = 0
# Do we have host specific mappings?
net_key = network['provider:physical_network']
if net_key and net_key + ':' + bigip.hostname in \
self.interface_mapping:
interface = self.interface_mapping[
net_key + ':' + bigip.hostname]
# Do we have a mapping for this network
elif net_key and net_key in self.interface_mapping:
interface = self.interface_mapping[net_key]
vlan_name = self.get_vlan_name(network,
bigip.hostname)
self._assure_vcmp_device_network(bigip,
vlan={'name': vlan_name,
'folder': network_folder,
'id': vlanid,
'interface': interface,
'network': network})
if self.vcmp_manager and self.vcmp_manager.get_vcmp_host(bigip):
interface = None
try:
model = {'name': vlan_name,
'interface': interface,
'partition': network_folder,
'description': network['id'],
'route_domain_id': network['route_domain_id']}
self.network_helper.create_vlan(bigip, model)
except Exception as err:
LOG.exception("%s", err.message)
raise f5_ex.VLANCreationException("Failed to create flat network")
return vlan_name
def _assure_device_network_vlan(self, network, bigip, network_folder):
# Ensure bigip has configured tagged vlan
# VLAN names are limited to 64 characters including
# the folder name, so we name them foolish things.
vlan_name = ""
interface = self.interface_mapping['default']
tagged = self.tagging_mapping['default']
# Do we have host specific mappings?
net_key = network['provider:physical_network']
if net_key and net_key + ':' + bigip.hostname in \
self.interface_mapping:
interface = self.interface_mapping[
net_key + ':' + bigip.hostname]
tagged = self.tagging_mapping[
net_key + ':' + bigip.hostname]
# Do we have a mapping for this network
elif net_key and net_key in self.interface_mapping:
interface = self.interface_mapping[net_key]
tagged = self.tagging_mapping[net_key]
if tagged:
vlanid = network['provider:segmentation_id']
else:
vlanid = 0
vlan_name = self.get_vlan_name(network,
bigip.hostname)
self._assure_vcmp_device_network(bigip,
vlan={'name': vlan_name,
'folder': network_folder,
'id': vlanid,
'interface': interface,
'network': network})
if self.vcmp_manager and self.vcmp_manager.get_vcmp_host(bigip):
interface = None
try:
model = {'name': vlan_name,
'interface': interface,
'tag': vlanid,
'partition': network_folder,
'description': network['id'],
'route_domain_id': network['route_domain_id']}
self.network_helper.create_vlan(bigip, model)
except Exception as err:
LOG.exception("%s", err.message)
raise f5_ex.VLANCreationException(
"Failed to create vlan: %s" % vlan_name
)
if self.vlan_binding:
self.vlan_binding.allow_vlan(
device_name=bigip.device_name,
interface=interface,
vlanid=vlanid
)
return vlan_name
def _assure_device_network_vxlan(self, network, bigip, partition):
# Ensure bigip has configured vxlan
tunnel_name = ""
if not bigip.local_ip:
error_message = 'Cannot create tunnel %s on %s' \
% (network['id'], bigip.hostname)
error_message += ' no VTEP SelfIP defined.'
LOG.error('VXLAN:' + error_message)
raise f5_ex.MissingVTEPAddress('VXLAN:' + error_message)
tunnel_name = _get_tunnel_name(network)
# create the main tunnel entry for the fdb records
payload = {'name': tunnel_name,
'partition': partition,
'profile': 'vxlan_ovs',
'key': network['provider:segmentation_id'],
'localAddress': bigip.local_ip,
'description': network['id'],
'route_domain_id': network['route_domain_id']}
try:
self.network_helper.create_multipoint_tunnel(bigip, payload)
except Exception as err:
LOG.exception("%s", err.message)
raise f5_ex.VXLANCreationException(
"Failed to create vxlan tunnel: %s" % tunnel_name
)
if self.fdb_connector:
self.fdb_connector.notify_vtep_added(network, bigip.local_ip)
return tunnel_name
def _assure_device_network_gre(self, network, bigip, partition):
tunnel_name = ""
# Ensure bigip has configured gre tunnel
if not bigip.local_ip:
error_message = 'Cannot create tunnel %s on %s' \
% (network['id'], bigip.hostname)
error_message += ' no VTEP SelfIP defined.'
LOG.error('L2GRE:' + error_message)
raise f5_ex.MissingVTEPAddress('L2GRE:' + error_message)
tunnel_name = _get_tunnel_name(network)
payload = {'name': tunnel_name,
'partition': partition,
'profile': 'gre_ovs',
'key': network['provider:segmentation_id'],
'localAddress': bigip.local_ip,
'description': network['id'],
'route_domain_id': network['route_domain_id']}
try:
self.network_helper.create_multipoint_tunnel(bigip, payload)
except Exception as err:
LOG.exception("%s", err.message)
raise f5_ex.VXLANCreationException(
"Failed to create gre tunnel: %s" % tunnel_name
)
if self.fdb_connector:
self.fdb_connector.notify_vtep_added(network, bigip.local_ip)
return tunnel_name
def _assure_vcmp_device_network(self, bigip, vlan):
if not self.vcmp_manager:
return
vcmp_host = self.vcmp_manager.get_vcmp_host(bigip)
if not vcmp_host:
return
# Create the VLAN on the vCMP Host
model = {'name': vlan['name'],
'partition': 'Common',
'tag': vlan['id'],
'interface': vlan['interface'],
'description': vlan['network']['id'],
'route_domain_id': vlan['network']['route_domain_id']}
try:
self.network_helper.create_vlan(vcmp_host['bigip'], model)
LOG.debug(('Created VLAN %s on vCMP Host %s' %
(vlan['name'], vcmp_host['bigip'].hostname)))
except Exception as exc:
LOG.error(
('Exception creating VLAN %s on vCMP Host %s:%s' %
(vlan['name'], vcmp_host['bigip'].hostname, exc)))
# Associate the VLAN with the vCMP Guest, if necessary
self.vcmp_manager.assoc_vlan_with_vcmp_guest(bigip, vlan)
def delete_bigip_network(self, bigip, network):
# Delete network on bigip
if network['id'] in self.conf.common_network_ids:
LOG.debug('skipping delete of common network %s'
% network['id'])
return
if self.is_common_network(network):
network_folder = 'Common'
else:
network_folder = self.service_adapter.get_folder_name(
network['tenant_id'])
if network['provider:network_type'] == 'vlan':
self._delete_device_vlan(bigip, network, network_folder)
elif network['provider:network_type'] == 'flat':
self._delete_device_flat(bigip, network, network_folder)
elif network['provider:network_type'] == 'vxlan':
self._delete_device_vxlan(bigip, network, network_folder)
elif network['provider:network_type'] == 'gre':
self._delete_device_gre(bigip, network, network_folder)
elif network['provider:network_type'] == 'opflex':
raise f5_ex.NetworkNotReady(
"Opflex network segment definition required")
else:
LOG.error('Unsupported network type %s. Can not delete.'
% network['provider:network_type'])
if network['id'] in bigip.assured_networks:
del bigip.assured_networks[network['id']]
def _delete_device_vlan(self, bigip, network, network_folder):
# Delete tagged vlan on specific bigip
vlan_name = self.get_vlan_name(network,
bigip.hostname)
try:
self.network_helper.delete_vlan(
bigip,
vlan_name,
partition=network_folder
)
except Exception as err:
LOG.exception(err)
LOG.error(
"Failed to delete vlan: %s" % vlan_name)
if self.vlan_binding:
interface = self.interface_mapping['default']
tagged = self.tagging_mapping['default']
vlanid = 0
# Do we have host specific mappings?
net_key = network['provider:physical_network']
if net_key and net_key + ':' + bigip.hostname in \
self.interface_mapping:
interface = self.interface_mapping[
net_key + ':' + bigip.hostname]
tagged = self.tagging_mapping[
net_key + ':' + bigip.hostname]
# Do we have a mapping for this network
elif net_key and net_key in self.interface_mapping:
interface = self.interface_mapping[net_key]
tagged = self.tagging_mapping[net_key]
if tagged:
vlanid = network['provider:segmentation_id']
else:
vlanid = 0
self.vlan_binding.prune_vlan(
device_name=bigip.device_name,
interface=interface,
vlanid=vlanid
)
self._delete_vcmp_device_network(bigip, vlan_name)
def _delete_device_flat(self, bigip, network, network_folder):
# Delete untagged vlan on specific bigip
vlan_name = self.get_vlan_name(network,
bigip.hostname)
try:
self.network_helper.delete_vlan(
bigip,
vlan_name,
partition=network_folder
)
except Exception as err:
LOG.exception(err)
LOG.error(
"Failed to delete vlan: %s" % vlan_name)
self._delete_vcmp_device_network(bigip, vlan_name)
def _delete_device_vxlan(self, bigip, network, network_folder):
# Delete vxlan tunnel on specific bigip
tunnel_name = _get_tunnel_name(network)
try:
self.network_helper.delete_all_fdb_entries(
bigip,
tunnel_name,
partition=network_folder)
self.network_helper.delete_tunnel(
bigip,
tunnel_name,
partition=network_folder)
except Exception as err:
# Just log the exception, we want to continue cleanup
LOG.exception(err)
LOG.error(
"Failed to delete vxlan tunnel: %s" % tunnel_name)
if self.fdb_connector:
self.fdb_connector.notify_vtep_removed(network, bigip.local_ip)
def _delete_device_gre(self, bigip, network, network_folder):
# Delete gre tunnel on specific bigip
tunnel_name = _get_tunnel_name(network)
try:
self.network_helper.delete_all_fdb_entries(
bigip,
tunnel_name,
partition=network_folder)
self.network_helper.delete_tunnel(
bigip,
tunnel_name,
partition=network_folder)
except Exception as err:
# Just log the exception, we want to continue cleanup
LOG.exception(err)
LOG.error(
"Failed to delete gre tunnel: %s" % tunnel_name)
if self.fdb_connector:
self.fdb_connector.notify_vtep_removed(network, bigip.local_ip)
def _delete_vcmp_device_network(self, bigip, vlan_name):
'''Disassociated VLAN with vCMP Guest, then delete it from vCMP Host
:param bigip: ManagementRoot object -- vCMP guest
:param vlan_name: str -- name of vlan
'''
if not self.vcmp_manager:
return
vcmp_host = self.vcmp_manager.get_vcmp_host(bigip)
if not vcmp_host:
return
self.vcmp_manager.disassoc_vlan_with_vcmp_guest(bigip, vlan_name)
def add_bigip_fdb(self, bigip, fdb):
# Add entries from the fdb relevant to the bigip
for fdb_operation in \
[{'network_type': 'vxlan',
'get_tunnel_folder': self.network_helper.get_tunnel_folder,
'fdb_method': self.network_helper.add_fdb_entries},
{'network_type': 'gre',
'get_tunnel_folder': self.network_helper.get_tunnel_folder,
'fdb_method': self.network_helper.add_fdb_entries}]:
self._operate_bigip_fdb(bigip, fdb, fdb_operation)
def _operate_bigip_fdb(self, bigip, fdb, fdb_operation):
"""Add L2 records for MAC addresses behind tunnel endpoints.
Description of fdb structure:
{'<network_id>':
'segment_id': <int>
'ports': [ '<vtep>': ['<mac_address>': '<ip_address>'] ]
'<network_id>':
'segment_id':
'ports': [ '<vtep>': ['<mac_address>': '<ip_address>'] ] }
Sample real fdb structure:
{u'45bbbce1-191b-4f7b-84c5-54c6c8243bd2':
{u'segment_id': 1008,
u'ports':
{u'10.30.30.2': [[u'00:00:00:00:00:00', u'0.0.0.0'],
[u'fa:16:3e:3d:7b:7f', u'10.10.1.4']]},
u'network_type': u'vxlan'}}
"""
network_type = fdb_operation['network_type']
get_tunnel_folder = fdb_operation['get_tunnel_folder']
fdb_method = fdb_operation['fdb_method']
for network in fdb:
net_fdb = fdb[network]
if net_fdb['network_type'] == network_type:
net = {'name': network,
'provider:network_type': net_fdb['network_type'],
'provider:segmentation_id': net_fdb['segment_id']}
tunnel_name = _get_tunnel_name(net)
folder = get_tunnel_folder(bigip, tunnel_name=tunnel_name)
net_info = {'network': network,
'folder': folder,
'tunnel_name': tunnel_name,
'net_fdb': net_fdb}
fdbs = self._get_bigip_network_fdbs(bigip, net_info)
if len(fdbs) > 0:
fdb_method(bigip, fdb_entries=fdbs)
def _get_bigip_network_fdbs(self, bigip, net_info):
# Get network fdb entries to add to a bigip
if not net_info['folder']:
return {}
net_fdb = net_info['net_fdb']
fdbs = {}
for vtep in net_fdb['ports']:
# bigip does not need to set fdb entries for local addresses
if vtep == bigip.local_ip:
continue
# most net_info applies to the vtep
vtep_info = dict(net_info)
# but the network fdb is too broad so delete it
del vtep_info['net_fdb']
# use a slice of the fdb for the vtep instead
vtep_info['vtep'] = vtep
vtep_info['fdb_entries'] = net_fdb['ports'][vtep]
self._merge_vtep_fdbs(vtep_info, fdbs)
return fdbs
def _merge_vtep_fdbs(self, vtep_info, fdbs):
# Add L2 records for a specific network+vtep
folder = vtep_info['folder']
tunnel_name = vtep_info['tunnel_name']
for entry in vtep_info['fdb_entries']:
mac_address = entry[0]
if mac_address == '00:00:00:00:00:00':
continue
ip_address = entry[1]
# create/get tunnel data
if tunnel_name not in fdbs:
fdbs[tunnel_name] = {}
tunnel_fdbs = fdbs[tunnel_name]
# update tunnel folder
tunnel_fdbs['folder'] = folder
# maybe create records for tunnel
if 'records' not in tunnel_fdbs:
tunnel_fdbs['records'] = {}
# add entry to records map keyed by mac address
tunnel_fdbs['records'][mac_address] = \
{'endpoint': vtep_info['vtep'], 'ip_address': ip_address}
def update_bigip_fdb(self, bigip, fdb):
# Update l2 records
self.add_bigip_fdb(bigip, fdb)
def remove_bigip_fdb(self, bigip, fdb):
# Add L2 records for MAC addresses behind tunnel endpoints
for fdb_operation in \
[{'network_type': 'vxlan',
'get_tunnel_folder': self.network_helper.get_tunnel_folder,
'fdb_method': self.network_helper.delete_fdb_entries},
{'network_type': 'gre',
'get_tunnel_folder': self.network_helper.get_tunnel_folder,
'fdb_method': self.network_helper.delete_fdb_entries}]:
self._operate_bigip_fdb(bigip, fdb, fdb_operation)
# Utilities
def get_network_name(self, bigip, network):
# This constructs a name for a tunnel or vlan interface
preserve_network_name = False
if network['id'] in self.conf.common_network_ids:
network_name = self.conf.common_network_ids[network['id']]
preserve_network_name = True
elif network['provider:network_type'] == 'vlan':
network_name = self.get_vlan_name(network,
bigip.hostname)
elif network['provider:network_type'] == 'flat':
network_name = self.get_vlan_name(network,
bigip.hostname)
elif network['provider:network_type'] == 'vxlan':
network_name = _get_tunnel_name(network)
elif network['provider:network_type'] == 'gre':
network_name = _get_tunnel_name(network)
else:
error_message = 'Unsupported network type %s.' \
% network['provider:network_type'] + \
' Cannot setup selfip or snat.'
LOG.error(error_message)
raise f5_ex.InvalidNetworkType(error_message)
return network_name, preserve_network_name
def _get_network_folder(self, network):
if self.is_common_network(network):
return 'Common'
else:
return self.service_adapter.get_folder_name(network['tenant_id'])
def add_fdb_entries(self, bigips, loadbalancer, members):
"""Update fdb entries for loadbalancer and member VTEPs.
:param bigips: one or more BIG-IPs to update.
:param loadbalancer: Loadbalancer with VTEPs to update. Can be None.
:param members: List of members. Can be emtpy ([]).
"""
tunnel_records = self.create_fdb_records(loadbalancer, members)
if tunnel_records:
for bigip in bigips:
self.network_helper.add_fdb_entries(bigip,
fdb_entries=tunnel_records)
def delete_fdb_entries(self, bigips, loadbalancer, members):
"""Remove fdb entries for loadbalancer and member VTEPs.
:param bigips: one or more BIG-IPs to update.
:param loadbalancer: Loadbalancer with VTEPs to remove. Can be None.
:param members: List of members. Can be emtpy ([]).
"""
tunnel_records = self.create_fdb_records(loadbalancer, members)
if tunnel_records:
for bigip in bigips:
self.network_helper.delete_fdb_entries(
bigip,
fdb_entries=tunnel_records)
def create_fdb_records(self, loadbalancer, members):
fdbs = dict()
if loadbalancer:
network = loadbalancer['network']
tunnel_name = _get_tunnel_name(network)
fdbs[tunnel_name] = dict()
fdbs[tunnel_name]['folder'] = self._get_network_folder(network)
records = dict()
fdbs[tunnel_name]['records'] = records
self.append_loadbalancer_fdb_records(
network, loadbalancer, records)
for member in members:
network = member['network']
tunnel_name = _get_tunnel_name(network)
if tunnel_name not in fdbs:
fdbs[tunnel_name] = dict()
fdbs[tunnel_name]['folder'] = self._get_network_folder(network)
fdbs[tunnel_name]['records'] = dict()
records = fdbs[tunnel_name]['records']
if 'port' in member and 'mac_address' in member['port']:
mac_addr = member['port']['mac_address']
self.append_member_fdb_records(network,
member,
records,
mac_addr,
ip_address=member['address'])
return fdbs
def append_loadbalancer_fdb_records(self, network, loadbalancer, records):
vteps = _get_vteps(network, loadbalancer)
for vtep in vteps:
# create an arbitrary MAC address for VTEP
mac_addr = _get_tunnel_fake_mac(network, vtep)
records[mac_addr] = {'endpoint': vtep, 'ip_address': ''}
def append_member_fdb_records(self, network, member, records,
mac_addr, ip_address=''):
vteps = _get_vteps(network, member)
for vtep in vteps:
records[mac_addr] = {'endpoint': vtep,
'ip_address': ip_address}
|
the-stack_106_29007 | from flask import Flask, render_template
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route('/')
def index():
mars = mongo.db.mars.find_one()
return render_template("index.html", mars=mars)
@app.route('/scrape')
def scrapper():
mars = mongo.db.mars
mars_data = scrape_mars.scrape()
mars.update({}, mars_data, upsert=True)
return "Scraping Successful"
if __name__ == "__main__":
app.run()
|
the-stack_106_29008 | import logging
from colorlog import ColoredFormatter
LOG_LEVEL = logging.DEBUG
LOG_FORMAT = " %(log_color)s%(levelname)-8s%(reset)s | %(log_color)s%(message)s%(reset)s"
logging.root.setLevel(LOG_LEVEL)
formatter = ColoredFormatter(LOG_FORMAT)
stream = logging.StreamHandler()
stream.setLevel(LOG_LEVEL)
stream.setFormatter(formatter)
logger = logging.getLogger('pythonConfig')
logger.setLevel(LOG_LEVEL)
logger.addHandler(stream)
logger.propagate = False
def log_message(message, log_level=logging.DEBUG):
message = str(message)
print()
if log_level==logging.DEBUG:
logger.debug(message)
elif log_level==logging.CRITICAL:
logger.critical(message)
elif log_level==logging.WARN:
logger.warning(message)
elif log_level==logging.ERROR:
logger.error(message)
else:
logger.info(message)
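# Quick self-check sketch (assumed usage; only runs when this module is executed directly):
if __name__ == "__main__":
    log_message("debug by default")
    log_message("something informational", logging.INFO)
    log_message("something went wrong", logging.ERROR)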
|
the-stack_106_29012 | import numpy
from scipy.integrate import quad
from summer.model import order_dict_by_keys, add_zero_to_age_breakpoints
from copy import copy
"""
palettes of functions that may be useful for creating parameter values to submit to the SUMMER module
"""
def change_parameter_unit(parameter_dict, multiplier):
"""
used to adapt the parameters according their unit - for example, could be used for models that are running in time
steps that are different from the time step assumed by the input parameter
:param parameter_dict: dict
dictionary whose values need to be adjusted
:param multiplier: float
multiplier
:return: dict
dictionary with values multiplied by the multiplier argument
"""
return {
param_key: param_value * multiplier for param_key, param_value in parameter_dict.items()
}
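# Minimal usage sketch (hypothetical parameter names, not from the original code base):
#
#     daily_rates = {"contact_rate": 0.5, "recovery_rate": 0.1}
#     yearly_rates = change_parameter_unit(daily_rates, 365.25)
#     # -> {"contact_rate": 182.625, "recovery_rate": 36.525}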
def add_w_to_param_names(parameter_dict):
"""
add a "W" string to the end of the parameter name to indicate that the parameter should over-write up the chain of
stratification, rather than being a multiplier or adjustment function for the upstream parameters
:param parameter_dict: dict
the dictionary before the adjustments
:return: dict
same dictionary but with the "W" string added to each of the keys
"""
return {str(key) + "W": value for key, value in parameter_dict.items()}
def get_average_value_of_function(input_function, start_value, end_value):
"""
use numeric integration to find the average value of a function between two extremes
:param input_function: function
function to be interrogated
:param start_value: float
lower limit of the independent variable over which to integrate the function
:param end_value: float
upper limit of the independent variable over which to integrate the function
"""
return quad(input_function, start_value, end_value)[0] / (end_value - start_value)
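# Worked example (assumed inputs): the average of f(x) = x ** 2 over [0, 3] is 3.0,
# i.e. get_average_value_of_function(lambda x: x ** 2, 0.0, 3.0) returns ~3.0,
# because quad evaluates the integral (9.0), which is then divided by the interval length.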
def get_parameter_dict_from_function(input_function, breakpoints, upper_value=100.0):
"""
create a dictionary of parameter values from a continuous function, an arbitrary upper value and some breakpoints
within which to evaluate the function
"""
    revised_breakpoints = copy(add_zero_to_age_breakpoints(breakpoints))
revised_breakpoints.append(upper_value)
param_values = []
for n_breakpoint in range(len(revised_breakpoints) - 1):
param_values.append(
get_average_value_of_function(
input_function,
revised_breakpoints[n_breakpoint],
revised_breakpoints[n_breakpoint + 1],
)
)
return {str(key): value for key, value in zip(revised_breakpoints, param_values)}
def substratify_parameter(
parameter_to_stratify, stratum_to_split, param_value_dict, breakpoints, stratification="age"
):
"""
    produce a dictionary to revise a stratum of a parameter that has been split at a higher level, from a dictionary
    of the values for each stratum of the higher level of the split
:param parameter_to_stratify: str
name of the parameter that was split at the higher level
:param stratum_to_split: str
stratum whose values should be revised
:param param_value_dict: dict
dictionary with keys age breakpoints and values parameter values
:param breakpoints: list
list of age breakpoints submitted as integer
:param stratification: str
name of the stratification this is being applied to
:return: dict
dictionary with keys being upstream stratified parameter to split and keys dictionaries with their keys the
current stratum of interest and values the parameter multiplier
"""
return {
parameter_to_stratify
+ "X"
+ stratification
+ "_"
+ str(age_group): {stratum_to_split: param_value_dict[str(age_group)]}
for age_group in add_zero_to_age_breakpoints(breakpoints)
}
"""
functions that return a function of an independent variable
expected that this will often be a model quantity, such as time or age
"""
def get_parameter_dict_from_function(input_function, breakpoints, upper_value=100.0):
"""
create a dictionary of parameter values from a continuous function, an arbitrary upper value and some breakpoints
within which to evaluate the function
"""
revised_breakpoints = copy(add_zero_to_age_breakpoints(breakpoints))
revised_breakpoints.append(upper_value)
param_values = []
for n_breakpoint in range(len(revised_breakpoints) - 1):
param_values.append(
get_average_value_of_function(
input_function,
revised_breakpoints[n_breakpoint],
revised_breakpoints[n_breakpoint + 1],
)
)
return {str(key): value for key, value in zip(revised_breakpoints, param_values)}
def create_step_function_from_dict(input_dict):
"""
create a step function out of dictionary with numeric keys and values, where the keys determine the values of the
independent variable at which the steps between the output values occur
:param input_dict: dict
dictionary in standard format with numeric keys for the points at which the steps occur and numeric values for
the values to be returned from these points onwards
:return: function
the function constructed from input data
"""
dict_keys, dict_values = order_dict_by_keys(input_dict)
def step_function(input_value):
if input_value >= dict_keys[-1]:
return dict_values[-1]
else:
for key in range(len(dict_keys)):
if input_value < dict_keys[key + 1]:
return dict_values[key]
return step_function
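# Usage sketch (hypothetical breakpoints):
#
#     step = create_step_function_from_dict({0: 1.0, 10: 2.0, 20: 4.0})
#     step(5)    # -> 1.0
#     step(15)   # -> 2.0
#     step(25)   # -> 4.0 (inputs at or beyond the last key return the last value)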
def sinusoidal_scaling_function(start_time, baseline_value, end_time, final_value):
"""
in order to implement scale-up functions over time, use the cosine function to produce smooth scale-up functions
from one point to another, returning the starting value before the starting point and the final value after the
end point
:param start_time: float
starting value of the independent variable
:param baseline_value: float
starting value of the dependent variable
:param end_time: float
final value of the independent variable
:param final_value: float
final value of the dependent variable
:return:
function scaling from the starting value to the final value
"""
def sinusoidal_function(time):
if not isinstance(time, float):
raise ValueError("value provided to scaling function not a float")
elif start_time > end_time:
raise ValueError("start time is later than end time")
elif time < start_time:
return baseline_value
elif start_time <= time <= end_time:
return baseline_value + (final_value - baseline_value) * (
0.5 - 0.5 * numpy.cos((time - start_time) * numpy.pi / (end_time - start_time))
)
else:
return final_value
return sinusoidal_function
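# Usage sketch (assumed values): scale smoothly from 0.2 at time 2000.0 up to 0.8 at
# time 2010.0; note the returned function only accepts float inputs because of the
# isinstance check above.
#
#     scale_up = sinusoidal_scaling_function(2000.0, 0.2, 2010.0, 0.8)
#     scale_up(1990.0)  # -> 0.2
#     scale_up(2005.0)  # -> 0.5 (midpoint of the cosine transition)
#     scale_up(2020.0)  # -> 0.8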
def logistic_scaling_function(parameter):
"""
a specific sigmoidal form of function that scales up from zero to one around the point of the value of parameter
:param parameter: float
the single parameter to the function
:return: function
the logistic function
"""
return lambda x: 1.0 - 1.0 / (1.0 + numpy.exp(-(parameter - x)))
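# Usage sketch: logistic_scaling_function(15.0)(15.0) evaluates to 0.5, with outputs
# approaching 0 well below the parameter value and 1 well above it.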
|
the-stack_106_29016 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import array as pyarray
if sys.version > '3':
xrange = range
from numpy import array
from pyspark import RDD
from pyspark import SparkContext
from pyspark.mllib.common import callMLlibFunc, callJavaFunc, _py2java, _java2py
from pyspark.mllib.linalg import SparseVector, _convert_to_vector
from pyspark.mllib.stat.distribution import MultivariateGaussian
from pyspark.mllib.util import Saveable, Loader, inherit_doc
__all__ = ['KMeansModel', 'KMeans', 'GaussianMixtureModel', 'GaussianMixture']
@inherit_doc
class KMeansModel(Saveable, Loader):
"""A clustering model derived from the k-means method.
>>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
>>> model = KMeans.train(
... sc.parallelize(data), 2, maxIterations=10, runs=30, initializationMode="random",
... seed=50, initializationSteps=5, epsilon=1e-4)
>>> model.predict(array([0.0, 0.0])) == model.predict(array([1.0, 1.0]))
True
>>> model.predict(array([8.0, 9.0])) == model.predict(array([9.0, 8.0]))
True
>>> model.k
2
>>> model.computeCost(sc.parallelize(data))
2.0000000000000004
>>> model = KMeans.train(sc.parallelize(data), 2)
>>> sparse_data = [
... SparseVector(3, {1: 1.0}),
... SparseVector(3, {1: 1.1}),
... SparseVector(3, {2: 1.0}),
... SparseVector(3, {2: 1.1})
... ]
>>> model = KMeans.train(sc.parallelize(sparse_data), 2, initializationMode="k-means||",
... seed=50, initializationSteps=5, epsilon=1e-4)
>>> model.predict(array([0., 1., 0.])) == model.predict(array([0, 1.1, 0.]))
True
>>> model.predict(array([0., 0., 1.])) == model.predict(array([0, 0, 1.1]))
True
>>> model.predict(sparse_data[0]) == model.predict(sparse_data[1])
True
>>> model.predict(sparse_data[2]) == model.predict(sparse_data[3])
True
>>> isinstance(model.clusterCenters, list)
True
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = KMeansModel.load(sc, path)
>>> sameModel.predict(sparse_data[0]) == model.predict(sparse_data[0])
True
>>> try:
... os.removedirs(path)
... except OSError:
... pass
"""
def __init__(self, centers):
self.centers = centers
@property
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return self.centers
@property
def k(self):
"""Total number of clusters."""
return len(self.centers)
def predict(self, x):
"""Find the cluster to which x belongs in this model."""
best = 0
best_distance = float("inf")
x = _convert_to_vector(x)
for i in xrange(len(self.centers)):
distance = x.squared_distance(self.centers[i])
if distance < best_distance:
best = i
best_distance = distance
return best
def computeCost(self, rdd):
"""
Return the K-means cost (sum of squared distances of points to
their nearest center) for this model on the given data.
"""
cost = callMLlibFunc("computeCostKmeansModel", rdd.map(_convert_to_vector),
[_convert_to_vector(c) for c in self.centers])
return cost
def save(self, sc, path):
java_centers = _py2java(sc, [_convert_to_vector(c) for c in self.centers])
java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel(java_centers)
java_model.save(sc._jsc.sc(), path)
@classmethod
def load(cls, sc, path):
java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel.load(sc._jsc.sc(), path)
return KMeansModel(_java2py(sc, java_model.clusterCenters()))
class KMeans(object):
@classmethod
def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||",
seed=None, initializationSteps=5, epsilon=1e-4):
"""Train a k-means clustering model."""
model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations,
runs, initializationMode, seed, initializationSteps, epsilon)
centers = callJavaFunc(rdd.context, model.clusterCenters)
return KMeansModel([c.toArray() for c in centers])
class GaussianMixtureModel(object):
"""A clustering model derived from the Gaussian Mixture Model method.
>>> clusterdata_1 = sc.parallelize(array([-0.1,-0.05,-0.01,-0.1,
... 0.9,0.8,0.75,0.935,
... -0.83,-0.68,-0.91,-0.76 ]).reshape(6, 2))
>>> model = GaussianMixture.train(clusterdata_1, 3, convergenceTol=0.0001,
... maxIterations=50, seed=10)
>>> labels = model.predict(clusterdata_1).collect()
>>> labels[0]==labels[1]
False
>>> labels[1]==labels[2]
True
>>> labels[4]==labels[5]
True
>>> clusterdata_2 = sc.parallelize(array([-5.1971, -2.5359, -3.8220,
... -5.2211, -5.0602, 4.7118,
... 6.8989, 3.4592, 4.6322,
... 5.7048, 4.6567, 5.5026,
... 4.5605, 5.2043, 6.2734]).reshape(5, 3))
>>> model = GaussianMixture.train(clusterdata_2, 2, convergenceTol=0.0001,
... maxIterations=150, seed=10)
>>> labels = model.predict(clusterdata_2).collect()
>>> labels[0]==labels[1]==labels[2]
True
>>> labels[3]==labels[4]
True
"""
def __init__(self, weights, gaussians):
self.weights = weights
self.gaussians = gaussians
self.k = len(self.weights)
def predict(self, x):
"""
Find the cluster to which the points in 'x' has maximum membership
in this model.
:param x: RDD of data points.
:return: cluster_labels. RDD of cluster labels.
"""
if isinstance(x, RDD):
cluster_labels = self.predictSoft(x).map(lambda z: z.index(max(z)))
return cluster_labels
def predictSoft(self, x):
"""
Find the membership of each point in 'x' to all mixture components.
:param x: RDD of data points.
:return: membership_matrix. RDD of array of double values.
"""
if isinstance(x, RDD):
means, sigmas = zip(*[(g.mu, g.sigma) for g in self.gaussians])
membership_matrix = callMLlibFunc("predictSoftGMM", x.map(_convert_to_vector),
_convert_to_vector(self.weights), means, sigmas)
return membership_matrix.map(lambda x: pyarray.array('d', x))
class GaussianMixture(object):
"""
Learning algorithm for Gaussian Mixtures using the expectation-maximization algorithm.
:param data: RDD of data points
:param k: Number of components
:param convergenceTol: Threshold value to check the convergence criteria. Defaults to 1e-3
:param maxIterations: Number of iterations. Default to 100
:param seed: Random Seed
"""
@classmethod
def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None):
"""Train a Gaussian Mixture clustering model."""
weight, mu, sigma = callMLlibFunc("trainGaussianMixture",
rdd.map(_convert_to_vector), k,
convergenceTol, maxIterations, seed)
mvg_obj = [MultivariateGaussian(mu[i], sigma[i]) for i in range(k)]
return GaussianMixtureModel(weight, mvg_obj)
def _test():
import doctest
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
the-stack_106_29017 | class Cuboid:
def __init__(self, state, *bounds):
self.state = state
self.bounds = tuple()
for bound in zip(*[iter(bounds)]*2):
self.bounds += (bound,)
@property
def ndim(self):
return len(self.bounds)
def expand_dims(self, n):
deficit = n - self.ndim
if deficit > 0:
for _ in range(deficit):
self.bounds += ((0, 0),)
def is_active(self, axis, position):
return self.bounds[axis][0] <= position <= self.bounds[axis][1]
def in_init_procedure(self, a=-50, b=50):
for l, h in self.bounds:
if not (a <= l <= b) or not (a <= h <= b):
return False
return True
def sweep(cuboids, axis):
events = set()
for c in cuboids:
low, high = c.bounds[axis]
events |= {low, high + 1}
N, events = len(events), sorted(events)
area = 0
if axis == 0: # x-sweep (linear area)
cuboids = cuboids[::-1] # Last cuboid added takes priority
for i, coord in enumerate(events):
for c in cuboids:
if c.is_active(axis, coord):
if c.state:
area += (events[i + 1] - coord if i < N - 1 else 1)
break
else: # y-sweep (area) or z-sweep (volume)
for i, coord in enumerate(events):
sub_cuboids = [c for c in cuboids if c.is_active(axis, coord)]
if sub_cuboids:
area += sweep(sub_cuboids, axis - 1) * (events[i + 1] - coord if i < N - 1 else 1)
return area
def volume(cuboids, ndim=None):
# Automatically detect maximum number of cuboid dimensions if not specified
if ndim is None:
ndim = max(c.ndim for c in cuboids)
for c in cuboids:
c.expand_dims(ndim)
return sweep(cuboids, ndim - 1)
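# Illustrative check (hypothetical cuboids): two overlapping 3x3x3 "on" cubes offset by 2
# along x share a 1x3x3 slab, so the combined volume is 27 + 27 - 9 = 45:
#
#     volume([Cuboid(True, 0, 2, 0, 2, 0, 2), Cuboid(True, 2, 4, 0, 2, 0, 2)])  # -> 45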
def parse_instruction(instr):
instr = instr \
.replace('..', ' ') \
.replace(' x=', ' ') \
.replace(',y=', ' ') \
.replace(',z=', ' ') \
.split()
instr[0] = (instr[0] == 'on')
return (int(x) for x in instr)
def solve(data):
cuboids = [Cuboid(*parse_instruction(instr)) for instr in data]
init_cuboids = [c for c in cuboids if c.in_init_procedure()]
return volume(init_cuboids), volume(cuboids) |
the-stack_106_29018 | # The MIT License (MIT)
#
# Copyright (c) 2020 NVIDIA CORPORATION
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import nvidia.dali as dali
import argparse
def main(filename):
pipe = dali.pipeline.Pipeline(batch_size=3, num_threads=1, device_id=0)
with pipe:
data = dali.fn.external_source(device="cpu", name="DALI_INPUT_0")
pipe.set_outputs(data)
pipe.serialize(filename=filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Serialize pipeline and save it to file")
parser.add_argument('file_path', type=str, help='Path, where to save serialized pipeline')
args = parser.parse_args()
main(args.file_path)
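    # Example invocation (hypothetical paths):
    #     python serialize_pipeline.py /models/dali_identity/1/model.dali
    # The serialized file can then be loaded by a serving backend that feeds the
    # external source named "DALI_INPUT_0".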
|
the-stack_106_29019 | """Training related classes and functions."""
import collections
import os
import time
import tensorflow as tf
from opennmt.data import dataset as dataset_util
from opennmt.optimizers import utils as optimizer_util
from opennmt.utils import misc
class Trainer(object):
"""Model trainer."""
def __init__(self, checkpoint, devices=None, mixed_precision=False):
"""Initializes the trainer.
Args:
checkpoint: A :class:`opennmt.utils.checkpoint.Checkpoint` instance.
devices: List of device strings to use for training.
mixed_precision: Whether mixed precision is enabled or not.
"""
if not devices:
devices = misc.get_devices(count=1) # Train with 1 device by default.
self._checkpoint = checkpoint
self._mixed_precision = mixed_precision
self._model = checkpoint.model
self._strategy = tf.distribute.MirroredStrategy(devices=devices)
self._summary_writer = tf.summary.create_file_writer(checkpoint.model_dir)
optimizer = checkpoint.optimizer
if optimizer is None:
raise ValueError("No optimizer is defined")
if mixed_precision:
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(optimizer, "dynamic")
self._optimizer = optimizer
with self._strategy.scope():
# Create some variables under the strategy scope.
_ = self._optimizer.iterations
self._model.create_variables()
self._gradient_accumulator = optimizer_util.GradientAccumulator()
def __call__(self,
dataset,
max_step=None,
accum_steps=1,
report_steps=100,
save_steps=5000,
evaluator=None,
eval_steps=5000,
export_on_best=None):
"""Runs the training.
Args:
dataset: A training dataset.
max_step: The final training step.
accum_steps: The number of gradient accumulation steps.
report_steps: Report status every this many steps.
save_steps: Save a checkpoint every this many steps.
evaluator: A :class:`opennmt.evaluation.Evaluator` instance to call for
evaluation.
eval_steps: Evaluate every this many steps.
export_on_best: Export a SavedModel when this evaluation metric has the
best value so far.
"""
if max_step is not None and self._optimizer.iterations.numpy() >= max_step:
tf.get_logger().warning("Model already reached max_step = %d. Exiting.", max_step)
return
if evaluator is not None and evaluator.should_stop():
tf.get_logger().warning("Early stopping conditions are already met. Exiting.")
return
self._gradient_accumulator.reset()
accum_num_words = collections.defaultdict(int)
last_report_time = time.time()
last_step = 0
with self._summary_writer.as_default():
if self._optimizer.iterations.numpy() == 0:
self._checkpoint.save(0)
self._model.visualize(self._checkpoint.model_dir)
for i, (loss, num_words, skipped) in enumerate(
self._accumulate_next_gradients(dataset, report_steps=report_steps)):
if skipped:
# We assume only the last partial batch can possibly be skipped.
tf.get_logger().warning("Batch %d is partial, i.e. some training replicas "
"received an empty batch as input. Skipping.", i + 1)
break
if tf.math.is_nan(loss):
raise RuntimeError("Model diverged with loss = NaN.")
if i == 0 or (i + 1) % accum_steps == 0:
self._apply_gradients()
for key, value in num_words.items():
accum_num_words[key] += value.numpy()
step = self._optimizer.iterations.numpy()
if step == last_step:
continue # Do not process same step twice.
last_step = step
if step % report_steps == 0:
last_report_time = _report_training_status(
step,
loss,
self._optimizer.learning_rate,
accum_num_words,
last_report_time)
if save_steps is not None and step % save_steps == 0:
self._checkpoint.save(step)
if evaluator is not None and eval_steps is not None and step % eval_steps == 0:
self._evaluate(evaluator, step, export_on_best=export_on_best)
if evaluator.should_stop():
tf.get_logger().warning("Early stopping conditions are met. Exiting.")
break
if step == max_step:
break
if evaluator is not None and step != evaluator.last_evaluated_step:
self._evaluate(evaluator, step, export_on_best=export_on_best)
self._checkpoint.save(step)
def _evaluate(self, evaluator, step, export_on_best=None):
metrics = evaluator(step)
if export_on_best is not None and evaluator.is_best(export_on_best):
export_dir = os.path.join(self._checkpoint.model_dir, "export", str(step))
tf.get_logger().info("Exporting SavedModel to %s (best %s so far: %f)",
export_dir, export_on_best, metrics[export_on_best])
self._model.export(export_dir)
def _accumulate_next_gradients(self, dataset, report_steps=None):
"""Accumulates the gradients from the next element in :obj:`dataset`."""
# We prefer not to use experimental_distribute_dataset here because it
# sometimes fails to split the batches (noticed with tokens batch type).
# We also assume for now that we are training with a single worker
# otherwise we would need to correctly shard the input dataset.
distributed_dataset = self._strategy.experimental_distribute_datasets_from_function(
lambda _: dataset)
@dataset_util.function_on_next(distributed_dataset)
def _fn(next_fn):
tf.summary.experimental.set_step(self._optimizer.iterations)
if report_steps is None:
should_record_summaries = False
else:
should_record_summaries = tf.logical_and(
tf.equal(self._optimizer.iterations % report_steps, 0),
tf.equal(self._gradient_accumulator.step, 0))
with tf.summary.record_if(should_record_summaries):
per_replica_source, per_replica_target = next_fn()
return self._maybe_accumulate_gradients(per_replica_source, per_replica_target)
return _fn() # pylint: disable=no-value-for-parameter
def _maybe_accumulate_gradients(self, per_replica_source, per_replica_target):
"""Accumulates the gradients if all synchronous batches are non empty (cross-replica)."""
def _run():
loss, num_words = self._accumulate_gradients(per_replica_source, per_replica_target)
return loss, num_words, False
def _skip():
loss = tf.constant(0, dtype=tf.float32)
num_words = {}
if "length" in per_replica_source:
num_words["source"] = tf.constant(0, dtype=tf.int32)
if "length" in per_replica_target:
num_words["target"] = tf.constant(0, dtype=tf.int32)
return loss, num_words, True
# We verify here that each replica receives a non empty batch. If not,
# we skip this iteration. This typically happens at the last iteration
# when training on a finite dataset.
# TODO: is there a simpler way to handle this case?
per_replica_non_empty_batch = self._strategy.experimental_run_v2(
lambda tensor: tf.math.count_nonzero(tf.shape(tensor)[0]),
args=(tf.nest.flatten(per_replica_source)[0],))
non_empty_batch_count = self._strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_non_empty_batch, None)
return tf.cond(
tf.math.equal(non_empty_batch_count, self._strategy.num_replicas_in_sync),
true_fn=_run,
false_fn=_skip)
def _accumulate_gradients(self, per_replica_source, per_replica_target):
"""Accumulates the gradients (cross-replica)."""
per_replica_loss, per_replica_words = self._strategy.experimental_run_v2(
self._accumulate_gradients_on_replica,
args=(per_replica_source, per_replica_target))
# TODO: these reductions could be delayed until _step is called.
loss = self._strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
num_words = {
k:self._strategy.reduce(tf.distribute.ReduceOp.SUM, v, None)
for k, v in per_replica_words.items()}
return loss, num_words
def _accumulate_gradients_on_replica(self, source, target):
"""Accumulates the gradients (in replica)."""
outputs, _ = self._model(
source,
labels=target,
training=True,
step=self._optimizer.iterations)
loss = self._model.compute_loss(outputs, target, training=True)
if isinstance(loss, tuple):
training_loss = loss[0] / loss[1]
reported_loss = loss[0] / loss[2]
else:
training_loss, reported_loss = loss, loss
variables = self._model.trainable_variables
training_loss = self._model.regularize_loss(training_loss, variables=variables)
gradients = self._optimizer.get_gradients(training_loss, variables)
self._gradient_accumulator(gradients)
tf.summary.scalar("gradients/global_norm", tf.linalg.global_norm(gradients))
num_words = {}
if "length" in source:
num_words["source"] = tf.reduce_sum(source["length"])
if "length" in target:
num_words["target"] = tf.reduce_sum(target["length"])
return reported_loss, num_words
@tf.function
def _apply_gradients(self):
"""Applies the gradients (cross-replica)."""
self._strategy.experimental_run_v2(self._apply_gradients_on_replica)
def _apply_gradients_on_replica(self):
"""Applies the gradients (in replica)."""
variables = self._model.trainable_variables
    # optimizer.apply_gradients will sum the gradients across replicas.
gradient_scale = self._gradient_accumulator.step * self._strategy.num_replicas_in_sync
grads_and_vars = [
(gradient / tf.cast(gradient_scale, gradient.dtype), variable)
for gradient, variable in zip(self._gradient_accumulator.gradients, variables)]
self._optimizer.apply_gradients(grads_and_vars)
self._gradient_accumulator.reset()
def _report_training_status(step, loss, learning_rate, accum_num_words, last_report_time):
tf.summary.experimental.set_step(step)
new_report_time = time.time()
words_per_sec_fmt = []
for key, value in accum_num_words.items():
avg = int(value / (new_report_time - last_report_time))
accum_num_words[key] = 0
tf.summary.scalar(
"words_per_sec/%s" % key,
avg,
description="%s words per second" % key.capitalize())
fmt = "%s words/s = %d" % (key, avg)
words_per_sec_fmt.append(fmt)
words_per_sec_fmt = sorted(words_per_sec_fmt)
if isinstance(learning_rate, tf.optimizers.schedules.LearningRateSchedule):
learning_rate = learning_rate(step)
tf.get_logger().info(
"Step = %d ; %s ; Learning rate = %f ; Loss = %f",
step,
", ".join(words_per_sec_fmt),
learning_rate,
loss)
tf.summary.scalar("loss", loss, description="Training loss")
tf.summary.scalar("optim/learning_rate", learning_rate, description="Learning rate")
return new_report_time
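# Usage sketch (hypothetical objects; in practice the runner wires these together):
#
#     trainer = Trainer(checkpoint, devices=["/gpu:0"], mixed_precision=False)
#     trainer(training_dataset,
#             max_step=500000,
#             accum_steps=4,       # accumulate 4 batches before applying gradients
#             report_steps=100,
#             save_steps=5000,
#             evaluator=None)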
|
the-stack_106_29021 | import tensorflow as tf
from synthesizer.utils.symbols import symbols
from synthesizer.infolog import log
from synthesizer.models.helpers import TacoTrainingHelper, TacoTestHelper
from synthesizer.models.modules import *
from tensorflow.contrib.seq2seq import dynamic_decode
from synthesizer.models.architecture_wrappers import TacotronEncoderCell, TacotronDecoderCell
from synthesizer.models.custom_decoder import CustomDecoder
from synthesizer.models.attention import LocationSensitiveAttention
from synthesizer.hparams import hparams
import numpy as np
def split_func(x, split_pos):
rst = []
start = 0
# x will be a numpy array with the contents of the placeholder below
for i in range(split_pos.shape[0]):
rst.append(x[:, start:start + split_pos[i]])
start += split_pos[i]
return rst
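# Illustrative sketch (hypothetical shapes): with split_pos = numpy.array([2, 3]), a
# (batch, 5) array is cut column-wise into a (batch, 2) slice and a (batch, 3) slice,
# matching how tf.py_func below unpacks several per-GPU inputs packed along axis 1.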
class Tacotron():
"""Tacotron-2 Feature prediction Model.
"""
def __init__(self, hparams):
self._hparams = hparams
def initialize(self, inputs, input_lengths, embed_targets, mel_targets=None,
stop_token_targets=None, linear_targets=None, targets_lengths=None, gta=False,
global_step=None, is_training=False, is_evaluating=False, split_infos=None):
"""
        Initializes the model for inference. Sets "mel_outputs" and "alignments" fields.
Args:
- inputs: int32 Tensor with shape [N, T_in] where N is batch size, T_in is number of
steps in the input time series, and values are character IDs
- input_lengths: int32 Tensor with shape [N] where N is batch size and values are the
lengths of each sequence in inputs.
- embed_targets: float32 Tensor with shape [N, E] where E is the speaker
embedding size.
- mel_targets: float32 Tensor with shape [N, T_out, M] where N is batch size,
T_out is number of steps in the output time series, M is num_mels, and values are
entries in the mel spectrogram. Only needed for training.
"""
if mel_targets is None and stop_token_targets is not None:
raise ValueError("no multi targets were provided but token_targets were given")
#if mel_targets is not None and stop_token_targets is None and not gta:
# raise ValueError("Mel targets are provided without corresponding token_targets")
if not gta and self._hparams.predict_linear == True and linear_targets is None and \
is_training:
raise ValueError(
"Model is set to use post processing to predict linear spectrograms in training "
"but no linear targets given!")
if gta and linear_targets is not None:
raise ValueError("Linear spectrogram prediction is not supported in GTA mode!")
if is_training and self._hparams.mask_decoder and targets_lengths is None:
raise RuntimeError(
"Model set to mask paddings but no targets lengths provided for the mask!")
if is_training and is_evaluating:
raise RuntimeError(
"Model can not be in training and evaluation modes at the same time!")
split_device = "/cpu:0" if self._hparams.tacotron_num_gpus > 1 or \
self._hparams.split_on_cpu else "/gpu:{}".format(
self._hparams.tacotron_gpu_start_idx)
with tf.device(split_device):
hp = self._hparams
lout_int = [tf.int32] * hp.tacotron_num_gpus
lout_float = [tf.float32] * hp.tacotron_num_gpus
tower_input_lengths = tf.split(input_lengths, num_or_size_splits=hp.tacotron_num_gpus,
axis=0)
tower_targets_lengths = \
tf.split(targets_lengths, num_or_size_splits=hp.tacotron_num_gpus, axis=0) if \
targets_lengths is not None else targets_lengths
### SV2TTS ###
tower_embed_targets = tf.split(embed_targets, num_or_size_splits=hp.tacotron_num_gpus,
axis=0)
##############
#print (inputs)
p_inputs = tf.py_func(split_func, [inputs, split_infos[:, 0]], lout_float)
p_mel_targets = tf.py_func(split_func, [mel_targets, split_infos[:, 1]],
lout_float) if mel_targets is not None else mel_targets
#p_stop_token_targets = tf.py_func(split_func, [stop_token_targets, split_infos[:, 2]],
# lout_float) if stop_token_targets is not None else \
# stop_token_targets
tower_inputs = [] #[batchsize, timestep, 48, 48, 3]
tower_mel_targets = [] #[batchsize, melspecheight, melspecwidth, 1]
#tower_stop_token_targets = [] #HAVE TO REMOVE THIS
batch_size = tf.shape(inputs)[0]
mel_channels = hp.num_mels
for i in range(hp.tacotron_num_gpus):
tower_inputs.append(tf.reshape(p_inputs[i], [batch_size, hparams.T, hparams.img_height, hparams.img_width, 3]))
if p_mel_targets is not None:
tower_mel_targets.append(
tf.reshape(p_mel_targets[i], [batch_size, -1, mel_channels]))
#if p_stop_token_targets is not None:
# tower_stop_token_targets.append(
# tf.reshape(p_stop_token_targets[i], [batch_size, -1]))
#print (type(tower_inputs))
#input("Enter")
self.tower_decoder_output = []
self.tower_alignments = []
#self.tower_stop_token_prediction = []
self.tower_mel_outputs = []
tower_embedded_inputs = []
tower_enc_conv_output_shape = []
tower_encoder_cond_outputs = []
tower_residual = []
tower_projected_residual = []
# 1. Declare GPU Devices
gpus = ["/gpu:{}".format(i) for i in
range(hp.tacotron_gpu_start_idx, hp.tacotron_gpu_start_idx + hp.tacotron_num_gpus)]
for i in range(hp.tacotron_num_gpus):
with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device="/cpu:0",
worker_device=gpus[i])):
with tf.variable_scope("inference") as scope:
assert hp.tacotron_teacher_forcing_mode in ("constant", "scheduled")
if hp.tacotron_teacher_forcing_mode == "scheduled" and is_training:
assert global_step is not None
                    # GTA is only used for predicting mels to train Wavenet vocoder, so we omit
# post processing when doing GTA synthesis
post_condition = hp.predict_linear and not gta
# Embeddings ==> [batch_size, sequence_length, embedding_dim]
#embedded_inputs = tf.nn.embedding_lookup(self.embedding_table, tower_inputs[i])
#tmp = np.array(tower_inputs[i], dtype=np.float32)
tower_inputs[i] = tf.cast(tower_inputs[i], tf.float32)
embedded_inputs = tower_inputs[i]
# Encoder Cell ==> [batch_size, encoder_steps, encoder_lstm_units]
encoder_cell = TacotronEncoderCell(
EncoderConvolutions3D(is_training, hparams=hp, scope="encoder_convolutions"),
EncoderRNN(is_training, size=hp.encoder_lstm_units,
zoneout=hp.tacotron_zoneout_rate, scope="encoder_LSTM"))
encoder_outputs = encoder_cell(embedded_inputs, tower_input_lengths[i])
# For shape visualization purpose
enc_conv_output_shape = encoder_cell.conv_output_shape
                    ### SV2TTS ###
# Append the speaker embedding to the encoder output at each timestep
tileable_shape = [-1, 1, self._hparams.speaker_embedding_size]
tileable_embed_targets = tf.reshape(tower_embed_targets[i], tileable_shape)
tiled_embed_targets = tf.tile(tileable_embed_targets,
[1, tf.shape(encoder_outputs)[1], 1])
encoder_cond_outputs = tf.concat((encoder_outputs, tiled_embed_targets), 2)
##############
# Decoder Parts
# Attention Decoder Prenet
prenet = Prenet(is_training, layers_sizes=hp.prenet_layers,
drop_rate=hp.tacotron_dropout_rate, scope="decoder_prenet")
# Attention Mechanism
attention_mechanism = LocationSensitiveAttention(hp.attention_dim,
encoder_cond_outputs,
hparams=hp,
mask_encoder=hp.mask_encoder,
memory_sequence_length=tf.reshape(
tower_input_lengths[i],
[-1]),
smoothing=hp.smoothing,
cumulate_weights=hp.cumulative_weights)
# Decoder LSTM Cells
decoder_lstm = DecoderRNN(is_training, layers=hp.decoder_layers,
size=hp.decoder_lstm_units,
zoneout=hp.tacotron_zoneout_rate,
scope="decoder_LSTM")
# Frames Projection layer
frame_projection = FrameProjection(hp.num_mels * hp.outputs_per_step,
scope="linear_transform_projection")
# <stop_token> projection layer
#stop_projection = StopProjection(is_training or is_evaluating, shape=hp
# .outputs_per_step,
# scope="stop_token_projection")
# Decoder Cell ==> [batch_size, decoder_steps, num_mels * r] (after decoding)
#decoder_cell = TacotronDecoderCell(
# prenet,
# attention_mechanism,
# decoder_lstm,
# frame_projection,
# stop_projection)
decoder_cell = TacotronDecoderCell(
prenet,
attention_mechanism,
decoder_lstm,
frame_projection)
# Define the helper for our decoder
if is_training or is_evaluating or gta:
self.helper = TacoTrainingHelper(batch_size, tower_mel_targets[i], hp, gta,
is_evaluating, global_step)
else:
self.helper = TacoTestHelper(batch_size, hp)
# initial decoder state
decoder_init_state = decoder_cell.zero_state(batch_size=batch_size,
dtype=tf.float32)
# Only use max iterations at synthesis time
max_iters = hp.max_iters if not (is_training or is_evaluating) else None
# Decode
'''
(frames_prediction, stop_token_prediction,
_), final_decoder_state, _ = dynamic_decode(
CustomDecoder(decoder_cell, self.helper, decoder_init_state),
impute_finished=False,
maximum_iterations=max_iters,
swap_memory=hp.tacotron_swap_with_cpu)
'''
(frames_prediction,
_), final_decoder_state, _ = dynamic_decode(
CustomDecoder(decoder_cell, self.helper, decoder_init_state),
impute_finished=False,
maximum_iterations=max_iters,
swap_memory=hp.tacotron_swap_with_cpu)
# Reshape outputs to be one output per entry
# ==> [batch_size, non_reduced_decoder_steps (decoder_steps * r), num_mels]
decoder_output = tf.reshape(frames_prediction, [batch_size, -1, hp.num_mels])
#stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])
# Postnet
postnet = Postnet(is_training, hparams=hp, scope="postnet_convolutions")
# Compute residual using post-net ==> [batch_size, decoder_steps * r,
# postnet_channels]
residual = postnet(decoder_output)
# Project residual to same dimension as mel spectrogram
# ==> [batch_size, decoder_steps * r, num_mels]
residual_projection = FrameProjection(hp.num_mels, scope="postnet_projection")
projected_residual = residual_projection(residual)
# Compute the mel spectrogram
mel_outputs = decoder_output + projected_residual
if post_condition:
# Add post-processing CBHG. This does a great job at extracting features
# from mels before projection to Linear specs.
post_cbhg = CBHG(hp.cbhg_kernels, hp.cbhg_conv_channels, hp.cbhg_pool_size,
[hp.cbhg_projection, hp.num_mels],
hp.cbhg_projection_kernel_size, hp.cbhg_highwaynet_layers,
hp.cbhg_highway_units, hp.cbhg_rnn_units, is_training,
name="CBHG_postnet")
# [batch_size, decoder_steps(mel_frames), cbhg_channels]
post_outputs = post_cbhg(mel_outputs, None)
# Linear projection of extracted features to make linear spectrogram
linear_specs_projection = FrameProjection(hp.num_freq,
scope="cbhg_linear_specs_projection")
# [batch_size, decoder_steps(linear_frames), num_freq]
linear_outputs = linear_specs_projection(post_outputs)
# Grab alignments from the final decoder state
alignments = tf.transpose(final_decoder_state.alignment_history.stack(),
[1, 2, 0])
self.tower_decoder_output.append(decoder_output)
self.tower_alignments.append(alignments)
#self.tower_stop_token_prediction.append(stop_token_prediction)
self.tower_mel_outputs.append(mel_outputs)
tower_embedded_inputs.append(embedded_inputs)
tower_enc_conv_output_shape.append(enc_conv_output_shape)
tower_encoder_cond_outputs.append(encoder_cond_outputs)
tower_residual.append(residual)
tower_projected_residual.append(projected_residual)
if post_condition:
self.tower_linear_outputs.append(linear_outputs)
log("initialisation done {}".format(gpus[i]))
if is_training:
self.ratio = self.helper._ratio
self.tower_inputs = tower_inputs
self.tower_input_lengths = tower_input_lengths
self.tower_mel_targets = tower_mel_targets
# self.tower_linear_targets = tower_linear_targets
self.tower_targets_lengths = tower_targets_lengths
#self.tower_stop_token_targets = tower_stop_token_targets
self.all_vars = tf.trainable_variables()
log("Initialized Tacotron model. Dimensions (? = dynamic shape): ")
log(" Train mode: {}".format(is_training))
log(" Eval mode: {}".format(is_evaluating))
log(" GTA mode: {}".format(gta))
log(" Synthesis mode: {}".format(not (is_training or is_evaluating)))
log(" Input: {}".format(inputs.shape))
for i in range(hp.tacotron_num_gpus + hp.tacotron_gpu_start_idx):
log(" device: {}".format(i))
log(" embedding: {}".format(tower_embedded_inputs[i].shape))
log(" enc conv out: {}".format(tower_enc_conv_output_shape[i]))
log(" encoder out (cond): {}".format(tower_encoder_cond_outputs[i].shape))
log(" decoder out: {}".format(self.tower_decoder_output[i].shape))
log(" residual out: {}".format(tower_residual[i].shape))
log(" projected residual out: {}".format(tower_projected_residual[i].shape))
log(" mel out: {}".format(self.tower_mel_outputs[i].shape))
if post_condition:
log(" linear out: {}".format(self.tower_linear_outputs[i].shape))
#log(" <stop_token> out: {}".format(self.tower_stop_token_prediction[i].shape))
# 1_000_000 is causing syntax problems for some people?! Python please :)
log(" Tacotron Parameters {:.3f} Million.".format(
np.sum([np.prod(v.get_shape().as_list()) for v in self.all_vars]) / 1000000))
def add_loss(self):
"""Adds loss to the model. Sets "loss" field. initialize must have been called."""
hp = self._hparams
self.tower_before_loss = []
self.tower_after_loss = []
self.tower_stop_token_loss = []
self.tower_regularization_loss = []
self.tower_linear_loss = []
self.tower_loss = []
total_before_loss = 0
total_after_loss = 0
total_stop_token_loss = 0
total_regularization_loss = 0
total_linear_loss = 0
total_loss = 0
gpus = ["/gpu:{}".format(i) for i in
range(hp.tacotron_gpu_start_idx, hp.tacotron_gpu_start_idx + hp.tacotron_num_gpus)]
for i in range(hp.tacotron_num_gpus):
with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device="/cpu:0",
worker_device=gpus[i])):
with tf.variable_scope("loss") as scope:
if hp.mask_decoder:
# Compute loss of predictions before postnet
before = MaskedMSE(self.tower_mel_targets[i], self.tower_decoder_output[i],
self.tower_targets_lengths[i],
hparams=self._hparams)
# Compute loss after postnet
after = MaskedMSE(self.tower_mel_targets[i], self.tower_mel_outputs[i],
self.tower_targets_lengths[i],
hparams=self._hparams)
# Compute <stop_token> loss (for learning dynamic generation stop)
#stop_token_loss = MaskedSigmoidCrossEntropy(
#self.tower_stop_token_targets[i],
# self.tower_stop_token_prediction[i], self.tower_targets_lengths[i],
# hparams=self._hparams)
# SV2TTS extra L1 loss (disabled for now)
# linear_loss = MaskedLinearLoss(self.tower_mel_targets[i],
# self.tower_decoder_output[i],
# self.tower_targets_lengths[i],
# hparams=self._hparams)
linear_loss = 0.
else:
# Compute loss of predictions before postnet
before = tf.losses.mean_squared_error(self.tower_mel_targets[i],
self.tower_decoder_output[i])
# Compute loss after postnet
after = tf.losses.mean_squared_error(self.tower_mel_targets[i],
self.tower_mel_outputs[i])
# Compute <stop_token> loss (for learning dynamic generation stop)
#stop_token_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
# labels=self.tower_stop_token_targets[i],
# logits=self.tower_stop_token_prediction[i]))
# SV2TTS extra L1 loss
l1 = tf.abs(self.tower_mel_targets[i] - self.tower_decoder_output[i])
linear_loss = tf.reduce_mean(l1)
# if hp.predict_linear:
# # Compute linear loss
# # From https://github.com/keithito/tacotron/blob/tacotron2-work-in
# # -progress/models/tacotron.py
# # Prioritize loss for frequencies under 2000 Hz.
# l1 = tf.abs(self.tower_linear_targets[i] - self.tower_linear_outputs[i])
# n_priority_freq = int(2000 / (hp.sample_rate * 0.5) * hp.num_freq)
# linear_loss = 0.5 * tf.reduce_mean(l1) + 0.5 * tf.reduce_mean(
# l1[:, :, 0:n_priority_freq])
# else:
# linear_loss = 0.
# Compute the regularization weight
if hp.tacotron_scale_regularization:
reg_weight_scaler = 1. / (
2 * hp.max_abs_value) if hp.symmetric_mels else 1. / (
hp.max_abs_value)
reg_weight = hp.tacotron_reg_weight * reg_weight_scaler
else:
reg_weight = hp.tacotron_reg_weight
# Regularize variables
# Exclude all types of bias, RNN (Bengio et al. On the difficulty of training recurrent neural networks), embeddings and prediction projection layers.
                    # Note that we consider attention mechanism v_a weights as a prediction projection layer and we don't regularize it. (This gave better stability)
regularization = tf.add_n([tf.nn.l2_loss(v) for v in self.all_vars
if not (
"bias" in v.name or "Bias" in v.name or "_projection" in v.name or "inputs_embedding" in v.name
or "RNN" in v.name or "LSTM" in v.name)]) * reg_weight
# Compute final loss term
self.tower_before_loss.append(before)
self.tower_after_loss.append(after)
#self.tower_stop_token_loss.append(stop_token_loss)
self.tower_regularization_loss.append(regularization)
self.tower_linear_loss.append(linear_loss)
#loss = before + after + stop_token_loss + regularization + linear_loss
loss = before + after + regularization + linear_loss
self.tower_loss.append(loss)
for i in range(hp.tacotron_num_gpus):
total_before_loss += self.tower_before_loss[i]
total_after_loss += self.tower_after_loss[i]
#total_stop_token_loss += self.tower_stop_token_loss[i]
total_regularization_loss += self.tower_regularization_loss[i]
total_linear_loss += self.tower_linear_loss[i]
total_loss += self.tower_loss[i]
self.before_loss = total_before_loss / hp.tacotron_num_gpus
self.after_loss = total_after_loss / hp.tacotron_num_gpus
#self.stop_token_loss = total_stop_token_loss / hp.tacotron_num_gpus
self.regularization_loss = total_regularization_loss / hp.tacotron_num_gpus
self.linear_loss = total_linear_loss / hp.tacotron_num_gpus
self.loss = total_loss / hp.tacotron_num_gpus
def add_optimizer(self, global_step):
"""Adds optimizer. Sets "gradients" and "optimize" fields. add_loss must have been called.
Args:
global_step: int32 scalar Tensor representing current global step in training
"""
hp = self._hparams
tower_gradients = []
# 1. Declare GPU Devices
gpus = ["/gpu:{}".format(i) for i in
range(hp.tacotron_gpu_start_idx, hp.tacotron_gpu_start_idx + hp.tacotron_num_gpus)]
grad_device = "/cpu:0" if hp.tacotron_num_gpus > 1 else gpus[0]
with tf.device(grad_device):
with tf.variable_scope("optimizer") as scope:
if hp.tacotron_decay_learning_rate:
self.decay_steps = hp.tacotron_decay_steps
self.decay_rate = hp.tacotron_decay_rate
self.learning_rate = self._learning_rate_decay(
hp.tacotron_initial_learning_rate, global_step)
else:
self.learning_rate = tf.convert_to_tensor(hp.tacotron_initial_learning_rate)
optimizer = tf.train.AdamOptimizer(self.learning_rate, hp.tacotron_adam_beta1,
hp.tacotron_adam_beta2, hp.tacotron_adam_epsilon)
# 2. Compute Gradient
for i in range(hp.tacotron_num_gpus):
# Device placement
with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device="/cpu:0",
worker_device=gpus[i])):
# agg_loss += self.tower_loss[i]
with tf.variable_scope("optimizer") as scope:
gradients = optimizer.compute_gradients(self.tower_loss[i])
tower_gradients.append(gradients)
# 3. Average Gradient
with tf.device(grad_device):
avg_grads = []
vars = []
#print (tower_gradients)
for grad_and_vars in zip(*tower_gradients):
# grads_vars = [(grad1, var), (grad2, var), ...]
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
# Append on a "tower" dimension which we will average over below.
grads.append(expanded_g)
# Average over the "tower" dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
avg_grads.append(grad)
vars.append(v)
self.gradients = avg_grads
            # Just for caution
# https://github.com/Rayhane-mamah/Tacotron-2/issues/11
if hp.tacotron_clip_gradients:
clipped_gradients, _ = tf.clip_by_global_norm(avg_grads, 1.) # __mark 0.5 refer
else:
clipped_gradients = avg_grads
            # Add dependency on UPDATE_OPS; otherwise batchnorm won't work correctly. See:
# https://github.com/tensorflow/tensorflow/issues/1122
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self.optimize = optimizer.apply_gradients(zip(clipped_gradients, vars),
global_step=global_step)
def _learning_rate_decay(self, init_lr, global_step):
#################################################################
# Narrow Exponential Decay:
# Phase 1: lr = 1e-3
# We only start learning rate decay after 50k steps
# Phase 2: lr in ]1e-5, 1e-3[
# decay reach minimal value at step 310k
# Phase 3: lr = 1e-5
# clip by minimal learning rate value (step > 310k)
#################################################################
hp = self._hparams
# Compute natural exponential decay
lr = tf.train.exponential_decay(init_lr,
global_step - hp.tacotron_start_decay,
# lr = 1e-3 at step 50k
self.decay_steps,
self.decay_rate, # lr = 1e-5 around step 310k
name="lr_exponential_decay")
# clip learning rate by max and min values (initial and final values)
return tf.minimum(tf.maximum(lr, hp.tacotron_final_learning_rate), init_lr)
|
the-stack_106_29023 | import numpy
from rascil.data_models.memory_data_models import Image, Visibility
from rascil.data_models.polarisation import PolarisationFrame
from rascil.processing_components.image.operations import image_is_canonical
__all__ = ['get_rowmap', 'get_polarisation_map', 'get_frequency_map']
def get_frequency_map(vis, im: Image = None):
""" Map channels from visibilities to image
"""
# Find the unique frequencies in the visibility
ufrequency = numpy.unique(vis.frequency)
vnchan = len(ufrequency)
if im is None:
spectral_mode = 'channel'
if vis.frequency_map is None:
vfrequencymap = get_rowmap(vis.frequency, ufrequency)
vis.frequencymap = vfrequencymap
else:
vfrequencymap = vis.frequency_map
assert min(vfrequencymap) >= 0, "Invalid frequency map: visibility channel < 0: %s" % str(vfrequencymap)
elif im.data.shape[0] == 1 and vnchan >= 1:
assert image_is_canonical(im)
spectral_mode = 'mfs'
if vis.frequency_map is None:
vfrequencymap = numpy.zeros_like(vis.frequency, dtype='int')
vis.frequencymap = vfrequencymap
else:
vfrequencymap = vis.frequency_map
else:
assert image_is_canonical(im)
# We can map these to image channels
v2im_map = im.wcs.sub(['spectral']).wcs_world2pix(ufrequency, 0)[0].astype('int')
spectral_mode = 'channel'
nrows = len(vis.frequency)
row2vis = numpy.array(get_rowmap(vis.frequency, ufrequency))
vfrequencymap = [v2im_map[row2vis[row]] for row in range(nrows)]
assert min(vfrequencymap) >= 0, "Invalid frequency map: image channel < 0 %s" % str(vfrequencymap)
assert max(vfrequencymap) < im.shape[0], "Invalid frequency map: image channel > number image channels %s" % \
str(vfrequencymap)
return spectral_mode, vfrequencymap
def get_polarisation_map(vis: Visibility, im: Image = None):
""" Get the mapping of visibility polarisations to image polarisations
"""
assert image_is_canonical(im)
if vis.polarisation_frame == im.polarisation_frame:
if vis.polarisation_frame == PolarisationFrame('stokesI'):
return "stokesI->stokesI", lambda pol: 0
elif vis.polarisation_frame == PolarisationFrame('stokesIQUV'):
return "stokesIQUV->stokesIQUV", lambda pol: pol
return "unknown", lambda pol: pol
def get_rowmap(col, ucol=None):
""" Map to unique cols
:param col: Data column
:param ucol: Unique values in col
"""
pdict = {}
def phash(f):
return numpy.round(f).astype('int')
if ucol is None:
ucol = numpy.unique(col)
for i, f in enumerate(ucol):
pdict[phash(f)] = i
# vmap = []
# vmap = [pdict[phash(p)] for p in col]
# for p in col:
# vmap.append(pdict[phash(p)])
    n_ucol = numpy.round(col).astype('int')
vmap = numpy.vectorize(pdict.__getitem__)(n_ucol)
return vmap.tolist() |
the-stack_106_29024 | import os
import codecs
import shutil
from html.parser import HTMLParser
spells = []
class Spell:
def __init__(self, name=None):
self.name = name
self.description = ''
def __str__(self):
return self.name + '\n' + self.type + '\n' + self.castingTime + '\n' + self.range + '\n' + self.components + '\n' + self.duration + '\n' + self.description
def toJson(self):
return """ {
"name": "%s",
"type": "%s",
"castingTime": "%s",
"range": "%s",
"components": "%s",
"duration": "%s",
"description": "%s"
}
""" % (self.name, self.type, self.castingTime, self.range, self.components, self.duration, self.description.replace('\n', ''))
class SpellHTMLParser(HTMLParser):
def isCastingTime(self, tag):
return tag == 'b' and self.bCounter == 0
def isRange(self, tag):
return tag == 'b' and self.bCounter == 1
def isComponents(self, tag):
return tag == 'b' and self.bCounter == 2
def isDuration(self, tag):
return tag == 'b' and self.bCounter == 3
def isDescription(self, tag):
return tag == 'article'
def handle_starttag(self, tag, attrs):
# print("Starting tag :", tag)
if self.state == 'DESCRIPTION':
if tag == 'b':
self.state = 'DESCRIPTION_B_TAG_DATA'
self.spell.description += '<' + tag + '>'
else:
if tag == 'i':
self.state = 'TYPE_READING'
elif self.isDescription(tag):
self.state = 'DESCRIPTION'
def handle_endtag(self, tag):
# print("Ending tag :", tag)
if self.isDescription(tag):
self.state = ''
if self.state == 'DESCRIPTION':
self.spell.description += '</' + tag + '>'
elif self.state == 'DESCRIPTION_B_TAG_DATA':
if tag == 'b':
self.state = 'DESCRIPTION'
self.spell.description += '</' + tag + '>'
else:
if tag == 'i':
self.state = ''
elif self.isCastingTime(tag):
self.bCounter += 1
self.state = 'CASTING'
elif self.isRange(tag):
self.bCounter += 1
self.state = 'RANGE'
elif self.isComponents(tag):
self.bCounter += 1
self.state = 'COMPONENTS'
elif self.isDuration(tag):
self.bCounter += 1
self.state = 'DURATION'
def handle_data(self, data):
# print("Encountered some data :", data)
# print('State:' + self.state)
if self.state == 'TYPE_READING':
self.state = ''
self.spell.type = data
elif self.state == 'CASTING':
self.state = ''
self.spell.castingTime = data.strip()
elif self.state == 'RANGE':
self.state = ''
self.spell.range = data.strip()
elif self.state == 'COMPONENTS':
self.state = ''
self.spell.components = data.strip()
elif self.state == 'DURATION':
self.state = ''
self.spell.duration = data.strip()
elif self.state == 'DESCRIPTION':
# print("Encountered some data :", data)
            self.spell.description += data.replace('•', '<br/>•')
elif self.state == 'DESCRIPTION_B_TAG_DATA':
# print("Encountered some data :", data)
self.spell.description += data
def __init__(self, spell):
super().__init__()
self.reset()
self.fed = []
self.spell = spell
self.state = ''
self.bCounter = 0
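# Minimal usage sketch (hypothetical markup shaped like the scraped pages):
#
#     spell = Spell("Example Bolt")
#     SpellHTMLParser(spell).feed(
#         "<i>1st-level evocation</i>"
#         "<b>Casting Time:</b> 1 action <b>Range:</b> 60 feet "
#         "<b>Components:</b> V, S <b>Duration:</b> Instantaneous "
#         "<article>A mote of energy streaks toward a creature.</article>")
#     print(spell.toJson())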
files = [f for f in os.listdir('.') if os.path.isfile(f)]
for filename in files:
if '.html' in filename:
print(filename)
with open(filename, encoding='utf8') as f:
spell = Spell(filename.split('.')[0])
parser = SpellHTMLParser(spell)
parser.feed(f.read())
spells.append(parser.spell)
with codecs.open('spells.json', 'w', 'utf8') as f:
f.write('[\n')
delimiter = ''
for spell in spells:
f.write(delimiter)
f.write(spell.toJson())
delimiter = ', '
f.write('\n]') |
the-stack_106_29026 | """
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import pandas as pd
import patsy
import numpy.linalg as la
import numpy as np
def adjust_nums(numerical_covariates, drop_idxs):
# if we dropped some values, have to adjust those with a larger index.
if numerical_covariates is None: return drop_idxs
return [nc - sum(nc < di for di in drop_idxs)
for nc in numerical_covariates]
def design_mat(mod, numerical_covariates, batch_levels):
# require levels to make sure they are in the same order as we use in the
# rest of the script.
design = patsy.dmatrix("~ 0 + C(batch, levels=%s)" % str(batch_levels),
mod, return_type="dataframe")
mod = mod.drop(["batch"], axis=1)
numerical_covariates = list(numerical_covariates)
sys.stderr.write("found %i batches\n" % design.shape[1])
other_cols = [c for i, c in enumerate(mod.columns)
if not i in numerical_covariates]
factor_matrix = mod[other_cols]
design = pd.concat((design, factor_matrix), axis=1)
if numerical_covariates is not None:
sys.stderr.write("found %i numerical covariates...\n"
% len(numerical_covariates))
for i, nC in enumerate(numerical_covariates):
cname = mod.columns[nC]
sys.stderr.write("\t{0}\n".format(cname))
design[cname] = mod[mod.columns[nC]]
sys.stderr.write("found %i categorical variables:" % len(other_cols))
sys.stderr.write("\t" + ", ".join(other_cols) + '\n')
return design
def combat(data, batch, model=None, numerical_covariates=None):
"""Correct for batch effects in a dataset
Parameters
----------
data : pandas.DataFrame
A (n_features, n_samples) dataframe of the expression or methylation
data to batch correct
batch : List-like
A column corresponding to the batches in the data, in the same order
as the samples in ``data``
model : patsy.design_info.DesignMatrix, optional
A model matrix describing metadata on the samples which could be
causing batch effects. If not provided, then will attempt to coarsely
correct just from the information provided in ``batch``
numerical_covariates : list-like
List of covariates in the model which are numerical, rather than
categorical
Returns
-------
corrected : pandas.DataFrame
A (n_features, n_samples) dataframe of the batch-corrected data
"""
if isinstance(numerical_covariates, str):
numerical_covariates = [numerical_covariates]
if numerical_covariates is None:
numerical_covariates = []
if model is not None and isinstance(model, pd.DataFrame):
model["batch"] = list(batch)
else:
model = pd.DataFrame({'batch': batch})
batch_items = model.groupby("batch").groups.items()
batch_levels = [k for k, v in batch_items]
batch_info = [v for k, v in batch_items]
n_batch = len(batch_info)
n_batches = np.array([len(v) for v in batch_info])
n_array = float(sum(n_batches))
# drop intercept
drop_cols = [cname for cname, inter in ((model == 1).all()).items()
if inter]
drop_idxs = [list(model.columns).index(cdrop) for cdrop in drop_cols]
model = model[[c for c in model.columns if not c in drop_cols]]
numerical_covariates = [list(model.columns).index(c)
if isinstance(c, str)
else c
for c in numerical_covariates if not c in drop_cols]
design = design_mat(model, numerical_covariates, batch_levels)
sys.stderr.write("Standardizing Data across genes.\n")
B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch,:])
var_pooled = np.dot(((data - np.dot(design, B_hat).T)**2),
np.ones((n_array, 1)) / n_array)
stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)),
np.ones((1, n_array)))
tmp = np.array(design.copy())
tmp[:,:n_batch] = 0
stand_mean += np.dot(tmp, B_hat).T
s_data = ((data - stand_mean) / np.dot(np.sqrt(var_pooled),
np.ones((1, n_array))))
sys.stderr.write("Fitting L/S model and finding priors\n")
batch_design = design[design.columns[:n_batch]]
gamma_hat = np.dot(np.dot(la.inv(np.dot(batch_design.T, batch_design)),
batch_design.T), s_data.T)
delta_hat = []
for i, batch_idxs in enumerate(batch_info):
#batches = [list(model.columns).index(b) for b in batches]
delta_hat.append(s_data[batch_idxs].var(axis=1))
gamma_bar = gamma_hat.mean(axis=1)
t2 = gamma_hat.var(axis=1)
a_prior = list(map(aprior, delta_hat))
b_prior = list(map(bprior, delta_hat))
sys.stderr.write("Finding parametric adjustments\n")
gamma_star, delta_star = [], []
for i, batch_idxs in enumerate(batch_info):
#print '18 20 22 28 29 31 32 33 35 40 46'
#print batch_info[batch_id]
temp = it_sol(s_data[batch_idxs], gamma_hat[i],
delta_hat[i], gamma_bar[i], t2[i], a_prior[i], b_prior[i])
gamma_star.append(temp[0])
delta_star.append(temp[1])
sys.stdout.write("Adjusting data\n")
bayesdata = s_data
gamma_star = np.array(gamma_star)
delta_star = np.array(delta_star)
for j, batch_idxs in enumerate(batch_info):
dsq = np.sqrt(delta_star[j,:])
dsq = dsq.reshape((len(dsq), 1))
denom = np.dot(dsq, np.ones((1, n_batches[j])))
numer = np.array(bayesdata[batch_idxs]
- np.dot(batch_design.loc[batch_idxs], gamma_star).T)
bayesdata[batch_idxs] = numer / denom
vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
bayesdata = bayesdata * np.dot(vpsq, np.ones((1, n_array))) + stand_mean
return bayesdata
def it_sol(sdat, g_hat, d_hat, g_bar, t2, a, b, conv=0.0001):
n = (1 - np.isnan(sdat)).sum(axis=1)
g_old = g_hat.copy()
d_old = d_hat.copy()
change = 1
count = 0
while change > conv:
#print g_hat.shape, g_bar.shape, t2.shape
g_new = postmean(g_hat, g_bar, n, d_old, t2)
sum2 = ((sdat - np.dot(g_new.reshape((g_new.shape[0], 1)),
np.ones((1, sdat.shape[1])))) ** 2).sum(axis=1)
d_new = postvar(sum2, n, a, b)
change = max((abs(g_new - g_old) / g_old).max(),
(abs(d_new - d_old) / d_old).max())
g_old = g_new #.copy()
d_old = d_new #.copy()
count = count + 1
adjust = (g_new, d_new)
return adjust
def aprior(gamma_hat):
m = gamma_hat.mean()
s2 = gamma_hat.var()
return (2 * s2 +m**2) / s2
def bprior(gamma_hat):
m = gamma_hat.mean()
s2 = gamma_hat.var()
return (m*s2+m**3)/s2
def postmean(g_hat, g_bar, n, d_star, t2):
return (t2*n*g_hat+d_star * g_bar) / (t2*n+d_star)
def postvar(sum2, n, a, b):
return (0.5 * sum2 + b) / (n / 2.0 + a - 1.0)
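# A note on the helpers above (a hedged reading of ComBat's empirical Bayes
# step, Johnson et al. 2007): aprior/bprior are method-of-moments estimates of
# the inverse-gamma hyperparameters for the prior on the per-batch variances
# (they are applied to delta_hat in combat(), despite the parameter name),
# using m = mean and s2 = variance of the input:
#   a = (2*s2 + m**2) / s2 = 2 + m**2/s2,   b = (m*s2 + m**3) / s2 = m*(a - 1)
# postmean/postvar are the conditional posterior estimates that it_sol
# iterates until the relative change falls below `conv`.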
if __name__ == "__main__":
# NOTE: run this first to get the bladder batch stuff written to files.
"""
source("http://bioconductor.org/biocLite.R")
biocLite("sva")
library("sva")
options(stringsAsFactors=FALSE)
library(bladderbatch)
data(bladderdata)
pheno = pData(bladderEset)
# add fake age variable for numeric
pheno$age = c(1:7, rep(1:10, 5))
write.table(data.frame(cel=rownames(pheno), pheno),
row.names=F, quote=F, sep="\t", file="bladder-pheno.txt")
edata = exprs(bladderEset)
write.table(edata, row.names=T, quote=F, sep="\t", file="bladder-expr.txt")
# use dataframe instead of matrix
mod = model.matrix(~as.factor(cancer) + age, data=pheno)
t = Sys.time()
cdata = ComBat(dat=edata, batch=as.factor(pheno$batch),
mod=mod, numCov=match("age", colnames(mod)))
print(Sys.time() - t)
print(cdata[1:5, 1:5])
write.table(cdata, row.names=T, quote=F, sep="\t", file="r-batch.txt")
"""
pheno = pd.read_table('bladder-pheno.txt', index_col=0)
dat = pd.read_table('bladder-expr.txt', index_col=0)
mod = patsy.dmatrix("~ age + cancer", pheno, return_type="dataframe")
import time
t = time.time()
ebat = combat(dat, pheno.batch, mod, "age")
sys.stdout.write("%.2f seconds\n" % (time.time() - t))
sys.stdout.write(str(ebat.iloc[:5, :5]))
ebat.to_csv("py-batch.txt", sep="\t")
mod = False
ebat = combat(dat, pheno.batch, mod)
|
the-stack_106_29027 | '''
We define P to be a permutation of the first n natural numbers in the range [1, n]. Let pos[i] denote the value at position i in permutation P using 1-based indexing.
P is considered to be an absolute permutation if |pos[i] - i| = k holds true for every i in [1, n].
Given n and k, print the lexicographically smallest absolute permutation P. If no absolute permutation exists, print -1.
For example, let n = 4 and k = 2, giving us the array pos = [1, 2, 3, 4]. Using 1-based indexing, we need a permutation where every |pos[i] - i| = k. For k = 2, we could rearrange it to [3, 4, 1, 2]:
pos[i] i |Difference|
3 1 2
4 2 2
1 3 2
2 4 2
Function Description
Complete the absolutePermutation function in the editor below. It should return an integer array representing the lexicographically smallest absolute permutation, or [-1] if there is none.
absolutePermutation has the following parameter(s):
n: the upper bound of natural numbers to consider, inclusive
k: the integer difference between each element and its index
Input Format
The first line contains an integer t, the number of test cases.
Each of the next t lines contains two space-separated integers, n and k.
Constraints
Output Format
On a new line for each test case, print the lexicographically smallest absolute permutation. If no absolute permutation exists, print -1.
Sample Input
3
2 1
3 0
3 2
Sample Output
2 1
1 2 3
-1
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the absolutePermutation function below.
def absolutePermutation(n, k):
if k == 0:
return [i+1 for i in range(n)]
elif n % (2*k) != 0 or 2*k > n:
return [-1]
return [(i+1)+(1 if (i//k)%2==0 else -1)*k for i in range(n)]
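# Quick sanity check of the construction above (a sketch, separate from the
# original I/O flow): positions are handled in blocks of 2*k, adding +k in the
# first half of each block and -k in the second, which is why a solution
# requires n % (2*k) == 0 when k > 0.
#   absolutePermutation(2, 1)  # -> [2, 1]
#   absolutePermutation(3, 0)  # -> [1, 2, 3]
#   absolutePermutation(3, 2)  # -> [-1]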
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
nk = input().split()
n = int(nk[0])
k = int(nk[1])
result = absolutePermutation(n, k)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
the-stack_106_29032 | # no shebang-line as Python scripts aren't really executable on Windows
# use runner = ["python", "runner.py"] in your Cargo config instead
from hashlib import sha3_224
from shlex import quote
import os
import pathlib
import platform
import sys
import subprocess as sp
# "parse" command line
EXECUTABLE = sys.argv[1]
ARGUMENTS = sys.argv[2:]
# get relative part of executable path and convert to POSIX (as host may be Windows)
EXECUTABLE_RELATIVE = pathlib.Path(os.path.relpath(EXECUTABLE)).as_posix()
# create a (statistically) unique name for the remote working directory copy
WORKDIR = sha3_224((platform.node() + ':' + os.getcwd()).encode('utf-8')).hexdigest()
# the target hardware (Bela.io) has passwordless root login
# for normal systems you'll need to handle user authentication in a smarter manner
SSH_NONINTERACTIVE = ['ssh', '-qTo', 'BatchMode yes', '[email protected]']
SSH_INTERACTIVE = ['ssh', '-qt', '[email protected]']
# use rsync via WSL when on Windows
RSYNC = (['wsl'] if platform.system() == 'Windows' else []) + ['rsync']
# ensure base directory exists
sp.run(SSH_NONINTERACTIVE + [
'mkdir', '-p', '.cargo_runner'
], stdout=sp.DEVNULL, stderr=sp.DEVNULL, check=True)
# synchronize working directory to remote
sp.run(RSYNC + [
'-rlptz',
# prevent syncing the .git folder
'--exclude', '.git',
# the following files can be very large and are usually not required on the Bela
'--exclude', '*.d',
'--exclude', '*.rlib',
'--exclude', '*.rmeta',
'--exclude', '*.exe',
'--exclude', '*.exp',
'--exclude', '*.lib',
'--exclude', '*.pdb',
# delete old files (otherwise they'll get copied back later)
'--delete',
'.',
f'[email protected]:.cargo_runner/{WORKDIR}/'
], stdout=sp.DEVNULL, stderr=sp.DEVNULL, check=True)
# run executable remotely, explicitly without checking, as partial results should still be copied
code = sp.run(SSH_INTERACTIVE + [
f'cd .cargo_runner/{WORKDIR} && {quote(EXECUTABLE_RELATIVE)} {" ".join(map(quote, ARGUMENTS))}'
]).returncode
# synchronize working directory from remote (for Criterion reports etc.)
sp.run(RSYNC + [
'-rlptz',
f'[email protected]:.cargo_runner/{WORKDIR}/',
'.',
], stdout=sp.DEVNULL, stderr=sp.DEVNULL, check=True)
# exit with the code from the actual run
sys.exit(code)
|
the-stack_106_29033 | """Etcd Transport.
It uses Etcd as a store to transport messages in Queues
It uses python-etcd for talking to Etcd's HTTP API
"""
from __future__ import absolute_import, unicode_literals
import os
import socket
from collections import defaultdict
from contextlib import contextmanager
from kombu.exceptions import ChannelError
from kombu.five import Empty
from kombu.log import get_logger
from kombu.utils.json import loads, dumps
from kombu.utils.objects import cached_property
from . import virtual
try:
import etcd
except ImportError:
etcd = None
logger = get_logger('kombu.transport.etcd')
DEFAULT_PORT = 2379
DEFAULT_HOST = 'localhost'
class Channel(virtual.Channel):
"""Etcd Channel class which talks to the Etcd."""
prefix = 'kombu'
index = None
timeout = 10
session_ttl = 30
lock_ttl = 10
def __init__(self, *args, **kwargs):
if etcd is None:
raise ImportError('Missing python-etcd library')
super(Channel, self).__init__(*args, **kwargs)
port = self.connection.client.port or self.connection.default_port
host = self.connection.client.hostname or DEFAULT_HOST
logger.debug('Host: %s Port: %s Timeout: %s', host, port, self.timeout)
self.queues = defaultdict(dict)
self.client = etcd.Client(host=host, port=int(port))
def _key_prefix(self, queue):
"""Create and return the `queue` with the proper prefix.
Arguments:
queue (str): The name of the queue.
"""
return '{0}/{1}'.format(self.prefix, queue)
@contextmanager
def _queue_lock(self, queue):
"""Try to acquire a lock on the Queue.
It does so by creating an object called 'lock' which is locked by the
current session.
This way other nodes are not able to write to the lock object which
means that they have to wait before the lock is released.
Arguments:
queue (str): The name of the queue.
"""
lock = etcd.Lock(self.client, queue)
lock._uuid = self.lock_value
logger.debug('Acquiring lock {0}'.format(lock.name))
lock.acquire(blocking=True, lock_ttl=self.lock_ttl)
try:
yield
finally:
logger.debug('Releasing lock {0}'.format(lock.name))
lock.release()
def _new_queue(self, queue, **_):
"""Create a new `queue` if the `queue` doesn't already exist.
Arguments:
queue (str): The name of the queue.
"""
self.queues[queue] = queue
with self._queue_lock(queue):
try:
return self.client.write(
key=self._key_prefix(queue), dir=True, value=None)
except etcd.EtcdNotFile:
logger.debug('Queue "{0}" already exists'.format(queue))
return self.client.read(key=self._key_prefix(queue))
def _has_queue(self, queue, **kwargs):
"""Verify that queue exists.
Returns:
bool: Should return :const:`True` if the queue exists
or :const:`False` otherwise.
"""
try:
self.client.read(self._key_prefix(queue))
return True
except etcd.EtcdKeyNotFound:
return False
def _delete(self, queue, *args, **_):
"""Delete a `queue`.
Arguments:
queue (str): The name of the queue.
"""
self.queues.pop(queue, None)
self._purge(queue)
def _put(self, queue, payload, **_):
"""Put `message` onto `queue`.
This simply writes a key to the Etcd store
Arguments:
queue (str): The name of the queue.
payload (dict): Message data which will be dumped to etcd.
"""
with self._queue_lock(queue):
key = self._key_prefix(queue)
if not self.client.write(
key=key,
value=dumps(payload),
append=True):
raise ChannelError('Cannot add key {0!r} to etcd'.format(key))
def _get(self, queue, timeout=None):
"""Get the first available message from the queue.
Before it does so it acquires a lock on the store so
only one node reads at the same time. This is for read consistency
Arguments:
queue (str): The name of the queue.
timeout (int): Optional seconds to wait for a response.
"""
with self._queue_lock(queue):
key = self._key_prefix(queue)
logger.debug('Fetching key %s with index %s', key, self.index)
try:
result = self.client.read(
key=key, recursive=True,
index=self.index, timeout=self.timeout)
if result is None:
raise Empty()
item = result._children[-1]
logger.debug('Removing key {0}'.format(item['key']))
msg_content = loads(item['value'])
self.client.delete(key=item['key'])
return msg_content
except (TypeError, IndexError, etcd.EtcdException) as error:
logger.debug('_get failed: {0}:{1}'.format(type(error), error))
raise Empty()
def _purge(self, queue):
"""Remove all `message`s from a `queue`.
Arguments:
queue (str): The name of the queue.
"""
with self._queue_lock(queue):
key = self._key_prefix(queue)
logger.debug('Purging queue at key {0}'.format(key))
return self.client.delete(key=key, recursive=True)
def _size(self, queue):
"""Return the size of the `queue`.
Arguments:
queue (str): The name of the queue.
"""
with self._queue_lock(queue):
size = 0
try:
key = self._key_prefix(queue)
logger.debug('Fetching key recursively %s with index %s',
key, self.index)
result = self.client.read(
key=key, recursive=True,
index=self.index)
size = len(result._children)
except TypeError:
pass
logger.debug('Found %s keys under %s with index %s',
size, key, self.index)
return size
@cached_property
def lock_value(self):
return '{0}.{1}'.format(socket.gethostname(), os.getpid())
class Transport(virtual.Transport):
"""Etcd storage Transport for Kombu."""
Channel = Channel
default_port = DEFAULT_PORT
driver_type = 'etcd'
driver_name = 'python-etcd'
polling_interval = 3
implements = virtual.Transport.implements.extend(
exchange_type=frozenset(['direct']))
def __init__(self, *args, **kwargs):
"""Create a new instance of etcd.Transport."""
if etcd is None:
raise ImportError('Missing python-etcd library')
super(Transport, self).__init__(*args, **kwargs)
self.connection_errors = (
virtual.Transport.connection_errors + (etcd.EtcdException, )
)
self.channel_errors = (
virtual.Transport.channel_errors + (etcd.EtcdException, )
)
def verify_connection(self, connection):
"""Verify the connection works."""
port = connection.client.port or self.default_port
host = connection.client.hostname or DEFAULT_HOST
logger.debug('Verify Etcd connection to %s:%s', host, port)
try:
etcd.Client(host=host, port=int(port))
return True
except ValueError:
pass
return False
def driver_version(self):
"""Return the version of the etcd library.
.. note::
python-etcd has no __version__. This is a workaround.
"""
try:
import pip.commands.freeze
for x in pip.commands.freeze.freeze():
if x.startswith('python-etcd'):
return x.split('==')[1]
except (ImportError, IndexError):
logger.warning('Unable to find the python-etcd version.')
return 'Unknown'
|
the-stack_106_29034 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def compound_split(segment):
segment = segment.replace(".", " . ")
segment = segment.replace(",", " , ")
segment = segment.replace(":", " : ")
segment = segment.replace("!", " ! ")
segment = segment.replace("?", " ? ")
segment = segment.replace("-", " ##AT##-##AT## ")
segment = segment.replace("\"", " "e ")
segment = segment.replace("%", " % ")
return segment.split()
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i : i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
"""Computes BLEU for translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation.
Each reference should be tokenized into a list of
tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of
n-gram precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus, translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for order in range(1, max_order + 1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order - 1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = (matches_by_order[i] + 1.0) / (possible_matches_by_order[i] + 1.0)
else:
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1.0 / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.0
else:
bp = math.exp(1 - 1.0 / (ratio + 1e-6))
bleu = geo_mean * bp
precisions = [p * 100 for p in precisions]
return (
bleu * 100,
precisions,
bp,
ratio,
translation_length,
reference_length,
)
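# A minimal usage sketch (hypothetical sentences; each translation is paired
# with a list of references):
#   refs = [[compound_split("the cat sat on the mat .")]]
#   hyps = [compound_split("the cat sat on the mat .")]
#   bleu, precisions, bp, ratio, hyp_len, ref_len = compute_bleu(refs, hyps)
#   # bleu is approximately 100.0 for an exact match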
|
the-stack_106_29035 | from sys import path
import pygame
from pygame import rect
from pygame.sprite import Sprite
class Bullet(Sprite):
"""
A bullet fired from the player's plane.
Args:
setting (Settings): the basic settings of the game.
screen (Any): the game screen to draw on.
plane (Plane): the plane firing the bullet; its position sets the bullet's start point.
"""
def __init__(self, setting,screen,plane):
super().__init__()
self.screen=screen
self.rect=pygame.Rect(0,0,setting.bullet_width,setting.bullet_height)
self.rect.centerx=plane.rect.centerx
self.rect.top=plane.rect.top
self.y=float(self.rect.y)
self.color=setting.bullet_color
self.speed=setting.bullet_speed
def update(self) :
"""
Move the bullet up the screen by its speed.
Updates:
y (float): the bullet's exact vertical position.
rect.y (int): the integer vertical position used for drawing.
"""
self.y-=self.speed
self.rect.y=self.y
def draw_bullet(self):
"""
draw bullet on the screen.
"""
pygame.draw.rect(self.screen,self.color,self.rect) |
the-stack_106_29036 | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import collections
import logging
import math
import os
import random
import warnings
from dataclasses import asdict
from multiprocessing import cpu_count
import tempfile
from pathlib import Path
from collections import Counter
import numpy as np
import pandas as pd
import torch
from scipy.stats import mode, pearsonr
from scipy.special import softmax
from sklearn.metrics import (
confusion_matrix,
label_ranking_average_precision_score,
matthews_corrcoef,
mean_squared_error,
roc_curve,
auc,
average_precision_score,
)
from torch.utils.tensorboard import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from tqdm.contrib import tenumerate
from transformers.optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.optimization import AdamW, Adafactor
from transformers import (
AlbertConfig,
AlbertTokenizer,
AlbertForSequenceClassification,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
BertConfig,
BertTokenizerFast,
BertForSequenceClassification,
BertweetTokenizer,
BigBirdConfig,
BigBirdTokenizer,
BigBirdForSequenceClassification,
CamembertConfig,
CamembertTokenizerFast,
CamembertForSequenceClassification,
DebertaConfig,
DebertaForSequenceClassification,
DebertaTokenizer,
DebertaV2Config,
DebertaV2ForSequenceClassification,
DebertaV2Tokenizer,
DistilBertConfig,
DistilBertTokenizerFast,
DistilBertForSequenceClassification,
ElectraConfig,
ElectraTokenizerFast,
ElectraForSequenceClassification,
FlaubertConfig,
FlaubertTokenizer,
FlaubertForSequenceClassification,
HerbertTokenizerFast,
LayoutLMConfig,
LayoutLMTokenizerFast,
LayoutLMForSequenceClassification,
LongformerConfig,
LongformerTokenizerFast,
LongformerForSequenceClassification,
MPNetConfig,
MPNetForSequenceClassification,
MPNetTokenizerFast,
MobileBertConfig,
MobileBertTokenizerFast,
MobileBertForSequenceClassification,
RobertaConfig,
RobertaTokenizerFast,
RobertaForSequenceClassification,
SqueezeBertConfig,
SqueezeBertForSequenceClassification,
SqueezeBertTokenizerFast,
WEIGHTS_NAME,
XLMConfig,
XLMRobertaConfig,
XLMRobertaTokenizerFast,
XLMRobertaForSequenceClassification,
XLMTokenizer,
XLMForSequenceClassification,
XLNetConfig,
XLNetTokenizerFast,
XLNetForSequenceClassification,
)
from transformers.convert_graph_to_onnx import convert, quantize
from simpletransformers.classification.classification_utils import (
InputExample,
LazyClassificationDataset,
ClassificationDataset,
convert_examples_to_features,
load_hf_dataset,
flatten_results,
)
from simpletransformers.config.global_args import global_args
from simpletransformers.config.model_args import ClassificationArgs
from simpletransformers.config.utils import sweep_config_to_sweep_values
from simpletransformers.losses.loss_utils import init_loss
# from simpletransformers.custom_models.models import ElectraForSequenceClassification
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
MODELS_WITHOUT_CLASS_WEIGHTS_SUPPORT = ["squeezebert", "deberta", "mpnet"]
MODELS_WITH_EXTRA_SEP_TOKEN = [
"roberta",
"camembert",
"xlmroberta",
"longformer",
"mpnet",
]
MODELS_WITH_ADD_PREFIX_SPACE = [
"roberta",
"camembert",
"xlmroberta",
"longformer",
"mpnet",
]
MODELS_WITHOUT_SLIDING_WINDOW_SUPPORT = ["squeezebert"]
class ClassificationModel:
def __init__(
self,
model_type,
model_name,
tokenizer_type=None,
tokenizer_name=None,
num_labels=None,
weight=None,
args=None,
use_cuda=True,
cuda_device=-1,
onnx_execution_provider=None,
**kwargs,
):
"""
Initializes a ClassificationModel model.
Args:
model_type: The type of model (bert, xlnet, xlm, roberta, distilbert)
model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
tokenizer_type: The type of tokenizer (auto, bert, xlnet, xlm, roberta, distilbert, etc.) to use. If a string is passed, Simple Transformers will try to initialize a tokenizer class from the available MODEL_CLASSES.
Alternatively, a Tokenizer class (subclassed from PreTrainedTokenizer) can be passed.
tokenizer_name: The name/path to the tokenizer. If the tokenizer_type is not specified, the model_type will be used to determine the type of the tokenizer.
num_labels (optional): The number of labels or classes in the dataset.
weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
onnx_execution_provider (optional): ExecutionProvider to use with ONNX Runtime. Will use CUDA (if use_cuda) or CPU (if use_cuda is False) by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
MODEL_CLASSES = {
"albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
"auto": (AutoConfig, AutoModelForSequenceClassification, AutoTokenizer),
"bert": (BertConfig, BertForSequenceClassification, BertTokenizerFast),
"bertweet": (
RobertaConfig,
RobertaForSequenceClassification,
BertweetTokenizer,
),
"bigbird": (
BigBirdConfig,
BigBirdForSequenceClassification,
BigBirdTokenizer,
),
"camembert": (
CamembertConfig,
CamembertForSequenceClassification,
CamembertTokenizerFast,
),
"deberta": (
DebertaConfig,
DebertaForSequenceClassification,
DebertaTokenizer,
),
"debertav2": (
DebertaV2Config,
DebertaV2ForSequenceClassification,
DebertaV2Tokenizer,
),
"distilbert": (
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizerFast,
),
"electra": (
ElectraConfig,
ElectraForSequenceClassification,
ElectraTokenizerFast,
),
"flaubert": (
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
),
"herbert": (
BertConfig,
BertForSequenceClassification,
HerbertTokenizerFast,
),
"layoutlm": (
LayoutLMConfig,
LayoutLMForSequenceClassification,
LayoutLMTokenizerFast,
),
"longformer": (
LongformerConfig,
LongformerForSequenceClassification,
LongformerTokenizerFast,
),
"mobilebert": (
MobileBertConfig,
MobileBertForSequenceClassification,
MobileBertTokenizerFast,
),
"mpnet": (MPNetConfig, MPNetForSequenceClassification, MPNetTokenizerFast),
"roberta": (
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizerFast,
),
"squeezebert": (
SqueezeBertConfig,
SqueezeBertForSequenceClassification,
SqueezeBertTokenizerFast,
),
"xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
"xlmroberta": (
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizerFast,
),
"xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizerFast),
}
self.args = self._load_model_args(model_name)
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, ClassificationArgs):
self.args = args
if (
model_type in MODELS_WITHOUT_SLIDING_WINDOW_SUPPORT
and self.args.sliding_window
):
raise ValueError(
"{} does not currently support sliding window".format(model_type)
)
if self.args.thread_count:
torch.set_num_threads(self.args.thread_count)
if "sweep_config" in kwargs:
self.is_sweeping = True
sweep_config = kwargs.pop("sweep_config")
sweep_values = sweep_config_to_sweep_values(sweep_config)
self.args.update_from_dict(sweep_values)
else:
self.is_sweeping = False
if self.args.manual_seed:
random.seed(self.args.manual_seed)
np.random.seed(self.args.manual_seed)
torch.manual_seed(self.args.manual_seed)
if self.args.n_gpu > 0:
torch.cuda.manual_seed_all(self.args.manual_seed)
if self.args.labels_list:
if num_labels:
assert num_labels == len(self.args.labels_list)
if self.args.labels_map:
try:
assert list(self.args.labels_map.keys()) == self.args.labels_list
except AssertionError:
assert [
int(key) for key in list(self.args.labels_map.keys())
] == self.args.labels_list
self.args.labels_map = {
int(key): value for key, value in self.args.labels_map.items()
}
else:
self.args.labels_map = {
label: i for i, label in enumerate(self.args.labels_list)
}
else:
len_labels_list = 2 if not num_labels else num_labels
self.args.labels_list = [i for i in range(len_labels_list)]
config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]
if tokenizer_type is not None:
if isinstance(tokenizer_type, str):
_, _, tokenizer_class = MODEL_CLASSES[tokenizer_type]
else:
tokenizer_class = tokenizer_type
if num_labels:
self.config = config_class.from_pretrained(
model_name, num_labels=num_labels, **self.args.config
)
self.num_labels = num_labels
else:
self.config = config_class.from_pretrained(model_name, **self.args.config)
self.num_labels = self.config.num_labels
if model_type in MODELS_WITHOUT_CLASS_WEIGHTS_SUPPORT and weight is not None:
raise ValueError(
"{} does not currently support class weights".format(model_type)
)
else:
self.weight = weight
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
" Make sure CUDA is available or set use_cuda=False."
)
else:
self.device = "cpu"
self.loss_fct = init_loss(
weight=self.weight, device=self.device, args=self.args
)
if self.args.onnx:
from onnxruntime import InferenceSession, SessionOptions
if not onnx_execution_provider:
onnx_execution_provider = (
"CUDAExecutionProvider" if use_cuda else "CPUExecutionProvider"
)
options = SessionOptions()
if self.args.dynamic_quantize:
model_path = quantize(Path(os.path.join(model_name, "onnx_model.onnx")))
self.model = InferenceSession(
model_path.as_posix(), options, providers=[onnx_execution_provider]
)
else:
model_path = os.path.join(model_name, "onnx_model.onnx")
self.model = InferenceSession(
model_path, options, providers=[onnx_execution_provider]
)
else:
if not self.args.quantized_model:
self.model = model_class.from_pretrained(
model_name, config=self.config, **kwargs
)
else:
quantized_weights = torch.load(
os.path.join(model_name, "pytorch_model.bin")
)
self.model = model_class.from_pretrained(
None, config=self.config, state_dict=quantized_weights
)
if self.args.dynamic_quantize:
self.model = torch.quantization.quantize_dynamic(
self.model, {torch.nn.Linear}, dtype=torch.qint8
)
if self.args.quantized_model:
self.model.load_state_dict(quantized_weights)
if self.args.dynamic_quantize:
self.args.quantized_model = True
self.results = {}
if not use_cuda:
self.args.fp16 = False
if self.args.fp16:
try:
from torch.cuda import amp
except AttributeError:
raise AttributeError(
"fp16 requires Pytorch >= 1.6. Please update Pytorch or turn off fp16."
)
if tokenizer_name is None:
tokenizer_name = model_name
if tokenizer_name in [
"vinai/bertweet-base",
"vinai/bertweet-covid19-base-cased",
"vinai/bertweet-covid19-base-uncased",
]:
self.tokenizer = tokenizer_class.from_pretrained(
tokenizer_name,
do_lower_case=self.args.do_lower_case,
normalization=True,
**kwargs,
)
else:
self.tokenizer = tokenizer_class.from_pretrained(
tokenizer_name, do_lower_case=self.args.do_lower_case, **kwargs
)
if self.args.special_tokens_list:
self.tokenizer.add_tokens(
self.args.special_tokens_list, special_tokens=True
)
self.model.resize_token_embeddings(len(self.tokenizer))
self.args.model_name = model_name
self.args.model_type = model_type
self.args.tokenizer_name = tokenizer_name
self.args.tokenizer_type = tokenizer_type
if model_type in ["camembert", "xlmroberta"]:
warnings.warn(
f"use_multiprocessing automatically disabled as {model_type}"
" fails when using multiprocessing for feature conversion."
)
self.args.use_multiprocessing = False
if self.args.wandb_project and not wandb_available:
warnings.warn(
"wandb_project specified but wandb is not available. Wandb disabled."
)
self.args.wandb_project = None
def train_model(
self,
train_df,
multi_label=False,
output_dir=None,
show_running_loss=True,
args=None,
eval_df=None,
verbose=True,
**kwargs,
):
"""
Trains the model using 'train_df'
Args:
train_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,
the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be trained on this Dataframe.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_df (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
global_step: Number of global steps trained
training_details: Average training loss if evaluate_during_training is False or full training progress scores if evaluate_during_training is True
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
if self.args.silent:
show_running_loss = False
if self.args.evaluate_during_training and eval_df is None:
raise ValueError(
"evaluate_during_training is enabled but eval_df is not specified."
" Pass eval_df to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if (
os.path.exists(output_dir)
and os.listdir(output_dir)
and not self.args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set overwrite_output_dir: True to automatically overwrite.".format(
output_dir
)
)
self._move_model_to_device()
if self.args.use_hf_datasets:
if self.args.sliding_window:
raise ValueError(
"HuggingFace Datasets cannot be used with sliding window."
)
if self.args.model_type == "layoutlm":
raise NotImplementedError(
"HuggingFace Datasets support is not implemented for LayoutLM models"
)
train_dataset = load_hf_dataset(
train_df, self.tokenizer, self.args, multi_label=multi_label
)
elif isinstance(train_df, str) and self.args.lazy_loading:
if self.args.sliding_window:
raise ValueError("Lazy loading cannot be used with sliding window.")
if self.args.model_type == "layoutlm":
raise NotImplementedError(
"Lazy loading is not implemented for LayoutLM models"
)
train_dataset = LazyClassificationDataset(
train_df, self.tokenizer, self.args
)
else:
if self.args.lazy_loading:
raise ValueError(
"Input must be given as a path to a file when using lazy loading"
)
if "text" in train_df.columns and "labels" in train_df.columns:
if self.args.model_type == "layoutlm":
train_examples = [
InputExample(i, text, None, label, x0, y0, x1, y1)
for i, (text, label, x0, y0, x1, y1) in enumerate(
zip(
train_df["text"].astype(str),
train_df["labels"],
train_df["x0"],
train_df["y0"],
train_df["x1"],
train_df["y1"],
)
)
]
else:
train_examples = (
train_df["text"].astype(str).tolist(),
train_df["labels"].tolist(),
)
elif "text_a" in train_df.columns and "text_b" in train_df.columns:
if self.args.model_type == "layoutlm":
raise ValueError("LayoutLM cannot be used with sentence-pair tasks")
else:
train_examples = (
train_df["text_a"].astype(str).tolist(),
train_df["text_b"].astype(str).tolist(),
train_df["labels"].tolist(),
)
else:
warnings.warn(
"Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels."
)
train_examples = (
train_df.iloc[:, 0].astype(str).tolist(),
train_df.iloc[:, 1].tolist(),
)
train_dataset = self.load_and_cache_examples(
train_examples, verbose=verbose
)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=self.args.train_batch_size,
num_workers=self.args.dataloader_num_workers,
)
os.makedirs(output_dir, exist_ok=True)
global_step, training_details = self.train(
train_dataloader,
output_dir,
multi_label=multi_label,
show_running_loss=show_running_loss,
eval_df=eval_df,
verbose=verbose,
**kwargs,
)
# model_to_save = self.model.module if hasattr(self.model, "module") else self.model
# model_to_save.save_pretrained(output_dir)
# self.tokenizer.save_pretrained(output_dir)
# torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
self.save_model(model=self.model)
if verbose:
logger.info(
" Training of {} model complete. Saved to {}.".format(
self.args.model_type, output_dir
)
)
return global_step, training_details
def train(
self,
train_dataloader,
output_dir,
multi_label=False,
show_running_loss=True,
eval_df=None,
test_df=None,
verbose=True,
**kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
tb_writer = SummaryWriter(log_dir=args.tensorboard_dir)
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [
p for n, p in model.named_parameters() if n in params
]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names
and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names
and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = (
warmup_steps if args.warmup_steps == 0 else args.warmup_steps
)
if args.optimizer == "AdamW":
optimizer = AdamW(
optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adam_epsilon,
)
elif args.optimizer == "Adafactor":
optimizer = Adafactor(
optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adafactor_eps,
clip_threshold=args.adafactor_clip_threshold,
decay_rate=args.adafactor_decay_rate,
beta1=args.adafactor_beta1,
weight_decay=args.weight_decay,
scale_parameter=args.adafactor_scale_parameter,
relative_step=args.adafactor_relative_step,
warmup_init=args.adafactor_warmup_init,
)
print("Using Adafactor for T5")
else:
raise ValueError(
"{} is not a valid optimizer class. Please use one of ('AdamW', 'Adafactor') instead.".format(
args.optimizer
)
)
if args.scheduler == "constant_schedule":
scheduler = get_constant_schedule(optimizer)
elif args.scheduler == "constant_schedule_with_warmup":
scheduler = get_constant_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps
)
elif args.scheduler == "linear_schedule_with_warmup":
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
)
elif args.scheduler == "cosine_schedule_with_warmup":
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "cosine_with_hard_restarts_schedule_with_warmup":
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "polynomial_decay_schedule_with_warmup":
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
lr_end=args.polynomial_decay_schedule_lr_end,
power=args.polynomial_decay_schedule_power,
)
else:
raise ValueError("{} is not a valid scheduler.".format(args.scheduler))
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
global_step = 0
training_progress_scores = None
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
int(args.num_train_epochs), desc="Epoch", disable=args.silent, mininterval=0
)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
current_loss = "Initializing"
if args.model_name and os.path.exists(args.model_name):
try:
# set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (
len(train_dataloader) // args.gradient_accumulation_steps
)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(
" Continuing training from checkpoint, will skip to saved global_step"
)
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(
" Will skip the first %d steps in the current epoch",
steps_trained_in_current_epoch,
)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(
multi_label, **kwargs
)
if args.wandb_project:
if not wandb.setup().settings.sweep_id:
logger.info(" Initializing WandB run for training.")
wandb.init(
project=args.wandb_project,
config={**asdict(args)},
**args.wandb_kwargs,
)
wandb.run._label(repo="simpletransformers")
self.wandb_run_id = wandb.run.id
wandb.watch(self.model)
if self.args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
for _ in train_iterator:
model.train()
if epochs_trained > 0:
epochs_trained -= 1
continue
train_iterator.set_description(
f"Epoch {epoch_number + 1} of {args.num_train_epochs}"
)
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs}",
disable=args.silent,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
if self.args.fp16:
with amp.autocast():
loss, *_ = self._calculate_loss(
model,
inputs,
loss_fct=self.loss_fct,
num_labels=self.num_labels,
args=self.args,
)
else:
loss, *_ = self._calculate_loss(
model,
inputs,
loss_fct=self.loss_fct,
num_labels=self.num_labels,
args=self.args,
)
if args.n_gpu > 1:
loss = (
loss.mean()
) # mean() to average on multi-gpu parallel training
current_loss = loss.item()
if show_running_loss:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if self.args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if self.args.fp16:
scaler.unscale_(optimizer)
if args.optimizer == "AdamW":
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm
)
if self.args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
tb_writer.add_scalar(
"lr", scheduler.get_last_lr()[0], global_step
)
tb_writer.add_scalar(
"loss",
(tr_loss - logging_loss) / args.logging_steps,
global_step,
)
logging_loss = tr_loss
if args.wandb_project or self.is_sweeping:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_last_lr()[0],
"global_step": global_step,
}
)
if args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir_current = os.path.join(
output_dir, "checkpoint-{}".format(global_step)
)
self.save_model(
output_dir_current, optimizer, scheduler, model=model
)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
# Only evaluate when single GPU otherwise metrics may not average well
results, _, _ = self.eval_model(
eval_df,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
wandb_log=False,
**kwargs,
)
output_dir_current = os.path.join(
output_dir, "checkpoint-{}".format(global_step)
)
if args.save_eval_checkpoints:
self.save_model(
output_dir_current,
optimizer,
scheduler,
model=model,
results=results,
)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
if test_df is not None:
test_results, _, _ = self.eval_model(
test_df,
verbose=verbose
and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
wandb_log=False,
**kwargs,
)
for key in test_results:
training_progress_scores["test_" + key].append(
test_results[key]
)
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(
args.output_dir, "training_progress_scores.csv"
),
index=False,
)
if args.wandb_project or self.is_sweeping:
wandb.log(self._get_last_metrics(training_progress_scores))
for key, value in flatten_results(
self._get_last_metrics(training_progress_scores)
):
try:
tb_writer.add_scalar(key, value, global_step)
except (NotImplementedError, AssertionError):
if verbose:
logger.warning(
f"can't log value of type: {type(value)} to tensorboar"
)
tb_writer.flush()
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir,
optimizer,
scheduler,
model=model,
results=results,
)
if best_eval_metric and args.early_stopping_metric_minimize:
if (
best_eval_metric - results[args.early_stopping_metric]
> args.early_stopping_delta
):
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir,
optimizer,
scheduler,
model=model,
results=results,
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if (
early_stopping_counter
< args.early_stopping_patience
):
early_stopping_counter += 1
if verbose:
logger.info(
f" No improvement in {args.early_stopping_metric}"
)
logger.info(
f" Current step: {early_stopping_counter}"
)
logger.info(
f" Early stopping patience: {args.early_stopping_patience}"
)
else:
if verbose:
logger.info(
f" Patience of {args.early_stopping_patience} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
else:
if (
results[args.early_stopping_metric] - best_eval_metric
> args.early_stopping_delta
):
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir,
optimizer,
scheduler,
model=model,
results=results,
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if (
early_stopping_counter
< args.early_stopping_patience
):
early_stopping_counter += 1
if verbose:
logger.info(
f" No improvement in {args.early_stopping_metric}"
)
logger.info(
f" Current step: {early_stopping_counter}"
)
logger.info(
f" Early stopping patience: {args.early_stopping_patience}"
)
else:
if verbose:
logger.info(
f" Patience of {args.early_stopping_patience} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
model.train()
epoch_number += 1
output_dir_current = os.path.join(
output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number)
)
if args.save_model_every_epoch or args.evaluate_during_training:
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch:
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and args.evaluate_each_epoch:
results, _, _ = self.eval_model(
eval_df,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
wandb_log=False,
**kwargs,
)
self.save_model(
output_dir_current, optimizer, scheduler, results=results
)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
if test_df is not None:
test_results, _, _ = self.eval_model(
test_df,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
wandb_log=False,
**kwargs,
)
for key in test_results:
training_progress_scores["test_" + key].append(
test_results[key]
)
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"),
index=False,
)
if args.wandb_project or self.is_sweeping:
wandb.log(self._get_last_metrics(training_progress_scores))
for key, value in flatten_results(
self._get_last_metrics(training_progress_scores)
):
try:
tb_writer.add_scalar(key, value, global_step)
except (NotImplementedError, AssertionError):
if verbose:
logger.warning(
f"can't log value of type: {type(value)} to tensorboar"
)
tb_writer.flush()
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir,
optimizer,
scheduler,
model=model,
results=results,
)
if best_eval_metric and args.early_stopping_metric_minimize:
if (
best_eval_metric - results[args.early_stopping_metric]
> args.early_stopping_delta
):
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir,
optimizer,
scheduler,
model=model,
results=results,
)
early_stopping_counter = 0
else:
if (
args.use_early_stopping
and args.early_stopping_consider_epochs
):
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(
f" No improvement in {args.early_stopping_metric}"
)
logger.info(
f" Current step: {early_stopping_counter}"
)
logger.info(
f" Early stopping patience: {args.early_stopping_patience}"
)
else:
if verbose:
logger.info(
f" Patience of {args.early_stopping_patience} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
else:
if (
results[args.early_stopping_metric] - best_eval_metric
> args.early_stopping_delta
):
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir,
optimizer,
scheduler,
model=model,
results=results,
)
early_stopping_counter = 0
else:
if (
args.use_early_stopping
and args.early_stopping_consider_epochs
):
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(
f" No improvement in {args.early_stopping_metric}"
)
logger.info(
f" Current step: {early_stopping_counter}"
)
logger.info(
f" Early stopping patience: {args.early_stopping_patience}"
)
else:
if verbose:
logger.info(
f" Patience of {args.early_stopping_patience} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
def eval_model(
self,
eval_df,
multi_label=False,
output_dir=None,
verbose=True,
silent=False,
wandb_log=True,
**kwargs,
):
"""
Evaluates the model on eval_df. Saves results to output_dir.
Args:
eval_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,
the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be evaluated on this Dataframe.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
wandb_log: If True, evaluation results will be logged to wandb.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
result: Dictionary containing evaluation results.
model_outputs: List of model outputs for each row in eval_df
wrong_preds: List of InputExample objects corresponding to each incorrect prediction by the model
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args.output_dir
self._move_model_to_device()
result, model_outputs, wrong_preds = self.evaluate(
eval_df,
output_dir,
multi_label=multi_label,
verbose=verbose,
silent=silent,
wandb_log=wandb_log,
**kwargs,
)
self.results.update(result)
if verbose:
logger.info(self.results)
return result, model_outputs, wrong_preds
def evaluate(
self,
eval_df,
output_dir,
multi_label=False,
prefix="",
verbose=True,
silent=False,
wandb_log=True,
**kwargs,
):
"""
Evaluates the model on eval_df.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
if self.args.use_hf_datasets:
if self.args.sliding_window:
raise ValueError(
"HuggingFace Datasets cannot be used with sliding window."
)
if self.args.model_type == "layoutlm":
raise NotImplementedError(
"HuggingFace Datasets support is not implemented for LayoutLM models"
)
eval_dataset = load_hf_dataset(
eval_df, self.tokenizer, self.args, multi_label=multi_label
)
eval_examples = None
elif isinstance(eval_df, str) and self.args.lazy_loading:
if self.args.model_type == "layoutlm":
raise NotImplementedError(
"Lazy loading is not implemented for LayoutLM models"
)
eval_dataset = LazyClassificationDataset(eval_df, self.tokenizer, self.args)
eval_examples = None
else:
if self.args.lazy_loading:
raise ValueError(
"Input must be given as a path to a file when using lazy loading"
)
if "text" in eval_df.columns and "labels" in eval_df.columns:
if self.args.model_type == "layoutlm":
eval_examples = [
InputExample(i, text, None, label, x0, y0, x1, y1)
for i, (text, label, x0, y0, x1, y1) in enumerate(
zip(
eval_df["text"].astype(str),
eval_df["labels"],
eval_df["x0"],
eval_df["y0"],
eval_df["x1"],
eval_df["y1"],
)
)
]
else:
eval_examples = (
eval_df["text"].astype(str).tolist(),
eval_df["labels"].tolist(),
)
elif "text_a" in eval_df.columns and "text_b" in eval_df.columns:
if self.args.model_type == "layoutlm":
raise ValueError("LayoutLM cannot be used with sentence-pair tasks")
else:
eval_examples = (
eval_df["text_a"].astype(str).tolist(),
eval_df["text_b"].astype(str).tolist(),
eval_df["labels"].tolist(),
)
else:
warnings.warn(
"Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels."
)
eval_examples = (
eval_df.iloc[:, 0].astype(str).tolist(),
eval_df.iloc[:, 1].tolist(),
)
if args.sliding_window:
eval_dataset, window_counts = self.load_and_cache_examples(
eval_examples, evaluate=True, verbose=verbose, silent=silent
)
else:
eval_dataset = self.load_and_cache_examples(
eval_examples, evaluate=True, verbose=verbose, silent=silent
)
os.makedirs(eval_output_dir, exist_ok=True)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size
)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
n_batches = len(eval_dataloader)
preds = np.empty((len(eval_dataset), self.num_labels))
if multi_label:
out_label_ids = np.empty((len(eval_dataset), self.num_labels))
else:
out_label_ids = np.empty((len(eval_dataset)))
model.eval()
if self.args.fp16:
from torch.cuda import amp
for i, batch in enumerate(
tqdm(
eval_dataloader,
disable=args.silent or silent,
desc="Running Evaluation",
)
):
# batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = self._get_inputs_dict(batch)
if self.args.fp16:
with amp.autocast():
outputs = self._calculate_loss(
model,
inputs,
loss_fct=self.loss_fct,
num_labels=self.num_labels,
args=self.args,
)
tmp_eval_loss, logits = outputs[:2]
else:
outputs = self._calculate_loss(
model,
inputs,
loss_fct=self.loss_fct,
num_labels=self.num_labels,
args=self.args,
)
tmp_eval_loss, logits = outputs[:2]
if multi_label:
logits = logits.sigmoid()
if self.args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
start_index = self.args.eval_batch_size * i
end_index = (
start_index + self.args.eval_batch_size
if i != (n_batches - 1)
else len(eval_dataset)
)
preds[start_index:end_index] = logits.detach().cpu().numpy()
out_label_ids[start_index:end_index] = (
inputs["labels"].detach().cpu().numpy()
)
# if preds is None:
# preds = logits.detach().cpu().numpy()
# out_label_ids = inputs["labels"].detach().cpu().numpy()
# else:
# preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
# out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.sliding_window:
count = 0
window_ranges = []
for n_windows in window_counts:
window_ranges.append([count, count + n_windows])
count += n_windows
preds = [
preds[window_range[0] : window_range[1]]
for window_range in window_ranges
]
out_label_ids = [
out_label_ids[i]
for i in range(len(out_label_ids))
if i in [window[0] for window in window_ranges]
]
model_outputs = preds
preds = [np.argmax(pred, axis=1) for pred in preds]
final_preds = []
for pred_row in preds:
val_freqs_desc = Counter(pred_row).most_common()
if (
len(val_freqs_desc) > 1
and val_freqs_desc[0][1] == val_freqs_desc[1][1]
):
final_preds.append(args.tie_value)
else:
final_preds.append(val_freqs_desc[0][0])
preds = np.array(final_preds)
elif not multi_label and args.regression is True:
preds = np.squeeze(preds)
model_outputs = preds
else:
model_outputs = preds
if not multi_label:
preds = np.argmax(preds, axis=1)
result, wrong = self.compute_metrics(
preds, model_outputs, out_label_ids, eval_examples, **kwargs
)
result["eval_loss"] = eval_loss
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(result.keys()):
writer.write("{} = {}\n".format(key, str(result[key])))
if (
self.args.wandb_project
and wandb_log
and not multi_label
and not self.args.regression
):
if not wandb.setup().settings.sweep_id:
logger.info(" Initializing WandB run for evaluation.")
wandb.init(
project=args.wandb_project,
config={**asdict(args)},
**args.wandb_kwargs,
)
wandb.run._label(repo="simpletransformers")
if not args.labels_map:
self.args.labels_map = {i: i for i in range(self.num_labels)}
labels_list = sorted(list(self.args.labels_map.keys()))
inverse_labels_map = {
value: key for key, value in self.args.labels_map.items()
}
truth = [inverse_labels_map[out] for out in out_label_ids]
# Confusion Matrix
wandb.sklearn.plot_confusion_matrix(
truth,
[inverse_labels_map[pred] for pred in preds],
labels=labels_list,
)
if not self.args.sliding_window:
# ROC
wandb.log({"roc": wandb.plots.ROC(truth, model_outputs, labels_list)})
# Precision Recall
wandb.log(
{
"pr": wandb.plots.precision_recall(
truth, model_outputs, labels_list
)
}
)
return results, model_outputs, wrong
def load_and_cache_examples(
self,
examples,
evaluate=False,
no_cache=False,
multi_label=False,
verbose=True,
silent=False,
):
"""
Converts a list of InputExample objects to a TensorDataset containing InputFeatures. Caches the InputFeatures.
Utility function for train() and eval() methods. Not intended to be used directly.
"""
process_count = self.args.process_count
tokenizer = self.tokenizer
args = self.args
if not no_cache:
no_cache = args.no_cache
if not multi_label and args.regression:
output_mode = "regression"
else:
output_mode = "classification"
if not no_cache:
os.makedirs(self.args.cache_dir, exist_ok=True)
mode = "dev" if evaluate else "train"
if args.sliding_window or self.args.model_type == "layoutlm":
cached_features_file = os.path.join(
args.cache_dir,
"cached_{}_{}_{}_{}_{}".format(
mode,
args.model_type,
args.max_seq_length,
self.num_labels,
len(examples),
),
)
if os.path.exists(cached_features_file) and (
(not args.reprocess_input_data and not no_cache)
or (mode == "dev" and args.use_cached_eval_features and not no_cache)
):
features = torch.load(cached_features_file)
if verbose:
logger.info(
f" Features loaded from cache at {cached_features_file}"
)
else:
if verbose:
logger.info(" Converting to features started. Cache is not used.")
if args.sliding_window:
logger.info(" Sliding window enabled")
if self.args.model_type != "layoutlm":
if len(examples) == 3:
examples = [
InputExample(i, text_a, text_b, label)
for i, (text_a, text_b, label) in enumerate(zip(*examples))
]
else:
examples = [
InputExample(i, text_a, None, label)
for i, (text_a, label) in enumerate(zip(*examples))
]
# If labels_map is defined, then labels need to be replaced with ints
if self.args.labels_map and not self.args.regression:
for example in examples:
if multi_label:
example.label = [
self.args.labels_map[label] for label in example.label
]
else:
example.label = self.args.labels_map[example.label]
features = convert_examples_to_features(
examples,
args.max_seq_length,
tokenizer,
output_mode,
# XLNet has a CLS token at the end
cls_token_at_end=bool(args.model_type in ["xlnet"]),
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
# RoBERTa uses an extra separator b/w pairs of sentences,
# cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
sep_token_extra=args.model_type in MODELS_WITH_EXTRA_SEP_TOKEN,
# PAD on the left for XLNet
pad_on_left=bool(args.model_type in ["xlnet"]),
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
process_count=process_count,
multi_label=multi_label,
silent=args.silent or silent,
use_multiprocessing=args.use_multiprocessing_for_evaluation,
sliding_window=args.sliding_window,
flatten=not evaluate,
stride=args.stride,
add_prefix_space=args.model_type in MODELS_WITH_ADD_PREFIX_SPACE,
# avoid padding in case of single example/online inferencing to decrease execution time
pad_to_max_length=bool(len(examples) > 1),
args=args,
)
if verbose and args.sliding_window:
logger.info(
f" {len(features)} features created from {len(examples)} samples."
)
if not no_cache:
torch.save(features, cached_features_file)
if args.sliding_window and evaluate:
features = [
[feature_set] if not isinstance(feature_set, list) else feature_set
for feature_set in features
]
window_counts = [len(sample) for sample in features]
features = [
feature for feature_set in features for feature in feature_set
]
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long
)
all_input_mask = torch.tensor(
[f.input_mask for f in features], dtype=torch.long
)
all_segment_ids = torch.tensor(
[f.segment_ids for f in features], dtype=torch.long
)
if self.args.model_type == "layoutlm":
all_bboxes = torch.tensor(
[f.bboxes for f in features], dtype=torch.long
)
if output_mode == "classification":
all_label_ids = torch.tensor(
[f.label_id for f in features], dtype=torch.long
)
elif output_mode == "regression":
all_label_ids = torch.tensor(
[f.label_id for f in features], dtype=torch.float
)
if self.args.model_type == "layoutlm":
dataset = TensorDataset(
all_input_ids,
all_input_mask,
all_segment_ids,
all_label_ids,
all_bboxes,
)
else:
dataset = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label_ids
)
if args.sliding_window and evaluate:
return dataset, window_counts
else:
return dataset
else:
dataset = ClassificationDataset(
examples,
self.tokenizer,
self.args,
mode=mode,
multi_label=multi_label,
output_mode=output_mode,
no_cache=no_cache,
)
return dataset
def compute_metrics(
self,
preds,
model_outputs,
labels,
eval_examples=None,
multi_label=False,
**kwargs,
):
"""
Computes the evaluation metrics for the model predictions.
Args:
preds: Model predictions
model_outputs: Model outputs
labels: Ground truth labels
eval_examples: List of examples on which evaluation was performed
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
result: Dictionary containing evaluation results.
For binary classification, the dictionary contains the Matthews correlation coefficient, tp, tn, fp, and fn (plus AUROC and AUPRC when sliding window evaluation is not used).
For non-binary classification, the dictionary contains the Matthews correlation coefficient only, along with any additional metrics passed in as kwargs.
wrong: List of InputExample objects corresponding to each incorrect prediction by the model
""" # noqa: ignore flake8"
assert len(preds) == len(labels)
extra_metrics = {}
for metric, func in kwargs.items():
if metric.startswith("prob_"):
extra_metrics[metric] = func(labels, model_outputs)
else:
extra_metrics[metric] = func(labels, preds)
if multi_label:
threshold_values = self.args.threshold if self.args.threshold else 0.5
if isinstance(threshold_values, list):
mismatched = labels != [
[
self._threshold(pred, threshold_values[i])
for i, pred in enumerate(example)
]
for example in preds
]
else:
mismatched = labels != [
[self._threshold(pred, threshold_values) for pred in example]
for example in preds
]
else:
mismatched = labels != preds
if eval_examples:
wrong = [i for (i, v) in zip(eval_examples, mismatched) if v.any()]
else:
wrong = ["NA"]
if multi_label:
label_ranking_score = label_ranking_average_precision_score(labels, preds)
return {**{"LRAP": label_ranking_score}, **extra_metrics}, wrong
elif self.args.regression:
return {**extra_metrics}, wrong
mcc = matthews_corrcoef(labels, preds)
if self.model.num_labels == 2:
tn, fp, fn, tp = confusion_matrix(labels, preds, labels=[0, 1]).ravel()
if self.args.sliding_window:
return (
{
**{"mcc": mcc, "tp": tp, "tn": tn, "fp": fp, "fn": fn},
**extra_metrics,
},
wrong,
)
else:
scores = np.array([softmax(element)[1] for element in model_outputs])
fpr, tpr, thresholds = roc_curve(labels, scores)
auroc = auc(fpr, tpr)
auprc = average_precision_score(labels, scores)
return (
{
**{
"mcc": mcc,
"tp": tp,
"tn": tn,
"fp": fp,
"fn": fn,
"auroc": auroc,
"auprc": auprc,
},
**extra_metrics,
},
wrong,
)
else:
return {**{"mcc": mcc}, **extra_metrics}, wrong
def predict(self, to_predict, multi_label=False):
"""
Performs predictions on a list of text.
Args:
to_predict: A python list of text (str) to be sent to the model for prediction.
Returns:
preds: A python list of the model predictions for each text. For standard classification this is the predicted class (mapped back through labels_map if one is set); for multi-label it is a list of 0/1 values per label.
model_outputs: A python list of the raw model outputs for each text.
"""
model = self.model
args = self.args
eval_loss = 0.0
nb_eval_steps = 0
preds = np.empty((len(to_predict), self.num_labels))
if multi_label:
out_label_ids = np.empty((len(to_predict), self.num_labels))
else:
out_label_ids = np.empty((len(to_predict)))
if not multi_label and self.args.onnx:
model_inputs = self.tokenizer.batch_encode_plus(
to_predict, return_tensors="pt", padding=True, truncation=True
)
if self.args.model_type in ["bert", "xlnet", "albert", "layoutlm"]:
for i, (input_ids, attention_mask, token_type_ids) in enumerate(
zip(
model_inputs["input_ids"],
model_inputs["attention_mask"],
model_inputs["token_type_ids"],
)
):
input_ids = input_ids.unsqueeze(0).detach().cpu().numpy()
attention_mask = attention_mask.unsqueeze(0).detach().cpu().numpy()
token_type_ids = token_type_ids.unsqueeze(0).detach().cpu().numpy()
inputs_onnx = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
# Run the model (None = get all the outputs)
output = self.model.run(None, inputs_onnx)
preds[i] = output[0]
else:
for i, (input_ids, attention_mask) in enumerate(
zip(model_inputs["input_ids"], model_inputs["attention_mask"])
):
input_ids = input_ids.unsqueeze(0).detach().cpu().numpy()
attention_mask = attention_mask.unsqueeze(0).detach().cpu().numpy()
inputs_onnx = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
# Run the model (None = get all the outputs)
output = self.model.run(None, inputs_onnx)
preds[i] = output[0]
model_outputs = preds
preds = np.argmax(preds, axis=1)
else:
self._move_model_to_device()
dummy_label = (
0
if not self.args.labels_map
else next(iter(self.args.labels_map.keys()))
)
if multi_label:
dummy_label = [dummy_label for i in range(self.num_labels)]
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if isinstance(to_predict[0], list):
eval_examples = (
*zip(*to_predict),
[dummy_label for i in range(len(to_predict))],
)
else:
eval_examples = (
to_predict,
[dummy_label for i in range(len(to_predict))],
)
if args.sliding_window:
eval_dataset, window_counts = self.load_and_cache_examples(
eval_examples, evaluate=True, no_cache=True
)
preds = np.empty((len(eval_dataset), self.num_labels))
if multi_label:
out_label_ids = np.empty((len(eval_dataset), self.num_labels))
else:
out_label_ids = np.empty((len(eval_dataset)))
else:
eval_dataset = self.load_and_cache_examples(
eval_examples, evaluate=True, multi_label=multi_label, no_cache=True
)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size
)
if self.args.fp16:
from torch.cuda import amp
if self.config.output_hidden_states:
model.eval()
preds = None
out_label_ids = None
for i, batch in enumerate(
tqdm(
eval_dataloader, disable=args.silent, desc="Running Prediction"
)
):
# batch = tuple(t.to(self.device) for t in batch)
with torch.no_grad():
inputs = self._get_inputs_dict(batch, no_hf=True)
if self.args.fp16:
with amp.autocast():
outputs = self._calculate_loss(
model,
inputs,
loss_fct=self.loss_fct,
num_labels=self.num_labels,
args=self.args,
)
tmp_eval_loss, logits = outputs[:2]
else:
outputs = self._calculate_loss(
model,
inputs,
loss_fct=self.loss_fct,
num_labels=self.num_labels,
args=self.args,
)
tmp_eval_loss, logits = outputs[:2]
embedding_outputs, layer_hidden_states = (
outputs[2][0],
outputs[2][1:],
)
if multi_label:
logits = logits.sigmoid()
if self.args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
all_layer_hidden_states = np.array(
[
state.detach().cpu().numpy()
for state in layer_hidden_states
]
)
all_embedding_outputs = embedding_outputs.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids,
inputs["labels"].detach().cpu().numpy(),
axis=0,
)
all_layer_hidden_states = np.append(
all_layer_hidden_states,
np.array(
[
state.detach().cpu().numpy()
for state in layer_hidden_states
]
),
axis=1,
)
all_embedding_outputs = np.append(
all_embedding_outputs,
embedding_outputs.detach().cpu().numpy(),
axis=0,
)
else:
n_batches = len(eval_dataloader)
for i, batch in enumerate(tqdm(eval_dataloader, disable=args.silent)):
model.eval()
# batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = self._get_inputs_dict(batch, no_hf=True)
if self.args.fp16:
with amp.autocast():
outputs = self._calculate_loss(
model,
inputs,
loss_fct=self.loss_fct,
num_labels=self.num_labels,
args=self.args,
)
tmp_eval_loss, logits = outputs[:2]
else:
outputs = self._calculate_loss(
model,
inputs,
loss_fct=self.loss_fct,
num_labels=self.num_labels,
args=self.args,
)
tmp_eval_loss, logits = outputs[:2]
if multi_label:
logits = logits.sigmoid()
if self.args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
start_index = self.args.eval_batch_size * i
end_index = (
start_index + self.args.eval_batch_size
if i != (n_batches - 1)
else len(eval_dataset)
)
preds[start_index:end_index] = logits.detach().cpu().numpy()
out_label_ids[start_index:end_index] = (
inputs["labels"].detach().cpu().numpy()
)
eval_loss = eval_loss / nb_eval_steps
if args.sliding_window:
count = 0
window_ranges = []
for n_windows in window_counts:
window_ranges.append([count, count + n_windows])
count += n_windows
preds = [
preds[window_range[0] : window_range[1]]
for window_range in window_ranges
]
model_outputs = preds
preds = [np.argmax(pred, axis=1) for pred in preds]
final_preds = []
for pred_row in preds:
mode_pred, counts = mode(pred_row)
if len(counts) > 1 and counts[0] == counts[1]:
final_preds.append(args.tie_value)
else:
final_preds.append(mode_pred[0])
preds = np.array(final_preds)
elif not multi_label and args.regression is True:
preds = np.squeeze(preds)
model_outputs = preds
else:
model_outputs = preds
if multi_label:
if isinstance(args.threshold, list):
threshold_values = args.threshold
preds = [
[
self._threshold(pred, threshold_values[i])
for i, pred in enumerate(example)
]
for example in preds
]
else:
preds = [
[self._threshold(pred, args.threshold) for pred in example]
for example in preds
]
else:
preds = np.argmax(preds, axis=1)
if self.args.labels_map and not self.args.regression:
inverse_labels_map = {
value: key for key, value in self.args.labels_map.items()
}
preds = [inverse_labels_map[pred] for pred in preds]
if self.config.output_hidden_states:
return preds, model_outputs, all_embedding_outputs, all_layer_hidden_states
else:
return preds, model_outputs
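# Hedged usage sketch (`model` is assumed to be an instance of this class):
#
#   preds, raw_outputs = model.predict(["first example text", "second example text"])
#   # Sentence-pair models take a list of [text_a, text_b] pairs instead:
#   preds, raw_outputs = model.predict([["premise text", "hypothesis text"]])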
def convert_to_onnx(self, output_dir=None, set_onnx_arg=True):
"""Convert the model to ONNX format and save to output_dir
Args:
output_dir (str, optional): If specified, ONNX model will be saved to output_dir (else args.output_dir will be used). Defaults to None.
set_onnx_arg (bool, optional): Updates the model args to set onnx=True. Defaults to True.
""" # noqa
if not output_dir:
output_dir = os.path.join(self.args.output_dir, "onnx")
os.makedirs(output_dir, exist_ok=True)
if os.listdir(output_dir):
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Output directory for onnx conversion must be empty.".format(
output_dir
)
)
onnx_model_name = os.path.join(output_dir, "onnx_model.onnx")
with tempfile.TemporaryDirectory() as temp_dir:
self.save_model(output_dir=temp_dir, model=self.model)
convert(
framework="pt",
model=temp_dir,
tokenizer=self.tokenizer,
output=Path(onnx_model_name),
pipeline_name="sentiment-analysis",
opset=11,
)
self.args.onnx = True
self.tokenizer.save_pretrained(output_dir)
self.config.save_pretrained(output_dir)
self.save_model_args(output_dir)
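# Hedged sketch: after conversion, the ONNX directory can typically be reloaded
# for inference because args.onnx=True is saved alongside the tokenizer and config.
# The ClassificationModel constructor call is an assumption about how the
# surrounding library is normally used.
#
#   model.convert_to_onnx("outputs/onnx")
#   onnx_model = ClassificationModel(model.args.model_type, "outputs/onnx")
#   preds, _ = onnx_model.predict(["some text to classify"])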
def _calculate_loss(self, model, inputs, loss_fct, num_labels, args):
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if loss_fct:
logits = outputs[1]
labels = inputs["labels"]
loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))
return (loss, *outputs[1:])
def _threshold(self, x, threshold):
if x >= threshold:
return 1
return 0
def _move_model_to_device(self):
self.model.to(self.device)
def _get_inputs_dict(self, batch, no_hf=False):
if self.args.use_hf_datasets and not no_hf:
return {key: value.to(self.device) for key, value in batch.items()}
if isinstance(batch[0], dict):
inputs = {
key: value.squeeze(1).to(self.device) for key, value in batch[0].items()
}
inputs["labels"] = batch[1].to(self.device)
else:
batch = tuple(t.to(self.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
# XLM, DistilBERT and RoBERTa don't use segment_ids
if self.args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2]
if self.args.model_type in ["bert", "xlnet", "albert", "layoutlm"]
else None
)
if self.args.model_type == "layoutlm":
inputs["bbox"] = batch[4]
return inputs
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _create_training_progress_scores(self, multi_label, **kwargs):
return collections.defaultdict(list)
"""extra_metrics = {key: [] for key in kwargs}
if multi_label:
training_progress_scores = {
"global_step": [],
"LRAP": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
else:
if self.model.num_labels == 2:
if self.args.sliding_window:
training_progress_scores = {
"global_step": [],
"tp": [],
"tn": [],
"fp": [],
"fn": [],
"mcc": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
else:
training_progress_scores = {
"global_step": [],
"tp": [],
"tn": [],
"fp": [],
"fn": [],
"mcc": [],
"train_loss": [],
"eval_loss": [],
"auroc": [],
"auprc": [],
**extra_metrics,
}
elif self.model.num_labels == 1:
training_progress_scores = {
"global_step": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
else:
training_progress_scores = {
"global_step": [],
"mcc": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
return training_progress_scores"""
def save_model(
self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None
):
if not output_dir:
output_dir = self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
if model and not self.args.no_save:
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer and scheduler and self.args.save_optimizer_and_scheduler:
torch.save(
optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")
)
torch.save(
scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")
)
self.save_model_args(output_dir)
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
def save_model_args(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
self.args.save(output_dir)
def _load_model_args(self, input_dir):
args = ClassificationArgs()
args.load(input_dir)
return args
def get_named_parameters(self):
return [n for n, p in self.model.named_parameters()]
|
the-stack_106_29038 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
import logging
import random
import sys
import threading
import time
from base64 import b64decode, b64encode
from typing import Optional
import etcd
from torch.distributed import Store, TCPStore, register_rendezvous_handler
from torchelastic.rendezvous import (
RendezvousClosedException,
RendezvousHandler,
RendezvousNonRetryableError,
RendezvousTimeoutException,
)
_log_fmt = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
_log_handler = logging.StreamHandler(sys.stderr)
_log_handler.setFormatter(_log_fmt)
log = logging.getLogger(__name__)
log.propagate = False
log.setLevel(logging.INFO)
log.addHandler(_log_handler)
# Retryable failure exception means that we were too late to make
# a desired state transition (e.g. because of a race condition),
# and should now restart from the beginning.
# A small delay is recommended to avoid spamming Etcd.
class CustomRendezvousRetryableFailure(Exception):
pass
# Similar to retryable failure, but the new state we observed suggests we
# can re-try immediately, i.e. without a need for "safety delay".
class CustomRendezvousRetryImmediately(Exception):
pass
# Default overall timeout for rendezvous barrier.
CONST_DEFAULT_OVERALL_TIMEOUT = 600
# Additional waiting amount after reaching num_min_workers,
# for the case rendezvous is elastic (min != max):
CONST_DEFAULT_LAST_CALL_TIMEOUT = 30
# Various constants used internally in EtcdRendezvous
CONST_ETCD_SETUP_TTL = 5
CONST_ETCD_FROZEN_TTL = 10
CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10
# Ephemeral node TTL for worker's keep-alive key:
CONST_WORKER_KEEPALIVE_TTL = 10
# TTL for the ephemeral run_id-specific directory. All rendezvous state data
# for a specific run_id (job instance) is contained within this directory.
# Its only role is to clean up rendezvous data from old runs (for the case when
# the etcd server is persistent), and has no effect on correctness, but should be
# larger than any timeouts that a worker process is expected to survive:
CONST_RUNID_SUBROOT_TTL = 7200 # 2 hours
# Delay (sleep) for a small random amount to reduce CAS failures.
# This does not affect correctness, but will reduce requests to etcd server.
def cas_delay():
time.sleep(random.uniform(0, 0.1))
class CustomRendezvousHandler(RendezvousHandler):
"""
Implements a :py:class:`torchelastic.rendezvous.RendezvousHandler`
interface backed by
:py:class:`torchelastic.rendezvous.etcd_rendezvous.EtcdRendezvous`.
Torchelastic uses a URL to configure the type of rendezvous to use and
to pass implementation specific configurations to the rendezvous module.
The basic etcd rendezvous configuration URL looks like the following
::
etcd://<etcd_address>:<port>/<job_id>?min_workers=<min_workers>&max_workers=<max_workers> # noqa W605
-- example --
etcd://localhost:2379/1234?min_workers=1&max_workers=3
The URL above is interpreted as follows:
1. Use the rendezvous handler that is registered with the ``etcd``
scheme
2. The ``etcd`` endpoint to use is ``localhost:2379``
3. ``job_id == 1234`` is used as the prefix in etcd (this allows one to
share a common etcd server for multiple jobs so long as the
``job_ids`` are guaranteed to be unique). Note that the job id can be
any string (e.g. does not need to be a number) as long as it is
unique.
4. ``min_workers=1`` and ``max_workers=3`` specifies a range for
membership size - torchelastic starts running the job as long as the
cluster size is greater than or equal to ``min_workers`` and admits
up to ``max_workers`` into the cluster.
Below are a full list of the parameters that can be passed to etcd
rendezvous:
+--------------------------------------------+--------------------------+
| Parameter | Description |
+============================================+==========================+
| min_workers | minimum number of |
| | workers for the |
| | rendezvous to be valid |
+--------------------------------------------+--------------------------+
| max_workers | maximum number of |
| | workers to admit |
+--------------------------------------------+--------------------------+
| timeout | total timeout within |
| | which next_rendezvous is |
| | expected to succeed |
| | (default 600s) |
+--------------------------------------------+--------------------------+
| last_call_timeout | additional wait amount |
| | ("last call") after min |
| | number of workers has |
| | been reached (defaults |
| | to 30s) |
+--------------------------------------------+--------------------------+
| etcd_prefix | path prefix (from etcd |
| | root), inside which all |
| | etcd nodes will be |
| | created (defaults to |
| | ``/torchelastic/p2p``) |
+--------------------------------------------+--------------------------+
"""
def __init__(self, rdzv_impl):
self._rdzv_impl = rdzv_impl
def __del__(self):
# TODO: look into using weakref here instead.
del self._rdzv_impl
def next_rendezvous(self):
rdzv_version, rank, world_size = self._rdzv_impl.rendezvous_barrier()
log.info("Creating EtcdStore as the c10d::Store implementation")
store = self._rdzv_impl.setup_kv_store(rdzv_version)
return store, rank, world_size
def is_closed(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
return state["status"] == "closed"
except etcd.EtcdKeyNotFound:
# No rendezvous state, so it cannot be closed.
return False
def set_closed(self):
self._rdzv_impl.set_closed()
def num_nodes_waiting(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
if state["status"] == "final":
return state["num_workers_waiting"]
except etcd.EtcdKeyNotFound:
pass
return 0
def get_run_id(self) -> str:
return self._rdzv_impl._run_id
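# Hedged usage sketch: given a factory such as _custom_rendezvous_handler below
# (assumed to return a CustomRendezvousHandler), workers obtain a shared c10d
# store plus their rank and world size like this. The endpoint and run_id are
# illustrative assumptions.
#
#   handler = _custom_rendezvous_handler(
#       "etcd://localhost:2379/1234?min_workers=1&max_workers=3"
#   )
#   store, rank, world_size = handler.next_rendezvous()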
# TODO: we should probably handle a few additional errors,
# like EtcdLeaderElectionInProgress and EtcdWatcherCleared. These are
# only relevant for multi-node Etcd ensemble. A simple retry would work,
# but is verbose to add everywhere. Consider wrapping the client calls
# into auto-retry for these errors?
#
class CustomRendezvous(object):
"""
A rendezvous implementation that uses `etcd <https://etcd.io/>`__ as
the backend store.
"""
def __init__(
self,
endpoints,
prefix,
run_id,
num_min_workers,
num_max_workers,
timeout,
last_call_timeout,
**kwargs,
):
self._prefix = prefix
self._run_id = run_id
self._num_min_workers = num_min_workers
self._num_max_workers = num_max_workers
self._timeout = timeout
self._last_call_timeout = last_call_timeout
# For cleaning up TTL refresher threads (for ephemeral keys)
self._lease_run_id_stop = None
self._lease_this_rank_stop = None
if not self._prefix.endswith("/"):
self._prefix += "/"
self.client = etcd.Client(host=endpoints, allow_reconnect=True, **kwargs)
log.info("Etcd machines: " + str(self.client.machines))
# Set up a permanent prefix dir, if it didn't exist
if self._prefix != "/":
self.create_path_if_not_exists(self._prefix)
# Lease a "sub-root" node specific to this job instance (run_id)
self.create_path_if_not_exists(self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL)
self._lease_run_id_stop = self.setup_lease_renewal(
self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL
)
# Subdir for all rendezvous work
self.create_path_if_not_exists(self.get_path("/rdzv"))
# Create a rendezvous version counter, if it doesn't exist
try:
self.client.write(
key=self.get_path("/rdzv/version_counter"), value="0", prevExist=False
)
except etcd.EtcdAlreadyExist:
pass
def __del__(self):
# TODO: look into using weakref here instead.
if self._lease_run_id_stop is not None:
self._lease_run_id_stop.set()
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
def rendezvous_barrier(self):
"""
Main entry point for next rendezvous.
This method is blocking until rendezvous succeeds or a timeout occurs.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousTimeoutException - timeout waiting for rendezvous
RendezvousNonRetryableError - other persistent errors that
render the rendezvous non-retryable
RendezvousClosedException - rendezvous is or was closed while
waiting
"""
self._rendezvous_deadline = time.time() + self._timeout
while True:
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
log.info("Attempting to join next rendezvous")
try:
# Dis-own our lease in the previous rendezvous, if exists
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
return self.init_phase()
except CustomRendezvousRetryImmediately:
# The type of failure suggests we can retry without delay
pass
except CustomRendezvousRetryableFailure:
# In case of retryable failure, wait a small delay
# to avoid spamming etcd
time.sleep(1)
except RendezvousTimeoutException:
log.info("Rendezvous timeout occured in EtcdRendezvousHandler")
raise
except RendezvousClosedException:
log.info(
f"Rendezvous for run_id={self._run_id} was observed to be closed"
)
raise
except RendezvousNonRetryableError:
raise
except Exception as e:
# In case of a general exception, wait a small delay
# to avoid spamming etcd
# FIXME: there are a few things that fall under this like
# etcd.EtcdKeyNotFound, etc, which could be handled more explicitly.
log.info("Rendezvous attempt failed, will retry. Reason: " + str(e))
time.sleep(1)
def init_phase(self):
"""
Initially, the rendezvous state is expected to be one of:
1. empty (non-existent) - in this case we try to create a new one.
2. joinable - we try to join it.
3. final - we announce ourselves as waiting, and go into monitoring mode
Any other state is considered transitional, and will be retried after
a short delay.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousClosedException - current rendezvous was/is closed
EtcdRendezvousRetryableFailure - observed some intermediate
state, which is best handled by retrying later
"""
try:
active_version = self.try_create_rendezvous()
state = json.loads(active_version.value)
log.info("New rendezvous state created: " + str(state))
except etcd.EtcdAlreadyExist:
active_version, state = self.get_rdzv_state()
# Note: it is possible for above query to fail (etcd.EtcdKeyNotFound),
# but this is ok for us - just means we'll restart from beginning.
log.info("Observed existing rendezvous state: " + str(state))
if state["status"] == "closed":
raise RendezvousClosedException()
if state["status"] == "joinable":
return self.join_phase(state["version"])
if state["status"] == "final":
self.handle_existing_rendezvous(state["version"])
raise CustomRendezvousRetryImmediately()
self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1)
raise CustomRendezvousRetryableFailure()
def join_phase(self, expected_version):
"""
We observed a rendezvous state in 'joinable' state, and attempt to join this
particular version, and then wait for all other peers to join.
"""
# Failure to join will propagate an exception, causing a re-entry.
active_version, this_rank = self.join_rendezvous(expected_version)
state = json.loads(active_version.value)
log.info(
"Joined rendezvous version {} as rank {}. Full state: {}".format(
state["version"], this_rank, state
)
)
# If this worker was first to reach num_min_workers requirement,
# and rendezvous is still joinable (therefore it is elastic),
# then this worker will be responsible for waiting out the "last call"
# timeout and closing (i.e. transitioning to 'frozen') the rendezvous
# afterwards.
# As a safety against a potential failure of this worker (during the
# last call timeout), the rendezvous state is made ephemeral
# when min_num_workers is reached.
if this_rank == self._num_min_workers - 1 and state["status"] == "joinable":
log.info("Rank {} is responsible for join last call.".format(this_rank))
last_call_deadline = time.time() + self._last_call_timeout
self.handle_join_last_call(expected_version, last_call_deadline)
log.info("Rank {} finished join last call.".format(this_rank))
# Wait for rendezvous state to be frozen, which means a fixed set of peers
log.info("Waiting for remaining peers.")
active_version = self.wait_for_peers(expected_version)
state = json.loads(active_version.value)
assert (
state["version"] == expected_version
), "Logic error: failed to observe version mismatch"
return self.confirm_phase(expected_version, this_rank)
def confirm_phase(self, expected_version, this_rank):
"""
Once the rendezvous state transitions from 'joinable' to 'frozen',
we have every participant confirm their membership and setup per-member
keep-alive TTL keys, and then wait for all other participants to confirm,
which would then successfully conclude this rendezvous.
"""
log.info("All peers arrived. Confirming membership.")
self.confirm_membership(expected_version, this_rank)
log.info("Waiting for confirmations from all peers.")
active_version = self.wait_for_final(expected_version)
state = json.loads(active_version.value)
log.info(
"Rendezvous version {} is complete. Final state: {}".format(
state["version"], state
)
)
# Rendezvous version number; our rank in it; world size
return state["version"], this_rank, len(state["participants"])
def handle_existing_rendezvous(self, expected_version):
"""
Handle the case when there's an existing (state 'final') rendezvous already
in place, and we have to announce ourselves waiting, and wait until
the next rendezvous opportunity.
"""
# If state is 'final' -> increment num_workers_waiting
# Then, observe state changes:
# 1. if it's no longer final -> bail out and re-try
# 2. if keep alives are missing, destroy it and bail out.
active_state = self.announce_self_waiting(expected_version)
log.info(
"Added self to waiting list. Rendezvous full state: {}".format(
active_state.value
)
)
self.wait_for_rendezvous_to_free(expected_version)
log.info("Previously existing rendezvous state changed. Will re-try joining.")
def try_create_rendezvous(self):
"""
Create new rendezvous state or raise an exception that indicates
an unexpected state (e.g. already exists)
Raises:
RendezvousNonRetryableError - on unexpected state
"""
# Initially active_version is ephemeral - this is to handle the
# possibility that we might fail to complete the setup transaction,
# i.e. the transition "setup" -> "joinable".
active_version = self.client.write(
key=self.get_path("/rdzv/active_version"),
value=json.dumps({"status": "setup"}),
prevExist=False,
ttl=CONST_ETCD_SETUP_TTL,
)
try:
version_counter = self.client.get(self.get_path("/rdzv/version_counter"))
version_counter.value = str(int(version_counter.value) + 1)
self.client.update(version_counter)
except (etcd.EtcdKeyNotFound, etcd.EtcdCompareFailed):
raise RendezvousNonRetryableError(
"Unexpected state of EtcdRendezvousHandler, worker needs to die."
)
# Any failure below results in declaring a retryable rendezvous failure.
# The ephemeral /rdzv/active_version will expire and someone can then
# re-try the setup process.
# Create directory node for participant data
self.client.write(
key=self.get_path("/rdzv/v_{}".format(version_counter.value)),
value=None,
dir=True,
prevExist=False,
)
# Publish rendezvous version and signal it is ready-to-be-joined.
# If rendezvous was set closed just before this, a retry will happen,
# where the closed condition will be handled.
return self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(
{
"status": "joinable",
"version": version_counter.value,
"participants": [],
}
),
prev_value=active_version.value,
)
def join_rendezvous(self, expected_version):
"""
Helper method for the join phase.
"""
# Use compare-and-swap to add self to rendezvous state:
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "joinable":
raise CustomRendezvousRetryableFailure(
"Rendezvous state became non-joinable before we could join. "
"Must join next one."
)
if state["version"] != expected_version:
raise CustomRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
assert (
len(state["participants"]) < self._num_max_workers
), "Logic error: joinable rendezvous should always have space left"
this_rank = len(state["participants"])
state["participants"].append(this_rank)
# When reaching min workers, or changing state to frozen, we'll set
# the active_version node to be ephemeral.
if len(state["participants"]) == self._num_max_workers:
state["status"] = "frozen"
state["keep_alives"] = []
set_ttl = CONST_ETCD_FROZEN_TTL
elif len(state["participants"]) >= self._num_min_workers:
set_ttl = CONST_ETCD_JOINABLE_EPHEMERAL_TTL
else:
set_ttl = None
try:
# Compare-and-swap.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=set_ttl,
)
# We succeeded joining.
return active_version, this_rank
except etcd.EtcdCompareFailed:
log.info("Join rendezvous CAS unsuccessful, retrying")
def wait_for_peers(self, expected_version):
"""
Helper method for the join phase.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Success, all peers arrived.
return active_version
elif state["status"] == "joinable" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise CustomRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def confirm_membership(self, expected_version, this_rank):
"""
Helper method for the confirm phase
"""
# Compare-and-swap loop
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "frozen":
raise CustomRendezvousRetryImmediately(
"Rendezvous no longer frozen, before we confirmed. "
"Must join next one"
)
if state["version"] != expected_version:
raise CustomRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
this_lease_key = self.get_path(
"/rdzv/v_{}/rank_{}".format(expected_version, this_rank)
)
self.client.set(this_lease_key, value=None, ttl=CONST_WORKER_KEEPALIVE_TTL)
state["keep_alives"].append(this_lease_key)
if len(state["keep_alives"]) == len(state["participants"]):
# Everyone confirmed (this rank is last to do so)
state["status"] = "final"
state["num_workers_waiting"] = 0
finalize = True
else:
finalize = False
try:
# Compare-and-swap. If new state is still frozen, keep it ephemeral.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=None if finalize else CONST_ETCD_FROZEN_TTL,
)
self._lease_this_rank_stop = self.setup_lease_renewal(
this_lease_key, ttl=CONST_WORKER_KEEPALIVE_TTL
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Confirm membership CAS unsuccessful, retrying")
def wait_for_final(self, expected_version):
"""
Helper method for the confirm phase
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "final" and state["version"] == expected_version:
# Success. This rendezvous is final, and we accept it.
return active_version
elif state["status"] == "frozen" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise CustomRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def announce_self_waiting(self, expected_version):
"""
Announce this worker is waiting (via num_workers_waiting counter) to join next
rendezvous, but only if state and version match.
"""
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "final" or state["version"] != expected_version:
raise CustomRendezvousRetryImmediately()
# Increment counter to signal an additional waiting worker.
state["num_workers_waiting"] += 1
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Announce self as waiting CAS unsuccessful, retrying")
def wait_for_rendezvous_to_free(self, expected_version):
"""
When there's an existing valid rendezvous in state 'final', we have to
wait until the next opportunity to join.
Such opportunity may come from:
1. rendezvous state changed by someone else, in which case we unblock and retry.
2. rendezvous becomes invalid because at least one member failed to renew their
leased keep_alive node. We detect this, and destroy the rendezvous.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] != "final" or state["version"] != expected_version:
return
# Check if current rendezvous state is valid, in the sense that all
# its members are alive (renewing their lease).
# If not, try destroy this rendezvous, so a new one can be created.
alive_members = self.client.get(
self.get_path("/rdzv/v_{version}".format(version=expected_version))
)
keep_alive_keys = [ch.key for ch in alive_members.children]
for key in state["keep_alives"]:
if key not in keep_alive_keys:
# This participant didn't renew their lease. We'll declare this
# rendezvous version as dead (but only if it hadn't changed)
log.info("Keep-alive key {} is not renewed.".format(key))
log.info(
"Rendevous version {} is incomplete. ".format(expected_version)
)
log.info("Attempting to destroy it.")
# Compare-and-delete operation. Throws if compare failed,
# which means rendezvous was already destroyed/re-created/closed,
# and we can try to re-enter the barrier.
self.client.delete(
key=self.get_path("/rdzv/active_version"),
prevValue=active_version.value,
)
log.info(
"Destroyed rendezvous version {} successfully.".format(
expected_version
)
)
# We can return (and retry) immediately
return
# Existing rendezvous seems valid, no reason to destroy it.
# We just have to wait until something changes and re-check.
try:
overall_timeout = (
max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
)
self.client.watch(
key=self.get_path("/rdzv"),
index=active_version.etcd_index + 1,
recursive=True,
timeout=overall_timeout,
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
active_version, state = self.get_rdzv_state()
def handle_join_last_call(self, expected_version, deadline):
"""
After we reach min number of workers, one particular worker takes on the
responsibility of waiting an additional timeout before closing the join window.
If the worker responsible for this fails, the rendezvous will be destroyed due
to expiring TTL, and the other participants will re-rendezvous.
Here we expect to see state <joinable, expected_version>
Exit gracefully if either:
1. state becomes <frozen, expected_version>
2. timeout happens (reaching deadline), in which case
we try the transition to <frozen, expected_version>
Exit with exception otherwise.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Worker set became frozen before last-call timeout. This is possible
# when num_max_workers is reached before the timeout.
return
if state["status"] != "joinable" or state["version"] != expected_version:
raise CustomRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
# If timeout occurred, attempt a state transition (joinable -> frozen)
if time.time() >= deadline:
state["status"] = "frozen"
state["keep_alives"] = []
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=CONST_ETCD_FROZEN_TTL,
)
# We successfully made this rendezvous frozen.
return
except etcd.EtcdCompareFailed:
log.info("Join last-call transition CAS unsuccessful. Will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
continue
# Timeout did not occur, so we must refresh TTL, and wait for
# further changes. Note: we only want TTL to be refreshed if
# state is still joinable, hence we use CAS for that here,
# even though we don't change any of the data.
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=active_version.value,
prev_value=active_version.value,
ttl=CONST_ETCD_JOINABLE_EPHEMERAL_TTL,
)
# Minimize "oversleeping":
timeout = min(
CONST_ETCD_JOINABLE_EPHEMERAL_TTL / 2,
deadline - time.time() + 1.0, # Oversleeping by 1s is ok.
)
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1, timeout=timeout
)
except etcd.EtcdCompareFailed:
log.info("Join last-call TTL refresh CAS unsuccessful, will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
def set_closed(self):
"""
Mark rendezvous 'closed' for current run_id, which is used to signal other
participants to not attempt to perform (re-)rendezvous. This is useful
when one of the workers decides the job is complete.
"""
while True:
active_version, state = self.get_rdzv_state()
if state["status"] == "closed":
# Already closed by someone else.
return
state["status"] = "closed"
try:
self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Set closed CAS unsuccessful, retrying")
cas_delay()
def get_rdzv_state(self):
active_version = self.client.get(key=self.get_path("/rdzv/active_version"))
return active_version, json.loads(active_version.value)
def try_wait_for_state_change(self, etcd_index, timeout=None):
# Don't sleep past the overall deadline (at least more than by 1s)
overall_timeout = max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
timeout = overall_timeout if timeout is None else min(timeout, overall_timeout)
try:
self.client.watch(
self.get_path("/rdzv/active_version"), index=etcd_index, timeout=timeout
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
# Unfortunately, we have to do another fetch in order to get last etcd_index.
return self.get_rdzv_state()
def get_path(self, path):
if not path.startswith("/"):
path = "/" + path
return "{prefix}run_{run_id}{path}".format(
prefix=self._prefix, run_id=self._run_id, path=path
)
def create_path_if_not_exists(self, full_path, ttl=None):
try:
self.client.write(
key=full_path, value=None, dir=True, prevExist=False, ttl=ttl
)
except etcd.EtcdAlreadyExist:
pass
def setup_lease_renewal(self, full_path, ttl):
# NOTE: For ephemeral key TTL renewal (~lease) to work correctly,
# make sure you don't call any long-blocking methods that do not
# release the Python's GIL! An example of this is calling a pybind11
# extension function that is blocking / long-running, but is not
# doing a scoped release of the GIL.
def lease_worker(client, path, ttl, stop_event):
while True:
try:
client.refresh(path, ttl=ttl)
except etcd.EtcdKeyNotFound:
break
if stop_event.wait(timeout=ttl / 2):
break
lease_stop_event = threading.Event()
lease_thread = threading.Thread(
target=lease_worker, args=(self.client, full_path, ttl, lease_stop_event)
)
lease_thread.daemon = True
lease_thread.start()
return lease_stop_event
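# Hedged sketch of the intended pattern (mirroring how this class uses it above):
# keep the returned Event and set() it to stop refreshing the TTL once the
# ephemeral key is no longer needed. `path` is an illustrative placeholder.
#
#   stop_event = self.setup_lease_renewal(path, ttl=CONST_WORKER_KEEPALIVE_TTL)
#   ...
#   stop_event.set()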
def store_extra_data(self, rdzv_version, key, value):
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
try:
# If first time we are storing anything:
extra_data = self.client.write(
key=node, value=json.dumps({key: value}), prevExist=False
)
return
except etcd.EtcdAlreadyExist:
pass
# CAS loop, to make sure we don't lose concurrent stores.
while True:
# We never delete extra_data. Failure here should be fatal, no special handling.
extra_data = self.client.get(node)
new_extra_data_value = json.loads(extra_data.value)
new_extra_data_value[key] = value
try:
extra_data = self.client.test_and_set(
key=node,
value=json.dumps(new_extra_data_value),
prev_value=extra_data.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Store extra_data CAS unsuccessful, retrying")
time.sleep(0.1)
def load_extra_data(self, rdzv_version, key, timeout=None):
# 'extra_data' node itself, and the directory it is located in:
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
node_dir = self.get_path("/rdzv/v_{}".format(rdzv_version))
# TODO: implement timeout
# https://github.com/pytorch/elastic/issues/12
while True:
# Combined wait for the node itself, and the key inside it.
root = self.client.get(node_dir)
# Find the extra_data node, if it exists
extra_data = [n for n in root.children if n.key == node]
assert len(extra_data) <= 1
# Node for extra_data exists, check the desired key inside it.
if len(extra_data) == 1:
extra_data_dict = json.loads(extra_data[0].value)
if key in extra_data_dict:
return extra_data_dict[key]
# The 'extra_data' node doesn't exist, or the key isn't published yet.
# Wait for interesting events on the extra_data node and retry.
try:
self.client.watch(node, index=root.etcd_index + 1)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
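# Hedged usage sketch: store_extra_data/load_extra_data form a small
# publish/wait mechanism scoped to a rendezvous version. The key name and
# value below are illustrative assumptions.
#
#   rdzv.store_extra_data(rdzv_version, "master_addr", "10.0.0.1:29500")
#   addr = rdzv.load_extra_data(rdzv_version, "master_addr")  # blocks until published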
def setup_kv_store(self, rdzv_version):
store_path = self.get_path(f"/rdzv/v_{rdzv_version}/kv")
self.create_path_if_not_exists(store_path)
return CustomStore(etcd_client=self.client, etcd_store_prefix=store_path)
# pyre-fixme[11]: Annotation `Store` is not defined as a type.
class CustomStore(Store):
"""
Implements a c10 Store interface by piggybacking on the rendezvous etcd
instance. This is the store object returned by ``EtcdRendezvous``
"""
def __init__(
self,
etcd_client,
etcd_store_prefix,
timeout: Optional[datetime.timedelta] = None,
):
super().__init__() # required for pybind trampoline.
self.client = etcd_client
self.prefix = etcd_store_prefix
# Default timeout same as in c10d/Store.hpp
self.timeout = (
timeout if timeout is not None else datetime.timedelta(seconds=300)
)
if not self.prefix.endswith("/"):
self.prefix += "/"
def set(self, key, value):
"""
Write a key/value pair into ``EtcdStore``.
Both key and value may be either Python ``str`` or ``bytes``.
"""
self.client.set(key=self.prefix + self._encode(key), value=self._encode(value))
def get(self, key) -> bytes:
"""
Get a value by key, possibly doing a blocking wait.
If key is not immediately present, will do a blocking wait
for at most ``timeout`` duration or until the key is published.
Returns:
value ``(bytes)``
Raises:
LookupError - If key still not published after timeout
"""
b64_key = self.prefix + self._encode(key)
kvs = self._try_wait_get([b64_key])
if kvs is None:
raise LookupError(f"Key {key} not found in EtcdStore")
return self._decode(kvs[b64_key])
def add(self, key, num: int) -> int:
"""
Atomically increment a value by an integer amount. The integer is
represented as a string using base 10. If key is not present,
a default value of ``0`` will be assumed.
Returns:
the new (incremented) value
"""
b64_key = self._encode(key)
# c10d Store assumes value is an integer represented as a decimal string
try:
# Assume default value "0", if this key didn't exist yet:
node = self.client.write(
key=self.prefix + b64_key,
value=self._encode(str(num)), # i.e. 0 + num
prevExist=False,
)
return int(self._decode(node.value))
except etcd.EtcdAlreadyExist:
pass
while True:
# Note: c10d Store does not have a method to delete keys, so we
# can be sure it's still there.
node = self.client.get(key=self.prefix + b64_key)
new_value = self._encode(str(int(self._decode(node.value)) + num))
try:
node = self.client.test_and_set(
key=node.key, value=new_value, prev_value=node.value
)
return int(self._decode(node.value))
except etcd.EtcdCompareFailed:
cas_delay()
def wait(self, keys, override_timeout: Optional[datetime.timedelta] = None):
"""
Waits until all of the keys are published, or until timeout.
Raises:
LookupError - if timeout occurs
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(b64_keys, override_timeout)
if kvs is None:
raise LookupError("Timeout while waiting for keys in EtcdStore")
# No return value on success
def check(self, keys) -> bool:
"""
Check if all of the keys are immediately present (without waiting).
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(
b64_keys,
override_timeout=datetime.timedelta(microseconds=1), # as if no wait
)
return kvs is not None
def set_timeout(self, timeout: datetime.timedelta):
"""
Change the timeout used for all future operations.
"""
self.timeout = timeout
#
# Encode key/value data in base64, so we can store arbitrary binary data
# in EtcdStore. Input can be `str` or `bytes`.
# In case of `str`, utf-8 encoding is assumed.
#
def _encode(self, value) -> str:
if type(value) == bytes:
return b64encode(value).decode()
elif type(value) == str:
return b64encode(value.encode()).decode()
raise ValueError("Value must be of type str or bytes")
#
# Decode a base64 string (of type `str` or `bytes`).
# Return type is `bytes`, which is more convenient with the Store interface.
#
def _decode(self, value) -> bytes:
if type(value) == bytes:
return b64decode(value)
elif type(value) == str:
return b64decode(value.encode())
raise ValueError("Value must be of type str or bytes")
#
# Get all of the (base64-encoded) etcd keys at once, or wait until all the keys
# are published or timeout occurs.
# This is a helper method for the public interface methods.
#
# On success, a dictionary of {etcd key -> etcd value} is returned.
# On timeout, None is returned.
#
def _try_wait_get(self, b64_keys, override_timeout=None):
timeout = self.timeout if override_timeout is None else override_timeout
deadline = time.time() + timeout.total_seconds()
while True:
# Read whole directory (of keys), filter only the ones waited for
all_nodes = self.client.get(key=self.prefix)
req_nodes = {
node.key: node.value
for node in all_nodes.children
if node.key in b64_keys
}
if len(req_nodes) == len(b64_keys):
# All keys are available
return req_nodes
watch_timeout = deadline - time.time()
if watch_timeout <= 0:
return None
try:
self.client.watch(
key=self.prefix,
recursive=True,
timeout=watch_timeout,
index=all_nodes.etcd_index + 1,
)
except etcd.EtcdWatchTimedOut:
if time.time() >= deadline:
return None
else:
continue
except etcd.EtcdEventIndexCleared:
continue
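# Illustrative sketch (not part of the original module): how a CustomStore might be
# used once a python-etcd client is available. The endpoint, prefix and key names
# below are assumptions for demonstration only.
def _example_custom_store_usage():
    import etcd  # python-etcd, assumed to be installed

    client = etcd.Client(host="localhost", port=2379)  # assumed local etcd endpoint
    store = CustomStore(etcd_client=client, etcd_store_prefix="/rdzv/v_1/kv")
    store.set("rank_0/addr", b"10.0.0.1:29500")
    addr = store.get("rank_0/addr")  # blocks until published or the store timeout
    joined = store.add("num_joined", 1)  # atomic increment, returns the new value
    return addr, joined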
def _get_socket_with_port():
import socket
addrs = socket.getaddrinfo(
host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
)
for addr in addrs:
family, type, proto, _, _ = addr
try:
s = socket.socket(family, type, proto)
s.bind(("localhost", 0))
s.listen(0)
return s
except OSError as e:
s.close()
log.info("Socket creation attempt failed: " + e)
raise RuntimeError("Failed to create a socket")
# Helper for _etcd_rendezvous_handler(url)
def _parse_etcd_client_params(params):
kwargs = {}
if "protocol" in params:
protocol = params["protocol"]
assert protocol in ["http", "https"], "Protocol must be http or https."
kwargs["protocol"] = protocol
if "cacert" in params:
kwargs["ca_cert"] = params["cacert"]
if "cert" in params:
if "key" in params:
# python-etcd client expects key as a second element of `cert` tuple
kwargs["cert"] = (params["cert"], params["key"])
else:
kwargs["cert"] = params["cert"]
return kwargs
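# Illustrative sketch of the mapping performed above; the certificate paths are
# placeholders mirroring the docstring below, not files from the original code.
def _example_etcd_client_params():
    params = {
        "protocol": "https",
        "cacert": "/etc/kubernetes/certs/ca.crt",
        "cert": "/etc/kubernetes/certs/client.crt",
        "key": "/etc/kubernetes/certs/client.key",
    }
    # Returns {"protocol": "https", "ca_cert": <cacert>, "cert": (<cert>, <key>)}
    return _parse_etcd_client_params(params)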
# Handler for torch.distributed "static" registration
def _custom_rendezvous_handler(url):
"""
Example URLs:
        custom://localhost:2379/123?min_workers=4&max_workers=8&timeout=300
        custom://192.168.0.42/123?etcd_prefix=/custom_prefix/foo&min_workers=4
        custom://localhost:2379/123?min_workers=4&protocol=https&cacert=/etc/kubernetes/certs/ca.crt&cert=/etc/kubernetes/certs/client.crt&key=/etc/kubernetes/certs/client.key
Where:
123 - the run_id (unique id for this training job instance),
min_workers=4 - min number of workers expected to join the rendezvous,
max_workers=8 - max number of workers allowed to join the rendezvous,
                        defaults to min_workers if not specified.
timeout=300 - total timeout within which next_rendezvous is expected to
succeed; a RendezvousTimeoutException is raised otherwise;
                      Default is 600 (10 minutes).
last_call_timeout - additional wait amount ("last call") after
min number of workers has been reached.
Defaults to 30 seconds.
etcd_prefix - path prefix (from etcd root), inside which all
etcd nodes will be created.
Default is "/torchelastic/p2p".
protocol=https - http (default) or https to access etcd.
cacert=/etc/kubernetes/certs/ca.crt - CA cert to access etcd,
only makes sense with https.
cert=/etc/kubernetes/certs/client.crt - client cert to access etcd,
only makes sense with https.
key=/etc/kubernetes/certs/client.key - client key to access etcd,
only makes sense with https.
"""
import re
from urllib.parse import urlparse
url = urlparse(url)
assert url.scheme == "custom"
# Etcd endpoints. (Current url format only allows a single host)
endpoint = url.netloc
match = re.match(r"(.+):(\d+)$", endpoint) # check if port was provided
if match:
etcd_endpoints = ((match.group(1), int(match.group(2))),)
else:
# Use default etcd port
etcd_endpoints = ((endpoint, 2379),)
# Run ID value -> unique identifier of this training job instance:
# typically a job_id or name assigned by the scheduler or user
run_id = url.path.strip("/")
# Parse all of query parameters:
params = dict(pair.split("=") for pair in filter(None, url.query.split("&")))
etcd_prefix = params.get("etcd_prefix", "/torchelastic/p2p")
num_min_workers = int(params["min_workers"])
num_max_workers = int(params.get("max_workers", num_min_workers))
assert num_min_workers >= 1, "Min number of workers should be at least 1"
assert (
num_max_workers >= num_min_workers
), "Max number of workers cannot be less than min number of workers"
timeout = int(params.get("timeout", CONST_DEFAULT_OVERALL_TIMEOUT))
last_call_timeout = int(
params.get("last_call_timeout", CONST_DEFAULT_LAST_CALL_TIMEOUT)
)
kwargs = _parse_etcd_client_params(params)
# Etcd rendezvous implementation
etcd_rdzv = CustomRendezvous(
endpoints=etcd_endpoints,
prefix=etcd_prefix,
run_id=run_id,
num_min_workers=num_min_workers,
num_max_workers=num_max_workers,
timeout=timeout,
last_call_timeout=last_call_timeout,
**kwargs,
)
return CustomRendezvousHandler(rdzv_impl=etcd_rdzv)
# torchelastic.rendezvous.RendezvousHandler using etcd (API v2):
register_rendezvous_handler("custom", _custom_rendezvous_handler)
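# Illustrative sketch (not part of the original module): once the "custom" scheme is
# registered above, a handler can be built directly from a URL. The run id, endpoint
# and worker counts are example values only.
def _example_handler_from_url():
    url = "custom://localhost:2379/123?min_workers=4&max_workers=8&timeout=300"
    return _custom_rendezvous_handler(url)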
|
the-stack_106_29039 | import networkx as nx
import logging
from itertools import combinations
from random import sample
from typing import Union, TypeVar
Node = Union[str, int] # Node type hint: A node is a string or an int
CausalDAG = TypeVar("CausalDAG")
logger = logging.getLogger(__name__)
from .scenario import Scenario
from .variable import Output
def list_all_min_sep(graph: nx.Graph, treatment_node: Node, outcome_node: Node,
treatment_node_set: set[Node], outcome_node_set: set[Node]):
""" A backtracking algorithm for listing all minimal treatment-outcome separators in an undirected graph.
Reference: (Space-optimal, backtracking algorithms to list the minimal vertex separators of a graph, Ken Takata,
2013, p.5, ListMinSep procedure).
:param graph: An undirected graph.
    :param treatment_node: The node corresponding to the treatment variable we wish to separate from the outcome.
    :param outcome_node: The node corresponding to the outcome variable we wish to separate from the treatment.
:param treatment_node_set: Set of treatment nodes.
:param outcome_node_set: Set of outcome nodes.
:return: A list of minimal-sized sets of variables which separate treatment and outcome in the undirected graph.
"""
# 1. Compute the close separator of the treatment set
close_separator_set = close_separator(
graph, treatment_node, outcome_node, treatment_node_set
)
# 2. Use the close separator to separate the graph and obtain the connected components (connected sub-graphs)
components_graph = graph.copy()
components_graph.remove_nodes_from(close_separator_set)
graph_components = nx.connected_components(components_graph)
# 3. Find the connected component that contains the treatment node
treatment_connected_component_node_set = set()
for component in graph_components:
if treatment_node in component:
treatment_connected_component_node_set = component
# 4. Confirm that the connected component containing the treatment node is disjoint with the outcome node set
if not treatment_connected_component_node_set.intersection(outcome_node_set):
# 5. Update the treatment node set to the set of nodes in the connected component containing the treatment node
treatment_node_set = treatment_connected_component_node_set
# 6. Obtain the neighbours of the new treatment node set (this excludes the treatment nodes themselves)
treatment_node_set_neighbours = (
set.union(*[set(nx.neighbors(graph, node)) for node in treatment_node_set])
- treatment_node_set
)
# 7. Check that there exists at least one neighbour of the treatment nodes that is not in the outcome node set
if treatment_node_set_neighbours.difference(outcome_node_set):
# 7.1. If so, sample a random node from the set of treatment nodes' neighbours not in the outcome node set
node = set(
sample(treatment_node_set_neighbours.difference(outcome_node_set), 1)
)
# 7.2. Add this node to the treatment node set and recurse (left branch)
yield from list_all_min_sep(
graph,
treatment_node,
outcome_node,
treatment_node_set.union(node),
outcome_node_set,
)
# 7.3. Add this node to the outcome node set and recurse (right branch)
yield from list_all_min_sep(
graph,
treatment_node,
outcome_node,
treatment_node_set,
outcome_node_set.union(node),
)
else:
            # 8. If all neighbours of the treatment nodes are in the outcome node set, return the set of treatment
# node neighbours
yield treatment_node_set_neighbours
def close_separator(graph: nx.Graph, treatment_node: Node, outcome_node: Node, treatment_node_set: set[Node]) -> set[Node]:
""" Compute the close separator for a set of treatments in an undirected graph.
A close separator (relative to a set of variables X) is a separator whose vertices are adjacent to those in X.
An X-Y separator is a set of variables which, once deleted from a graph, create a subgraph in which X and Y
are in different components.
Reference: (Space-optimal, backtracking algorithms to list the minimal vertex separators of a graph, Ken Takata,
2013, p.4, CloseSeparator procedure).
:param graph: An undirected graph.
:param treatment_node: A label for the treatment node (parent of treatments in undirected graph).
:param outcome_node: A label for the outcome node (parent of outcomes in undirected graph).
:param treatment_node_set: The set of variables containing the treatment node ({treatment_node}).
:return: A treatment_node-outcome_node separator whose vertices are adjacent to those in treatments.
"""
treatment_neighbours = set.union(
*[set(nx.neighbors(graph, treatment)) for treatment in treatment_node_set]
)
components_graph = graph.copy()
components_graph.remove_nodes_from(treatment_neighbours)
graph_components = nx.connected_components(components_graph)
for component in graph_components:
if outcome_node in component:
neighbours_of_variables_in_component = set.union(
*[set(nx.neighbors(graph, variable)) for variable in component]
)
# For this algorithm, the neighbours of a node do not include the node itself
neighbours_of_variables_in_component = neighbours_of_variables_in_component.difference(
component
)
return neighbours_of_variables_in_component
raise ValueError(f"No {treatment_node}-{outcome_node} separator in the graph.")
class CausalDAG(nx.DiGraph):
""" A causal DAG is a directed acyclic graph in which nodes represent random variables and edges represent causality
between a pair of random variables. We implement a CausalDAG as a networkx DiGraph with an additional check that
ensures it is acyclic. A CausalDAG must be specified as a dot file.
"""
def __init__(self, dot_path: str = None, **attr):
super().__init__(**attr)
if dot_path:
self.graph = nx.DiGraph(nx.drawing.nx_agraph.read_dot(dot_path))
else:
self.graph = nx.DiGraph()
if not self.is_acyclic():
raise nx.HasACycle("Invalid Causal DAG: contains a cycle.")
def add_edge(self, u_of_edge: Node, v_of_edge: Node, **attr):
""" Add an edge to the causal DAG.
Overrides the default networkx method to prevent users from adding a cycle.
:param u_of_edge: From node
:param v_of_edge: To node
:param attr: Attributes
"""
self.graph.add_edge(u_of_edge, v_of_edge, **attr)
if not self.is_acyclic():
raise nx.HasACycle("Invalid Causal DAG: contains a cycle.")
def is_acyclic(self) -> bool:
"""Checks if the graph is acyclic.
:return: True if acyclic, False otherwise.
"""
return not list(nx.simple_cycles(self.graph))
def get_proper_backdoor_graph(self, treatments: list[str], outcomes: list[str]) -> CausalDAG:
""" Convert the causal DAG to a proper back-door graph.
A proper back-door graph of a causal DAG is obtained by
removing the first edge of every proper causal path from treatments to outcomes. A proper causal path from
X to Y is a path of directed edges that starts from X and ends in Y.
Reference: (Separators and adjustment sets in causal graphs: Complete criteria and an algorithmic framework,
Zander et al., 2019, Definition 3, p.15)
:param treatments: A list of treatment variables.
:param outcomes: A list of outcomes.
:return: A CausalDAG corresponding to the proper back-door graph.
"""
for var in treatments + outcomes:
if var not in self.graph.nodes:
raise IndexError(f"{var} not a node in Causal DAG.\nValid nodes are{self.graph.nodes}.")
proper_backdoor_graph = self.copy()
nodes_on_proper_causal_path = proper_backdoor_graph.proper_causal_pathway(
treatments, outcomes
)
edges_to_remove = [
(u, v)
for (u, v) in proper_backdoor_graph.graph.out_edges(treatments)
if v in nodes_on_proper_causal_path
]
proper_backdoor_graph.graph.remove_edges_from(edges_to_remove)
return proper_backdoor_graph
def get_ancestor_graph(self, treatments: list[str], outcomes: list[str]) -> CausalDAG:
""" Given a list of treament variables and a list of outcome variables, transform a CausalDAG into an ancestor
graph.
An ancestor graph G[An(W)] for a CausalDAG G is a subgraph of G consisting of only the vertices who are
ancestors of the set of variables W and all edges between them. Note that a node is an ancestor of itself.
        Reference: (Adjustment Criteria in Causal Diagrams: An Algorithmic Perspective, Textor and Liśkiewicz, 2012,
p. 3 [Descendants and Ancestors]).
:param treatments: A list of treatment variables to include in the ancestral graph (and their ancestors).
:param outcomes: A list of outcome variables to include in the ancestral graph (and their ancestors).
:return: An ancestral graph relative to the set of variables X union Y.
"""
ancestor_graph = self.copy()
treatment_ancestors = set.union(
*[
nx.ancestors(ancestor_graph.graph, treatment).union({treatment})
for treatment in treatments
]
)
outcome_ancestors = set.union(
*[
nx.ancestors(ancestor_graph.graph, outcome).union({outcome})
for outcome in outcomes
]
)
variables_to_keep = treatment_ancestors.union(outcome_ancestors)
variables_to_remove = set(self.graph.nodes).difference(variables_to_keep)
ancestor_graph.graph.remove_nodes_from(variables_to_remove)
return ancestor_graph
def get_indirect_graph(self, treatments:list[str], outcomes:list[str]) -> CausalDAG:
"""
This is the counterpart of the back-door graph for direct effects. We remove only edges pointing from X to Y.
It is a Python implementation of the indirectGraph function from Dagitty.
:param list[str] treatments: List of treatment names.
:param list[str] outcomes: List of outcome names.
:return: The indirect graph with edges pointing from X to Y removed.
:rtype: CausalDAG
"""
gback = self.copy()
ee = []
for s in treatments:
for t in outcomes:
if (s, t) in gback.graph.edges:
ee.append((s, t))
for v1, v2 in ee:
gback.graph.remove_edge(v1,v2)
return gback
def direct_effect_adjustment_sets(self, treatments:list[str], outcomes:list[str]) -> list[set[str]]:
"""
Get the smallest possible set of variables that blocks all back-door paths between all pairs of treatments
and outcomes for DIRECT causal effect.
This is an Python implementation of the listMsasTotalEffect function from Dagitty using Algorithms presented in
        Adjustment Criteria in Causal Diagrams: An Algorithmic Perspective, Textor and Liśkiewicz, 2012 and extended in
Separators and adjustment sets in causal graphs: Complete criteria and an algorithmic framework, Zander et al.,
2019. These works use the algorithm presented by Takata et al. in their work entitled: Space-optimal,
backtracking algorithms to list the minimal vertex separators of a graph, 2013.
:param list[str] treatments: List of treatment names.
:param list[str] outcomes: List of outcome names.
:return: A list of possible adjustment sets.
:rtype: list[set[str]]
"""
indirect_graph = self.get_indirect_graph(treatments, outcomes)
ancestor_graph = indirect_graph.get_ancestor_graph(treatments, outcomes)
gam = nx.moral_graph(ancestor_graph.graph)
edges_to_add = [("TREATMENT", treatment) for treatment in treatments]
edges_to_add += [("OUTCOME", outcome) for outcome in outcomes]
gam.add_edges_from(edges_to_add)
min_seps = list(list_all_min_sep(gam, "TREATMENT", "OUTCOME", set(treatments), set(outcomes)))
# min_seps.remove(set(outcomes))
return min_seps
def enumerate_minimal_adjustment_sets(self, treatments: list[str], outcomes: list[str]) -> list[set[str]]:
""" Get the smallest possible set of variables that blocks all back-door paths between all pairs of treatments
and outcomes.
This is an implementation of the Algorithm presented in Adjustment Criteria in Causal Diagrams: An
        Algorithmic Perspective, Textor and Liśkiewicz, 2012 and extended in Separators and adjustment sets in causal
graphs: Complete criteria and an algorithmic framework, Zander et al., 2019. These works use the algorithm
presented by Takata et al. in their work entitled: Space-optimal, backtracking algorithms to list the minimal
vertex separators of a graph, 2013.
At a high-level, this algorithm proceeds as follows for a causal DAG G, set of treatments X, and set of
outcomes Y):
1). Transform G to a proper back-door graph G_pbd (remove the first edge from X on all proper causal paths).
2). Transform G_pbd to the ancestor moral graph (G_pbd[An(X union Y)])^m.
3). Apply Takata's algorithm to output all minimal X-Y separators in the graph.
:param treatments: A list of strings representing treatments.
:param outcomes: A list of strings representing outcomes.
:return: A list of strings representing the minimal adjustment set.
"""
# 1. Construct the proper back-door graph's ancestor moral graph
proper_backdoor_graph = self.get_proper_backdoor_graph(treatments, outcomes)
ancestor_proper_backdoor_graph = proper_backdoor_graph.get_ancestor_graph(
treatments, outcomes
)
moralised_proper_backdoor_graph = nx.moral_graph(
ancestor_proper_backdoor_graph.graph
)
# 2. Add an edge X^m to treatment nodes and Y^m to outcome nodes
edges_to_add = [("TREATMENT", treatment) for treatment in treatments]
edges_to_add += [("OUTCOME", outcome) for outcome in outcomes]
moralised_proper_backdoor_graph.add_edges_from(edges_to_add)
# 3. Remove treatment and outcome nodes from graph and connect neighbours
treatment_neighbours = set.union(
*[
set(nx.neighbors(moralised_proper_backdoor_graph, treatment))
for treatment in treatments
]
) - set(treatments)
outcome_neighbours = set.union(
*[
set(nx.neighbors(moralised_proper_backdoor_graph, outcome))
for outcome in outcomes
]
) - set(outcomes)
neighbour_edges_to_add = list(combinations(treatment_neighbours, 2)) + list(
combinations(outcome_neighbours, 2)
)
moralised_proper_backdoor_graph.add_edges_from(neighbour_edges_to_add)
# 4. Find all minimal separators of X^m and Y^m using Takata's algorithm for listing minimal separators
treatment_node_set = {"TREATMENT"}
outcome_node_set = set(
nx.neighbors(moralised_proper_backdoor_graph, "OUTCOME")
).union({"OUTCOME"})
minimum_adjustment_sets = list(
list_all_min_sep(
moralised_proper_backdoor_graph,
"TREATMENT",
"OUTCOME",
treatment_node_set,
outcome_node_set,
)
)
return minimum_adjustment_sets
def adjustment_set_is_minimal(self, treatments: list[str], outcomes: list[str], adjustment_set: set[str]) -> bool:
""" Given a list of treatments X, a list of outcomes Y, and an adjustment set Z, determine whether Z is the
smallest possible adjustment set.
Z is the minimal adjustment set if no element of Z can be removed without breaking the constructive back-door
criterion.
Reference: Separators and adjustment sets in causal graphs: Complete criteria and an algorithmic framework,
Zander et al., 2019, Corollary 5, p.19)
:param treatments: List of treatment variables.
:param outcomes: List of outcome variables.
:param adjustment_set: Set of adjustment variables.
:return: True or False depending on whether the adjustment set is minimal.
"""
proper_backdoor_graph = self.get_proper_backdoor_graph(treatments, outcomes)
# Ensure that constructive back-door criterion is satisfied
if not self.constructive_backdoor_criterion(
proper_backdoor_graph, treatments, outcomes, adjustment_set
):
raise ValueError(f"{adjustment_set} is not a valid adjustment set.")
# Remove each variable one at a time and return false if constructive back-door criterion remains satisfied
for variable in adjustment_set:
smaller_adjustment_set = adjustment_set.copy()
smaller_adjustment_set.remove(variable)
if not smaller_adjustment_set: # Treat None as the empty set
smaller_adjustment_set = set()
if self.constructive_backdoor_criterion(
proper_backdoor_graph, treatments, outcomes, smaller_adjustment_set
):
logger.info(
f"Z={adjustment_set} is not minimal because Z'=Z\\{{'{variable}'}}="
f"{smaller_adjustment_set} is also a valid adjustment set."
)
return False
return True
def constructive_backdoor_criterion(self, proper_backdoor_graph: CausalDAG, treatments: list[str],
outcomes: list[str], covariates: list[str]) -> bool:
""" A variation of Pearl's back-door criterion applied to a proper backdoor graph which enables more efficient
computation of minimal adjustment sets for the effect of a set of treatments on a set of outcomes.
The constructive back-door criterion is satisfied for a causal DAG G, a set of treatments X, a set of outcomes
Y, and a set of covariates Z, if:
(1) Z is not a descendent of any variable on a proper causal path between X and Y.
(2) Z d-separates X and Y in the proper back-door graph relative to X and Y.
Reference: (Separators and adjustment sets in causal graphs: Complete criteria and an algorithmic framework,
Zander et al., 2019, Definition 4, p.16)
:param proper_backdoor_graph: A proper back-door graph relative to the specified treatments and outcomes.
:param treatments: A list of treatment variables that appear in the proper back-door graph.
:param outcomes: A list of outcome variables that appear in the proper back-door graph.
:param covariates: A list of variables that appear in the proper back-door graph that we will check against
the constructive back-door criterion.
:return: True or False, depending on whether the set of covariates satisfies the constructive back-door
criterion.
"""
# Condition (1)
proper_causal_path_vars = self.proper_causal_pathway(treatments, outcomes)
        descendants_of_proper_causal_paths = set.union(
*[
set.union(
nx.descendants(self.graph, proper_causal_path_var),
{proper_causal_path_var},
)
for proper_causal_path_var in proper_causal_path_vars
]
)
if not set(covariates).issubset(
            set(self.graph.nodes).difference(descendants_of_proper_causal_paths)
):
logger.info(
f"Failed Condition 1: Z={covariates} **is** a descendent of some variable on a proper causal "
f"path between X={treatments} and Y={outcomes}."
)
return False
# Condition (2)
if not nx.d_separated(
proper_backdoor_graph.graph, set(treatments), set(outcomes), set(covariates)
):
logger.info(
f"Failed Condition 2: Z={covariates} **does not** d-separate X={treatments} and Y={outcomes} in"
f" the proper back-door graph relative to X and Y."
)
return False
return True
def proper_causal_pathway(self, treatments: list[str], outcomes: list[str]) -> list[str]:
""" Given a list of treatments and outcomes, compute the proper causal pathways between them.
        PCP(X, Y) = (DeX^(X) - X) intersect AnX_(Y), where:
- DeX^(X) refers to the descendents of X in the graph obtained by deleting all edges into X.
- AnX_(Y) refers to the ancestors of Y in the graph obtained by deleting all edges leaving X.
:param treatments: A list of treatment variables in the causal DAG.
:param outcomes: A list of outcomes in the causal DAG.
:return vars_on_proper_causal_pathway: Return a list of the variables on the proper causal pathway between
treatments and outcomes.
"""
treatments_descendants = set.union(
*[
nx.descendants(self.graph, treatment).union({treatment})
for treatment in treatments
]
)
treatments_descendants_without_treatments = set(
treatments_descendants
).difference(treatments)
backdoor_graph = self.get_backdoor_graph(set(treatments))
outcome_ancestors = set.union(
*[
nx.ancestors(backdoor_graph, outcome).union({outcome})
for outcome in outcomes
]
)
nodes_on_proper_causal_paths = treatments_descendants_without_treatments.intersection(
outcome_ancestors
)
return nodes_on_proper_causal_paths
def get_backdoor_graph(self, treatments: list[str]) -> CausalDAG:
""" A back-door graph is a graph for the list of treatments is a Causal DAG in which all edges leaving the
treatment nodes are deleted.
:param treatments: The set of treatments whose outgoing edges will be deleted.
:return: A back-door graph corresponding to the given causal DAG and set of treatments.
"""
outgoing_edges = self.graph.out_edges(treatments)
backdoor_graph = self.graph.copy()
backdoor_graph.remove_edges_from(outgoing_edges)
return backdoor_graph
def depends_on_outputs(self, node: Node, scenario: Scenario) -> bool:
"""Check whether a given node in a given scenario is or depends on a
model output in the given scenario. That is, whether or not the model
needs to be run to determine its value.
NOTE: The graph must be acyclic for this to terminate.
:param Node node: The node in the DAG representing the variable of interest.
:param Scenario scenario: The modelling scenario.
:return: Whether the given variable is or depends on an output.
:rtype: bool
"""
if isinstance(scenario.variables[node], Output):
return True
return any(
[
self.depends_on_outputs(n, scenario)
for n in self.graph.predecessors(node)
]
)
def __str__(self):
return f"Nodes: {self.graph.nodes}\nEdges: {self.graph.edges}"
|
the-stack_106_29040 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\vet\vet_clinic_handlers.py
# Compiled at: 2017-08-16 22:23:19
# Size of source mod 2**32: 3879 bytes
from gsi_handlers.gameplay_archiver import GameplayArchiver
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema
from vet.vet_clinic_utils import get_vet_clinic_zone_director
import services
vet_clinic_flow_schema = GsiGridSchema(label='Vet/Vet Clinic Flow Log')
vet_clinic_flow_schema.add_field('game_time', label='Game Time', width=1)
vet_clinic_flow_schema.add_field('sims', label='Sim(s)', width=2)
vet_clinic_flow_schema.add_field('source', label='Source', width=1)
vet_clinic_flow_schema.add_field('message', label='message', width=4)
host_archiver = GameplayArchiver('flowLog', vet_clinic_flow_schema, add_to_archive_enable_functions=True)
def log_vet_flow_entry(sims, source, message):
archive_data = {'sims':sims,
'source':source,
'message':message}
host_archiver.archive(data=archive_data)
vet_clinic_customer_schema = GsiGridSchema(label='Vet/Customers')
vet_clinic_customer_schema.add_field('situation_id', label='Situation Id', width=1)
vet_clinic_customer_schema.add_field('waiting_start_time', label='Wait Start Time', width=1)
vet_clinic_customer_schema.add_field('waiting_queue_order', label='Order In Queue', width=1)
vet_clinic_customer_schema.add_field('pet', label='Pet', width=1)
vet_clinic_customer_schema.add_field('owner', label='Owner', width=1)
vet_clinic_customer_schema.add_field('current_state', label='Current State', width=1)
vet_clinic_customer_schema.add_field('vet', label='Vet', width=1)
with vet_clinic_customer_schema.add_view_cheat('situations.destroy', label='Destroy Situation') as (cheat):
cheat.add_token_param('situation_id')
@GsiHandler('vet_customers', vet_clinic_customer_schema)
def generate_customer_data(zone_id: int=None):
customer_situations_data = []
zone_director = get_vet_clinic_zone_director()
if zone_director is None:
return customer_situations_data
waiting_situations_ids = list(zone_director._waiting_situations.keys())
waiting_situations_ids_list_fixed = tuple(waiting_situations_ids)
def add_customer_situation_data(customer_situation):
is_waiting_situation = customer_situation.id in waiting_situations_ids
order_in_queue = waiting_situations_ids_list_fixed.index(customer_situation.id) if is_waiting_situation else 'Not In Queue'
customer_situations_data.append({'waiting_start_time':str(customer_situation.wait_start_time),
'waiting_queue_order':str(order_in_queue),
'situation_id':str(customer_situation.id),
'pet':str(customer_situation.get_pet()),
'owner':str(customer_situation.get_pet_owner()),
'current_state':customer_situation.current_state_type.__name__,
'vet':str(customer_situation.get_vet())})
if is_waiting_situation:
waiting_situations_ids.remove(customer_situation.id)
for customer_situation in zone_director.customer_situations_gen():
add_customer_situation_data(customer_situation)
if waiting_situations_ids:
for customer_situation_id in tuple(waiting_situations_ids):
customer_situation = services.get_zone_situation_manager().get(customer_situation_id)
if customer_situation is not None:
add_customer_situation_data(customer_situation)
return customer_situations_data |
the-stack_106_29042 | import pytest
def pytest_addoption(parser):
parser.addoption(
"--edgetest",
action="store_true",
default=False,
help="run edge tests: rpc server should be launched on the edge device in advance",
)
parser.addoption(
"--tvm_target_device",
action="store",
default="jetson-xavier-nx",
help="Specify the name of the yaml file under config/tvm_target, it will be loaded as TVMConfig for TVM compilation for edge devices.",
)
parser.addoption("--rpc_host", action="store")
parser.addoption("--rpc_port", action="store", type=int, default=5051)
def pytest_configure(config):
config.addinivalue_line("markers", "edgetest: mark test to run rpc test")
def pytest_collection_modifyitems(config, items):
if config.getoption("--edgetest"):
return
edgetest = pytest.mark.skip(reason="need --edgetest option to run")
for item in items:
if "edgetest" in item.keywords:
item.add_marker(edgetest)
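# Illustrative usage note (not part of the original file). Assuming an RPC server is
# already running on the edge device, the edge tests can be enabled with, e.g.:
#
#   pytest --edgetest --tvm_target_device jetson-xavier-nx \
#          --rpc_host 192.168.1.10 --rpc_port 5051
#
# Without --edgetest, tests marked with @pytest.mark.edgetest are skipped.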
|
the-stack_106_29043 | # coding=utf-8
"""
nsfw plugin for DecoraterBot.
"""
import random
from discord.ext import commands
import nsfw_dl
from DecoraterBotUtils.utils import *
class NSFW:
"""
NSFW Commands Plugin Class.
"""
def __init__(self):
self.image = None
self.nsfw_text = PluginTextReader(
file='nsfw.json')
@commands.command(name='rule34', pass_context=True)
async def rule34_command(self, ctx):
"""
::rule34 Search Command for DecoraterBot.
"""
searchterm = ctx.message.content[len(ctx.prefix + 'rule34 '):].strip()
if searchterm != '':
imageerr = None
try:
self.image = await nsfw_dl.rule34_search(
searchterm, ctx.bot.http.session)
except nsfw_dl.errors.NoResultsFound:
self.image = None
imageerr = self.nsfw_text['nsfw_plugin_data'][0]
            if self.image == -1:
await ctx.bot.send_message(
ctx.message.channel,
content=self.nsfw_text['nsfw_plugin_data'][1])
else:
if self.image is not None:
await ctx.bot.send_message(
ctx.message.channel,
content='http:' + random.choice(self.image))
else:
if imageerr is not None:
await ctx.bot.send_message(ctx.message.channel,
content=imageerr)
else:
await ctx.bot.send_message(
ctx.message.channel,
content=self.nsfw_text['nsfw_plugin_data'][2])
else:
self.image = await nsfw_dl.rule34_random(ctx.bot.http.session)
if self.image is not None:
await ctx.bot.send_message(ctx.message.channel,
content='http:' + self.image)
else:
await ctx.bot.send_message(ctx.message.channel,
content=self.nsfw_text[
'nsfw_plugin_data'
][2])
def setup(bot):
"""
DecoraterBot's NSFW Plugin.
"""
bot.add_cog(NSFW())
|
the-stack_106_29046 | # Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module for Pyxir IO APIs """
import io
import os
import json
import zipfile
from pyxir.graph.xgraph import XGraph
from pyxir.graph.io.xgraph_io import XGraphIO
from pyxir.opaque_func_registry import register_opaque_func, OpaqueFuncRegistry
from pyxir.type import TypeCode
from pyxir.shared.container import StrContainer, BytesContainer
from .util import zip_dir
def visualize(xgraph, pngfile='xgraph.png'):
# type: (XGraph, str) -> None
xgraph.visualize(pngfile)
def save(xgraph, filename):
# type: (str) -> None
"""
Save this XGraph to disk. The network graph information is written to
    json and the network parameters are written to an h5 file
Arguments
---------
xgraph: XGraph
the XGraph to be saved
filename: str
        the name of the files storing the graph information and network
        parameters
        the graph information is stored in `filename`.json
        the network parameters are stored in `filename`.h5
"""
XGraphIO.save(xgraph, filename)
@register_opaque_func('pyxir.io.save', [TypeCode.XGraph, TypeCode.Str])
def save_opaque_func(xg, filename):
save(xg, filename)
def load(net_file, params_file):
# type: (str, str) -> XGraph
"""
Load the graph network information and weights from the json network file
    and the h5 parameters file, respectively
Arguments
---------
net_file: str
the path to the file containing the network graph information
params_file: str
the path to the file containing the network weights
"""
xgraph = XGraphIO.load(net_file, params_file)
return xgraph
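# Illustrative sketch (the file name is a placeholder): save an XGraph and load it
# back with the helpers above.
def _example_save_and_load(xgraph):
    save(xgraph, "model")  # writes model.json and model.h5
    return load("model.json", "model.h5")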
@register_opaque_func('pyxir.io.load', [TypeCode.Str, TypeCode.Str, TypeCode.XGraph])
def load_opaque_func(net_file, params_file, xg_callback):
xg_callback.copy_from(load(net_file, params_file))
@register_opaque_func('pyxir.io.load_scheduled_xgraph_from_meta',
[TypeCode.Str, TypeCode.XGraph])
def load_scheduled_xgraph_opaque_func(build_dir: str,
cb_scheduled_xgraph: XGraph):
"""
Expose the load scheduled xgraph function as an opaque function
so it can be called in a language agnostic way
Arguments
---------
build_dir: str
the path to the build directory containing a meta.json file
cb_scheduled_xgraph: XGraph
return the scheduled XGraph
"""
meta_file = os.path.join(build_dir, 'meta.json')
if (not os.path.isfile(meta_file)):
raise ValueError("Could not find meta file at: {}"
.format(meta_file))
with open(meta_file) as json_file:
meta_d = json.load(json_file)
px_net_file = meta_d['px_model']
px_params_file = meta_d['px_params']
if not os.path.isabs(px_net_file):
px_net_file = os.path.join(build_dir, px_net_file)
if not os.path.isabs(px_params_file):
px_params_file = os.path.join(build_dir, px_params_file)
scheduled_xgraph = load(px_net_file, px_params_file)
cb_scheduled_xgraph.copy_from(scheduled_xgraph)
@register_opaque_func('pyxir.io.to_string',
[TypeCode.XGraph, TypeCode.BytesContainer,
TypeCode.BytesContainer])
def write_to_string(xg, xgraph_json_str_callback, xgraph_params_str_callback):
graph_str, data_str = XGraphIO.to_string(xg)
xgraph_json_str_callback.set_bytes(graph_str)
xgraph_params_str_callback.set_bytes(data_str)
def get_xgraph_str(xg: XGraph):
# graph_str, data_str = XGraphIO.to_string(xg)
# return " " + str(len(graph_str)) + " " + graph_str + " " + str(len(data_str) + 1) + " " + data_str
of = OpaqueFuncRegistry.Get("pyxir.io.get_serialized_xgraph")
s = BytesContainer(b"")
of(xg, s)
# import pdb; pdb.set_trace()
return s.get_bytes()
def read_xgraph_str(xg_str: bytes):
of = OpaqueFuncRegistry.Get("pyxir.io.deserialize_xgraph")
xg = XGraph()
s = BytesContainer(xg_str)
# import pdb; pdb.set_trace()
of(xg, s)
return xg
@register_opaque_func('pyxir.io.from_string',
[TypeCode.XGraph, TypeCode.Byte, TypeCode.Byte])
def read_from_string(xg, xgraph_json_str, xgraph_params_str):
# graph_str, data_str = xgraph_str.split(";")
xg_load = XGraphIO.from_string(xgraph_json_str, xgraph_params_str)
xg.copy_from(xg_load)
@register_opaque_func('pyxir.io.serialize_dir',
[TypeCode.Str, TypeCode.BytesContainer])
def serialize_dir(dir_path, serial_str_cb):
if not os.path.isdir(dir_path):
serial_str_cb.set_bytes(b"")
else:
bio = io.BytesIO()
with zipfile.ZipFile(bio, 'w', zipfile.ZIP_DEFLATED) as zip_f:
zip_dir(dir_path, zip_f)
s = bio.getvalue() # .hex()
serial_str_cb.set_bytes(s)
# import pdb; pdb.set_trace()
@register_opaque_func('pyxir.io.deserialize_dir',
[TypeCode.Str, TypeCode.Byte])
def deserialize_dir(dir_path, serial_str):
# import pdb; pdb.set_trace()
if serial_str != b"":
bio = io.BytesIO(serial_str) # .encode('latin1') bytes.fromhex(serial_str))
with zipfile.ZipFile(bio, 'r') as zip_f:
zip_f.extractall(dir_path)
# If empty directory got zipped, recreate empty directory
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# import pdb; pdb.set_trace()
|
the-stack_106_29047 | from djmodels.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('author_app', '0001_initial'),
('book_app', '0001_initial'), # Forces the book table to alter the FK
]
operations = [
migrations.AlterField(
model_name='author',
name='id',
field=models.CharField(max_length=10, primary_key=True),
),
]
|
the-stack_106_29053 | # Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from .gltf2_blender_material_utils import make_texture_block
from ..com.gltf2_blender_conversion import texture_transform_gltf_to_blender
class BlenderPbr():
"""Blender Pbr."""
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
def create(gltf, pypbr, mat_name, vertex_color):
"""Pbr creation."""
engine = bpy.context.scene.render.engine
if engine in ['CYCLES', 'BLENDER_EEVEE']:
BlenderPbr.create_nodetree(gltf, pypbr, mat_name, vertex_color)
def create_nodetree(gltf, pypbr, mat_name, vertex_color, nodetype='principled'):
"""Nodetree creation."""
material = bpy.data.materials[mat_name]
material.use_nodes = True
node_tree = material.node_tree
        # If there is no diffuse texture, but only a color, without
# vertex_color, we set this color in viewport color
if pypbr.color_type == gltf.SIMPLE and not vertex_color:
# Manage some change in beta version on 20190129
if len(material.diffuse_color) == 3:
material.diffuse_color = pypbr.base_color_factor[:3]
else:
material.diffuse_color = pypbr.base_color_factor
# delete all nodes except output
for node in list(node_tree.nodes):
if not node.type == 'OUTPUT_MATERIAL':
node_tree.nodes.remove(node)
output_node = node_tree.nodes[0]
output_node.location = 1250, 0
# create Main node
if nodetype == "principled":
main_node = node_tree.nodes.new('ShaderNodeBsdfPrincipled')
main_node.location = 0, 0
elif nodetype == "unlit":
main_node = node_tree.nodes.new('ShaderNodeEmission')
main_node.location = 750, -300
if pypbr.color_type == gltf.SIMPLE:
if not vertex_color:
# change input values
main_node.inputs[0].default_value = pypbr.base_color_factor
if nodetype == "principled":
# TODO : currently set metallic & specular in same way
main_node.inputs[5].default_value = pypbr.metallic_factor
main_node.inputs[7].default_value = pypbr.roughness_factor
else:
# Create attribute node to get COLOR_0 data
vertexcolor_node = node_tree.nodes.new('ShaderNodeVertexColor')
vertexcolor_node.layer_name = 'Col'
vertexcolor_node.location = -500, 0
if nodetype == "principled":
# TODO : currently set metallic & specular in same way
main_node.inputs[5].default_value = pypbr.metallic_factor
main_node.inputs[7].default_value = pypbr.roughness_factor
# links
rgb_node = node_tree.nodes.new('ShaderNodeMixRGB')
rgb_node.blend_type = 'MULTIPLY'
rgb_node.inputs['Fac'].default_value = 1.0
rgb_node.inputs['Color1'].default_value = pypbr.base_color_factor
node_tree.links.new(rgb_node.inputs['Color2'], vertexcolor_node.outputs[0])
node_tree.links.new(main_node.inputs[0], rgb_node.outputs[0])
elif pypbr.color_type == gltf.TEXTURE_FACTOR:
# TODO alpha ?
if vertex_color:
# TODO tree locations
# Create attribute / separate / math nodes
vertexcolor_node = node_tree.nodes.new('ShaderNodeVertexColor')
vertexcolor_node.layer_name = 'Col'
vc_mult_node = node_tree.nodes.new('ShaderNodeMixRGB')
vc_mult_node.blend_type = 'MULTIPLY'
vc_mult_node.inputs['Fac'].default_value = 1.0
# create UV Map / Mapping / Texture nodes / separate & math and combine
text_node = make_texture_block(
gltf,
node_tree,
pypbr.base_color_texture,
location=(-1000, 500),
label='BASE COLOR',
name='baseColorTexture',
)
mult_node = node_tree.nodes.new('ShaderNodeMixRGB')
mult_node.blend_type = 'MULTIPLY'
mult_node.inputs['Fac'].default_value = 1.0
mult_node.inputs['Color2'].default_value = [
pypbr.base_color_factor[0],
pypbr.base_color_factor[1],
pypbr.base_color_factor[2],
pypbr.base_color_factor[3],
]
# Create links
if vertex_color:
node_tree.links.new(vc_mult_node.inputs[2], vertexcolor_node.outputs[0])
node_tree.links.new(vc_mult_node.inputs[1], mult_node.outputs[0])
node_tree.links.new(main_node.inputs[0], vc_mult_node.outputs[0])
else:
node_tree.links.new(main_node.inputs[0], mult_node.outputs[0])
# Common for both mode (non vertex color / vertex color)
node_tree.links.new(mult_node.inputs[1], text_node.outputs[0])
elif pypbr.color_type == gltf.TEXTURE:
# TODO alpha ?
if vertex_color:
# Create attribute / separate / math nodes
vertexcolor_node = node_tree.nodes.new('ShaderNodeVertexColor')
vertexcolor_node.layer_name = 'Col'
vertexcolor_node.location = -2000, 250
vc_mult_node = node_tree.nodes.new('ShaderNodeMixRGB')
vc_mult_node.blend_type = 'MULTIPLY'
vc_mult_node.inputs['Fac'].default_value = 1.0
# create UV Map / Mapping / Texture nodes / separate & math and combine
if vertex_color:
location = -2000, 500
else:
location = -500, 500
text_node = make_texture_block(
gltf,
node_tree,
pypbr.base_color_texture,
location=location,
label='BASE COLOR',
name='baseColorTexture',
)
# Create links
if vertex_color:
node_tree.links.new(vc_mult_node.inputs[2], vertexcolor_node.outputs[0])
node_tree.links.new(vc_mult_node.inputs[1], text_node.outputs[0])
node_tree.links.new(main_node.inputs[0], vc_mult_node.outputs[0])
else:
node_tree.links.new(main_node.inputs[0], text_node.outputs[0])
if nodetype == 'principled':
# Says metallic, but it means metallic & Roughness values
if pypbr.metallic_type == gltf.SIMPLE:
main_node.inputs[4].default_value = pypbr.metallic_factor
main_node.inputs[7].default_value = pypbr.roughness_factor
elif pypbr.metallic_type == gltf.TEXTURE:
metallic_text = make_texture_block(
gltf,
node_tree,
pypbr.metallic_roughness_texture,
location=(-500, 0),
label='METALLIC ROUGHNESS',
name='metallicRoughnessTexture',
colorspace='NONE',
)
metallic_separate = node_tree.nodes.new('ShaderNodeSeparateRGB')
metallic_separate.location = -250, 0
# links
node_tree.links.new(metallic_separate.inputs[0], metallic_text.outputs[0])
node_tree.links.new(main_node.inputs[4], metallic_separate.outputs[2]) # metallic
node_tree.links.new(main_node.inputs[7], metallic_separate.outputs[1]) # Roughness
elif pypbr.metallic_type == gltf.TEXTURE_FACTOR:
metallic_text = make_texture_block(
gltf,
node_tree,
pypbr.metallic_roughness_texture,
location=(-1000, 0),
label='METALLIC ROUGHNESS',
name='metallicRoughnessTexture',
colorspace='NONE',
)
metallic_separate = node_tree.nodes.new('ShaderNodeSeparateRGB')
metallic_separate.location = -500, 0
metallic_math = node_tree.nodes.new('ShaderNodeMath')
metallic_math.operation = 'MULTIPLY'
metallic_math.inputs[1].default_value = pypbr.metallic_factor
metallic_math.location = -250, 100
roughness_math = node_tree.nodes.new('ShaderNodeMath')
roughness_math.operation = 'MULTIPLY'
roughness_math.inputs[1].default_value = pypbr.roughness_factor
roughness_math.location = -250, -100
# links
node_tree.links.new(metallic_separate.inputs[0], metallic_text.outputs[0])
# metallic
node_tree.links.new(metallic_math.inputs[0], metallic_separate.outputs[2])
node_tree.links.new(main_node.inputs[4], metallic_math.outputs[0])
# roughness
node_tree.links.new(roughness_math.inputs[0], metallic_separate.outputs[1])
node_tree.links.new(main_node.inputs[7], roughness_math.outputs[0])
# link node to output
if nodetype == 'principled':
node_tree.links.new(output_node.inputs[0], main_node.outputs[0])
elif nodetype == 'unlit':
mix = node_tree.nodes.new('ShaderNodeMixShader')
mix.location = 1000, 0
path = node_tree.nodes.new('ShaderNodeLightPath')
path.location = 500, 300
if pypbr.color_type != gltf.SIMPLE:
math = node_tree.nodes.new('ShaderNodeMath')
math.location = 750, 200
math.operation = 'MULTIPLY'
# Set material alpha mode to blend
# This is needed for Eevee
material.blend_method = 'HASHED' # TODO check best result in eevee
transparent = node_tree.nodes.new('ShaderNodeBsdfTransparent')
transparent.location = 750, 0
node_tree.links.new(output_node.inputs[0], mix.outputs[0])
node_tree.links.new(mix.inputs[2], main_node.outputs[0])
node_tree.links.new(mix.inputs[1], transparent.outputs[0])
if pypbr.color_type != gltf.SIMPLE:
node_tree.links.new(math.inputs[0], path.outputs[0])
node_tree.links.new(math.inputs[1], text_node.outputs[1])
node_tree.links.new(mix.inputs[0], math.outputs[0])
else:
node_tree.links.new(mix.inputs[0], path.outputs[0])
|
the-stack_106_29054 | # -*- coding: utf-8 -*-
import warnings
import glob
import numpy as np
from scipy import misc, io
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
gdal_available = True
try:
from osgeo import gdal
except ImportError as gdal_import_error:
gdal_available = False
warnings.warn(gdal_import_error.msg)
import csv
import h5py
from lippmann import *
import color_tools as ct
def from_viewing_angle_to_theta_i(theta_2, alpha, n1, n2, deg=True):
#convert to radians
if deg:
theta_2 = np.deg2rad(theta_2)
alpha = np.deg2rad(alpha)
theta_1_prime = theta_2+alpha
theta_1 = np.arcsin(n2/n1*np.sin(theta_1_prime))
theta_0_prime = alpha-theta_1
if deg:
return np.rad2deg(theta_0_prime)
else:
return theta_0_prime
def load_multispectral_image_PURDUE(path):
if not gdal_available:
raise ImportError("to use PURDUE image module osgeo is required (need gdal.Open)")
gtif = gdal.Open( path + "/data.tif" )
#extract wavelengths
wavelength_data = np.genfromtxt( path + "/wavelengths.txt", delimiter=' ')
indices = np.where( 1-np.isnan(wavelength_data[:,2]) )
wavelengths = wavelength_data[indices, 1].flatten()*1E-9
shape = gtif.GetRasterBand(1).GetDataset().ReadAsArray()[0].shape
lippmann_plate = LippmannPlate(wavelengths, shape[1], shape[0]//2)
# lippmann_plate = LippmannPlate(wavelengths, 1, 1)
for idx in range( gtif.RasterCount ):
print("[ GETTING BAND ]: ", idx)
band = gtif.GetRasterBand(idx+1)
data = band.GetDataset().ReadAsArray()[idx]
#reduce the shape
lippmann_plate.spectrum[idx] = data[shape[0]//2:, :].transpose()
return lippmann_plate
def load_multispectral_image_CAVE(path, image_type='png', direction=np.array([0,0,1])):
list_images = glob.glob(path + '/*.' + image_type)
image = misc.imread(list_images[0]).astype(float)/255.0
n_bands = 31
wavelengths = np.linspace(400E-9, 700E-9, n_bands)
lippmann_plate = LippmannContinuous(wavelengths, image.shape[0], image.shape[1], direction=direction)
lippmann_plate.rgb_ref = misc.imread(glob.glob(path + '/*.bmp')[0]).astype(float)/255.0
idx = 0
for idx, image_name in enumerate(list_images):
image = misc.imread(image_name).astype(float)/255.0
#gamma 'uncorrection'
# image = np.power(image, 2.2)
lippmann_plate.spectrum[idx] = image
return lippmann_plate
def load_multispectral_image_SCIEN(path):
mat_data = io.loadmat(path)
wavelengths = mat_data['wave'].flatten()*1E-9
intensities = mat_data['photons']
lippmann_plate = LippmannContinuous(wavelengths, intensities.shape[0], intensities.shape[1])
lippmann_plate.spectrum = Spectrum3D(wavelengths, intensities)
lippmann_plate.rgb_ref = lippmann_plate.spectrum.compute_rgb()
return lippmann_plate
def load_multispectral_image_Suwannee(path):
mat_data = io.loadmat(path)
wavelengths = mat_data['HDR']['wavelength'][0][0][0]*1E-9
intensities = mat_data['I']
lippmann_plate = LippmannContinuous(wavelengths, intensities.shape[0], intensities.shape[1])
lippmann_plate.spectrum = Spectrum3D(wavelengths, intensities)
lippmann_plate.rgb_ref = lippmann_plate.spectrum.compute_rgb()
return lippmann_plate
def load_multispectral_image_Gamaya(path, filename):
if not gdal_available:
raise ImportError("to use Gamaya image module osgeo is required (need gdal.Open)")
with open(path + '/wavs.csv', 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
wavelengths = np.array(next(reader), dtype=float)*1E-9
gtif = gdal.Open(path + '/' + filename)
shape = gtif.GetRasterBand(1).GetDataset().ReadAsArray()[0].shape
lippmann_plate = LippmannContinuous(wavelengths, shape[0], shape[1])
for idx in range( gtif.RasterCount ):
band = gtif.GetRasterBand(idx+1)
data = np.float_(band.GetDataset().ReadAsArray()[idx])/255.
data = np.nan_to_num(data)
data[data<0] = 0.
lippmann_plate.spectrum[idx] = data
return lippmann_plate
def load_multispectral_image_HySpex(path):
f = h5py.File(path)
data = np.array(f['D'])
wavelengths = np.array(f['wavelengths']).flatten()*1E-9
#put the spectral dimension at the end
data = np.rollaxis(data, 2)
data = np.rollaxis(data, 2)
print(data.shape)
print(wavelengths)
#this dataset is huge, cut a bit... or not
intensity = data[600:1200,350:950,:]
#normalize
intensity -= np.min(intensity)
intensity /= np.max(intensity)
lippmann_plate = LippmannContinuous(wavelengths, intensity.shape[0], intensity.shape[1])
lippmann_plate.spectrum = Spectrum3D(wavelengths, intensity)
lippmann_plate.rgb_ref = lippmann_plate.spectrum.compute_rgb()
return lippmann_plate
def create_multispectral_image_discrete(path, N_prime):
#read image
im = misc.imread(path).astype(float)/255.0
#create Lippmann plate object
lippmann_plate = LippmannDiscrete( N_prime, im.shape[0], im.shape[1])
    #compute the spectrum
im_xyz = ct.from_rgb_to_xyz(im)
lippmann_plate.spectrum.intensities = ct.from_xyz_to_spectrum(im_xyz, lippmann_plate.wavelengths)
return lippmann_plate
def create_multispectral_image(path, N_prime):
#read image
im = misc.imread(path).astype(float)/255.0
    wavelengths = np.linspace(390E-9, 700E-9, N_prime)
    #create Lippmann plate object
    lippmann_plate = LippmannContinuous( wavelengths, im.shape[0], im.shape[1])
    #compute the spectrum
im_xyz = ct.from_rgb_to_xyz(im)
lippmann_plate.spectrum.intensities = ct.from_xyz_to_spectrum(im_xyz, wavelengths)
return lippmann_plate
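# Illustrative sketch (the image path and band count are placeholders): build a
# Lippmann plate from an ordinary RGB image with the helpers above.
def _example_plate_from_rgb():
    return create_multispectral_image_discrete("photo.png", N_prime=31)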
def extract_layers_for_artwork(lippmann_plate, row_idx, subtract_mean=True, normalize=False, negative=False):
plt.imsave('image_slice.png', lippmann_plate.spectrum.rgb_colors[:row_idx+1,:,:])
r = lippmann_plate.reflectances
min_r = np.min(r)
max_r = np.max(r)
row = r[row_idx,:,:].T
#remove the mean
if subtract_mean:
row-= np.mean(row, axis=0)[np.newaxis, :]
if negative:
min_r, max_r = -max_r, -min_r
row = -row
if normalize:
plt.imsave('front_slice.png', row, vmin=min_r, vmax=max_r )
else:
plt.imsave('front_slice.png', 1.-np.power(np.abs(row), 1./3.) )
col = r[:row_idx+1,0,:].T
#remove the mean
if subtract_mean:
col-= np.mean(col, axis=0)[np.newaxis, :]
if negative:
col = -col
if normalize:
plt.imsave('left_slice.png', col, vmin=min_r, vmax=max_r)
else:
plt.imsave('left_slice.png', 1.-np.power(np.abs(col/np.max(row)), 1./3.), vmax=1., vmin=0.)
def generate_interference_images(lippmann_plate):
r = lippmann_plate.reflectances
min_r = np.min(r)
max_r = np.max(r)
for z in range(r.shape[2]):
im = r[:,:,z]
plt.imsave('interferences/'+str(z).zfill(3) + '.png', im, vmin=min_r, vmax=max_r)
def image_perspective_transform(im, angle=np.pi/4, d=0.):
nbre_samples = 10
rows = im.shape[0]
cols = im.shape[1]
if d == 0.:
d = im.shape[1]*10
h = im.shape[0]
l = im.shape[1]
h1 = np.cos(angle)*h
delta_d = np.sin(angle)*h
h2 = d*h1/(d + delta_d)
l2 = d*l/(d + delta_d)
# l2 = h2
src_row = np.linspace(0, h, nbre_samples)
src_col = np.linspace(0, l, nbre_samples)
src_row, src_col = np.meshgrid(src_row, src_col)
src = np.dstack([src_col.flat, src_row.flat])[0]
dst_row = h-np.linspace(h2, 0, nbre_samples)
dst_col = np.linspace(0, l, nbre_samples)
dst_row, dst_col = np.meshgrid(dst_row, dst_col)
scale = np.linspace(l2/l, 1, nbre_samples)
shift = np.linspace(l-l2, 0, nbre_samples)/2.
dst_col = dst_col*scale[np.newaxis,:]+shift[np.newaxis,:]
dst = np.dstack([dst_col.flat, dst_row.flat])[0]
transform = PiecewiseAffineTransform()
transform.estimate(dst, src)
return warp(im, transform, output_shape=im.shape)
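# Illustrative sketch (a random array stands in for a real image, only to show the call):
def _example_perspective_transform():
    im = np.random.rand(100, 150, 3)
    return image_perspective_transform(im, angle=np.pi / 6)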
|
the-stack_106_29055 | from flask import g, jsonify, request
from flask_security import roles_required
from meltano.api.api_blueprint import APIBlueprint
from meltano.api.security.auth import block_if_readonly
from meltano.core.error import PluginInstallError
from meltano.core.plugin import PluginType
from meltano.core.plugin.project_plugin import ProjectPlugin
from meltano.core.plugin_discovery_service import (
PluginDiscoveryService,
PluginNotFoundError,
)
from meltano.core.plugin_install_service import (
PluginInstallReason,
PluginInstallService,
)
from meltano.core.project import Project
from meltano.core.project_add_service import ProjectAddService
from meltano.core.project_plugins_service import ProjectPluginsService
def plugin_def_json(plugin_def):
return {
"name": plugin_def.name,
"namespace": plugin_def.namespace,
"hidden": plugin_def.hidden,
"label": plugin_def.label,
"logo_url": plugin_def.logo_url,
"description": plugin_def.description,
"variants": [
{"name": v.name, "default": i == 0, "deprecated": v.deprecated}
for i, v in enumerate(plugin_def.variants)
],
}
pluginsBP = APIBlueprint("plugins", __name__)
@pluginsBP.errorhandler(PluginInstallError)
def _handle(ex):
return (jsonify({"error": True, "code": str(ex)}), 502)
@pluginsBP.route("/all", methods=["GET"])
def all():
project = Project.find()
discovery = PluginDiscoveryService(project)
all_plugins = {
plugin_type: [plugin_def_json(plugin_def) for plugin_def in plugin_defs]
for plugin_type, plugin_defs in discovery.plugins_by_type().items()
}
return jsonify(all_plugins)
@pluginsBP.route("/installed", methods=["GET"])
def installed():
project = Project.find()
plugins_service = ProjectPluginsService(project)
def plugin_json(plugin: ProjectPlugin):
plugin_json = {"name": plugin.name}
try:
plugin_json.update(plugin_def_json(plugin))
plugin_json["variant"] = plugin.variant
plugin_json["docs"] = plugin.docs
except PluginNotFoundError:
pass
return plugin_json
installed_plugins = {
plugin_type: [plugin_json(plugin) for plugin in plugins]
for plugin_type, plugins in plugins_service.plugins_by_type().items()
}
return jsonify(installed_plugins)
@pluginsBP.route("/add", methods=["POST"])
@block_if_readonly
def add():
payload = request.get_json()
plugin_type = PluginType(payload["plugin_type"])
plugin_name = payload["name"]
variant = payload.get("variant", None)
project = Project.find()
add_service = ProjectAddService(project)
plugin = add_service.add(plugin_type, plugin_name, variant=variant)
return jsonify(plugin.canonical())
@pluginsBP.route("/install/batch", methods=["POST"])
@block_if_readonly
def install_batch():
payload = request.get_json()
plugin_type = PluginType(payload["plugin_type"])
plugin_name = payload["name"]
project = Project.find()
plugins_service = ProjectPluginsService(project)
plugin = plugins_service.find_plugin(plugin_name, plugin_type=plugin_type)
add_service = ProjectAddService(project, plugins_service=plugins_service)
related_plugins = add_service.add_related(plugin)
# We will install the plugins in reverse order, since dependencies
# are listed after their dependents in `related_plugins`, but should
# be installed first.
related_plugins.reverse()
install_service = PluginInstallService(project, plugins_service=plugins_service)
install_results = install_service.install_plugins(
related_plugins, reason=PluginInstallReason.ADD
)
for result in install_results:
if not result.sucessful:
raise PluginInstallError(result.message)
return jsonify([plugin.canonical() for plugin in related_plugins])
@pluginsBP.route("/install", methods=["POST"])
@block_if_readonly
def install():
payload = request.get_json()
plugin_type = PluginType(payload["plugin_type"])
plugin_name = payload["name"]
project = Project.find()
plugins_service = ProjectPluginsService(project)
plugin = plugins_service.find_plugin(plugin_name, plugin_type=plugin_type)
install_service = PluginInstallService(project, plugins_service=plugins_service)
install_service.install_plugin(plugin, reason=PluginInstallReason.ADD)
return jsonify(plugin.canonical())
|
the-stack_106_29058 | #!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`license_show` - PyFOS util to show installed licenses on a switch.
*************************************************************************
The :mod:`license_show` util provides the option to display the installed
licenses on a switch.
This module is a standalone script that can be used to display installed
licenses on a switch.
* inputs:
| Infrastructure options:
| -i,--ipaddr=IPADDR IP address of FOS switch.
| -L,--login=LOGIN login name.
| -P,--password=PASSWORD password.
| -f,--vfid=VFID VFID to which the request is directed to [OPTIONAL].
| -s,--secured=MODE HTTPS mode "self" or "CA" [OPTIONAL].
| -v,--verbose verbose mode[OPTIONAL].
* outputs:
* Information of all installed licenses on the switch.
"""
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.utils import brcd_util
from pyfos.pyfos_brocade_license import license
from pyfos.pyfos_brocade_chassis import chassis
def get_license_info(session):
license_obj = license()
result = license_obj.get(session)
return result
def main(argv):
filters = []
inputs = brcd_util.parse(argv, license, filters)
session = brcd_util.getsession(inputs)
result = get_license_info(inputs['session'])
pyfos_util.response_print(result)
chassis_obj = chassis()
result = chassis_obj.get(inputs['session'])
if pyfos_util.is_failed_resp(result):
pyfos_util.response_print(result)
else:
print("License ID:", result.peek_license_id())
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_106_29059 | from django.conf.urls import url
from . import rel_views
from . import views
app_name = 'apis_relations'
urlpatterns = [
url(r'^ajax/get/$', views.get_form_ajax, name='get_form_ajax'),
url(
r'^ajax/save/(?P<entity_type>\w+)/(?P<kind_form>\w+)/(?P<SiteID>[0-9]+)(?:/(?P<ObjectID>[0-9]*))?/$',
views.save_ajax_form, name='save_ajax_form'
),
url(r'^(?P<entity>[a-z]+)/list/$',
rel_views.GenericRelationView.as_view(), name='generic_relations_list'),
url(r'^(?P<entity>[a-z]+)/(?P<pk>[0-9]+)/detail$',
rel_views.GenericRelationDetailView.as_view(),
name='generic_relations_detail_view'),
]
|
the-stack_106_29060 | # support Quandl 3.x.x
try:
import quandl as Quandl
except:
# if import fails use Quandl 2.x.x
import Quandl
from chartpy import Chart, Style, Canvas
# get your own free Quandl API key from https://www.quandl.com/
try:
from chartpy.chartcred import ChartCred
cred = ChartCred()
quandl_api_key = cred.quandl_api_key
except:
quandl_api_key = "x"
# choose run_example = 0 for everything
# run_example = 1 - create a plain and Keen.io based template for a chart webpage
run_example = 0
if run_example == 1 or run_example == 0:
df = Quandl.get(["FRED/A191RL1Q225SBEA"], authtoken=quandl_api_key)
df.columns = ["Real QoQ"]
# Chart object is initialised with the dataframe and our chart style
chart_bokeh = Chart(df=df, chart_type='line', engine='bokeh',
style=Style(title="US GDP", source="Quandl/Fred", scale_factor=-2, width=500, height=300, silent_display=True))
chart_plotly = Chart(df=df, chart_type='line', engine='plotly',
style=Style(title="US GDP", source="Quandl/Fred", scale_factor=-2, width=500, height=300, silent_display=True))
chart_matplotlib = Chart(df=df, chart_type='line', engine='matplotlib',
style=Style(title="US GDP", source="Quandl/Fred", scale_factor=-2, width=500, height=300, silent_display=True))
text = "A demo of chartpy canvas!!"
# using plain template
canvas = Canvas([[text, chart_bokeh], [chart_plotly, df.tail(n=5)]])
canvas.generate_canvas(silent_display=False, canvas_plotter='plain')
# using the Keen template (needs static folder in the same place as final HTML file)
canvas = Canvas([[chart_bokeh, chart_plotly], [chart_plotly, chart_matplotlib]])
canvas.generate_canvas(silent_display=False, canvas_plotter='keen') |
the-stack_106_29062 | from pygears.hls import ir
from functools import partial
from pygears.typing import Array, Integer, Queue, code, typeof, Integral, Tuple, Union, Int, Unit
from .sv_keywords import sv_keywords
# TODO: Use precedence of operators to induce parentheses around expressions
SLICE_FUNC_TEMPLATE = """function {4} [{2}:0] slice_{0}_{1}_{3}(input [{3}:0] val);
slice_{0}_{1}_{3} = val[{0}:{1}];
endfunction
"""
INDEX_FUNC_TEMPLATE = """function {0} [{1}-1:0] index_{1}_{2}_{3}(input [{2}:0] val, input [{3}:0] ind);
index_{1}_{2}_{3} = val[ind*{1}+:{1}];
endfunction
"""
def get_slice_func(aux_funcs, start, stop, din_width, signed):
name = f'slice_{stop}_{start}_{din_width-1}'
if name not in aux_funcs:
aux_funcs[name] = SLICE_FUNC_TEMPLATE.format(stop, start, stop - start, din_width - 1,
'signed' if signed else '')
return name
def get_index_func(aux_funcs, din_width, ind_width, part_width, signed):
name = f'index_{part_width}_{din_width-1}_{ind_width-1}'
if name not in aux_funcs:
aux_funcs[name] = INDEX_FUNC_TEMPLATE.format('signed' if signed else '', part_width,
din_width - 1, ind_width - 1)
return name
def index_to_sv_slice(dtype, key):
subtype = dtype[key]
if isinstance(key, slice):
key = min(key.start, key.stop)
if key is None or key == 0:
low_pos = 0
else:
low_pos = dtype[:key].width
high_pos = low_pos + subtype.width - 1
return f'{high_pos}:{low_pos}'
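# e.g. (illustrative) for a Tuple of two 8-bit fields and key = 1 this yields
# '15:8', the bit range occupied by the second field.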
def sieve_slices(dtype, keys):
if not isinstance(keys, tuple):
keys = (keys, )
return list(
map(partial(index_to_sv_slice, dtype),
filter(lambda i: getattr(dtype[i], 'width', 0) > 0, keys)))
class SVExpressionVisitor:
def __init__(self, aux_funcs=None):
self.separator = '.'
self.expr = svexpr
if aux_funcs is None:
aux_funcs = {}
self.aux_funcs = aux_funcs
def visit(self, node):
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def visit_OperandVal(self, node):
if node.context:
return f'{node.op.name}_{node.context}'
def visit_ResExpr(self, node):
if isinstance(node.val, tuple):
res = []
for op in reversed(node.val):
res_op = ir.ResExpr(op)
if res_op != ir.ResExpr(Unit()):
svexpr = self.visit(res_op)
res.append(self.cast_svexpr(svexpr, res_op.dtype, type(op)))
if not res:
return None
return '{' + ', '.join(res) + '}'
if getattr(node.val, 'unknown', False):
return f"{node.dtype.width}'bx"
val = node.val
if not isinstance(node.val, Integer):
val = Integer(code(node.val, int))
sign = '-' if val < 0 else ''
return f"{sign}{val.width}'d{abs(int(val))}"
def visit_FunctionCall(self, node):
return (f'{node.name}(' + ', '.join(str(self.visit(op)) for op in node.operands) + ')')
def visit_Interface(self, node):
return node.name
def visit_Variable(self, node):
breakpoint()
return f'{node.name}_v'
def visit_Register(self, node):
return f'{node.name}_v'
def visit_Name(self, node):
name = node.name
if name in sv_keywords:
name = f'pg_{name}'
if node.ctx == 'store' and isinstance(node.obj, ir.Variable) and node.obj.reg:
return f'{name}_next'
if node.ctx in ['en']:
return f'{name}_{node.ctx}'
return name
def visit_Await(self, node):
return self.visit(node.expr)
def visit_Component(self, node):
if (node.field == 'data'):
return f'{node.val.name}_s'
else:
return self.separator.join([self.visit(node.val), node.field])
def visit_InterfacePull(self, node):
# return f'{node.intf.name}{self.separator}data'
return f'{node.intf.name}_s'
def visit_InterfaceReady(self, node):
# return f'{node.intf.name}{self.separator}data'
return f'{node.intf.name}.ready'
def visit_InterfaceAck(self, node):
# return f'{node.intf.name}{self.separator}data'
return f'{node.intf.name}.valid && {node.intf.name}.ready'
def visit_IntfReadyExpr(self, node):
res = []
if not isinstance(node.port, (list, tuple)):
return f'{node.name}{self.separator}ready'
for port in node.port:
# if port.context:
# inst = self.expr(
# BinOpExpr(
# (f'{port.name}{self.separator}ready', port.context),
# '&&'))
# res.append(f'({inst})')
# else:
res.append(f'{port.name}{self.separator}ready')
res = ' || '.join(res)
if len(node.port) > 1:
return f'({res})'
return f'{res}'
def visit_CallExpr(self, node):
breakpoint()
def visit_AttrExpr(self, node):
val = [self.visit(node.val)]
# if node.attr:
# if typeof(node.val.dtype, Queue):
# try:
# node.val.dtype[node.attr[0]]
# except KeyError:
# val.append('data')
return self.separator.join(val + [node.attr])
def cast_sign(self, expr, expr_dtype, cast_dtype):
res_signed = getattr(expr_dtype, 'signed', False)
op_signed = getattr(cast_dtype, 'signed', False)
if res_signed != op_signed:
sign = 'signed' if res_signed else 'unsigned'
expr = f"{sign}'({expr})"
return expr
def cast_width(self, expr, expr_dtype, cast_dtype):
if len(cast_dtype) != len(expr_dtype):
expr = f"{expr_dtype.width}'({expr})"
return expr
def cast_svexpr(self, svexpr, expr_dtype, cast_dtype):
expr_signed = getattr(expr_dtype, 'signed', False)
res_signed = getattr(cast_dtype, 'signed', False)
expr_width = expr_dtype.width
cast_width = cast_dtype.width
if cast_width == 0:
return None
if res_signed != expr_signed:
if res_signed:
svexpr = f"signed'({{1'b0, {svexpr}}})"
expr_width += 1
else:
svexpr = f"unsigned'({svexpr})"
if cast_width != expr_width:
svexpr = f"{cast_width}'({svexpr})"
return svexpr
def cast_expr(self, expr, cast_dtype):
return self.cast_svexpr(self.visit(expr), expr.dtype, cast_dtype)
def visit_CastExpr(self, node):
return self.cast_expr(node.operand, node.cast_to)
def visit_ConcatExpr(self, node):
svexprs = []
for op in reversed(node.operands):
if op == ir.ResExpr(Unit()):
continue
sv = self.visit(op)
if sv is None:
continue
svexprs.append(str(sv))
if svexprs:
return '{' + ', '.join(svexprs) + '}'
return None
def visit_ArrayOpExpr(self, node):
val = self.visit(node.array)
return f'{ir.OPMAP[node.operator]}({val})'
def visit_UnaryOpExpr(self, node):
val = self.visit(node.operand)
if val is None:
return None
res = f'{ir.OPMAP[node.operator]}({val})'
if node.operator in [ir.opc.Invert]:
res = self.cast_svexpr(res, node.dtype, node.operand.dtype)
# f"{node.dtype.width}'({res})"
return res
def visit_BinOpExpr(self, node):
ops = [self.visit(op) for op in node.operands]
op_dtypes = [op.dtype for op in node.operands]
op_sign = [getattr(dtype, 'signed', False) for dtype in op_dtypes]
if node.operator in [
ir.opc.Add, ir.opc.Sub, ir.opc.Mult, ir.opc.BitOr, ir.opc.BitAnd, ir.opc.BitXor
]:
cast_dtype = node.dtype
ops = [self.cast_svexpr(expr, dtype, cast_dtype) for expr, dtype in zip(ops, op_dtypes)]
elif node.operator == ir.opc.LShift:
if op_dtypes[0].width < node.dtype.width:
ops[0] = self.cast_svexpr(ops[0], op_dtypes[0], node.dtype)
elif node.operator in [
ir.opc.Eq, ir.opc.Gt, ir.opc.GtE, ir.opc.Lt, ir.opc.LtE, ir.opc.NotEq, ir.opc.And,
ir.opc.Or
]:
if op_sign[0] and not op_sign[1]:
ops[1] = self.cast_svexpr(ops[1], op_dtypes[1], op_dtypes[0])
elif op_sign[1] and not op_sign[0]:
ops[0] = self.cast_svexpr(ops[0], op_dtypes[0], op_dtypes[1])
res = f'({ops[0]}) {ir.OPMAP[node.operator]} ({ops[1]})'
if node.operator in [ir.opc.RShift]:
res = self.cast_svexpr(res, op_dtypes[0], node.dtype)
return res
def visit_SubscriptExpr(self, node):
val = self.visit(node.val)
if val is None:
return None
if isinstance(node.index, ir.ResExpr):
index = node.index.val
index = node.val.dtype.index_norm(index)[0]
if isinstance(index, slice):
stop = int(index.stop) - 1
start = int(index.start)
if isinstance(node.val, (ir.Name, ir.AttrExpr)):
return f'{val}[{stop}:{start}]'
else:
start = index
stop = start
index = int(index)
if isinstance(node.val, (ir.Name, ir.AttrExpr, ir.Component)):
if typeof(node.val.dtype, (Tuple, Union, Queue)):
return f'{val}{self.separator}{node.val.dtype.fields[index]}'
else:
return f'{val}[{index}]'
if isinstance(node.val, ir.ResExpr):
if typeof(node.val.dtype, Array):
return f'{val}[{index}]'
elif typeof(node.val.dtype, Integral):
return f'{val}[{index}]'
elif typeof(node.val.dtype, (Tuple, Union, Queue)):
return f'{val}{self.separator}{node.val.dtype.fields[index]}'
else:
start = sum(node.val.dtype[i].width for i in range(start))
stop = sum(node.val.dtype[i].width for i in range(stop + 1)) - 1
fname = get_slice_func(self.aux_funcs, start, stop, node.val.dtype.width,
getattr(node.dtype, 'signed', False))
return f'{fname}({val})'
if typeof(node.val.dtype, (Array, Queue, Integer, Tuple, Union)):
ind = self.visit(node.index)
if isinstance(node.val, ir.Name):
return f'{val}[{ind}]'
else:
fname = get_index_func(self.aux_funcs, node.val.dtype.width, node.index.dtype.width,
node.dtype.width, getattr(node.dtype, 'signed', False))
return f'{fname}({val}, {ind})'
breakpoint()
raise Exception('Unsupported slicing')
def visit_ConditionalExpr(self, node):
cond = self.visit(node.cond)
ops = [self.visit(op) for op in node.operands]
return f'(({cond}) ? ({ops[0]}) : ({ops[1]}))'
def _parse_intf(self, node, context=None):
if context is None:
context = getattr(node, 'context', None)
if context:
if context == 'eot':
return f'&{node.name}_s{self.separator}{context}'
return f'{node.name}{self.separator}{context}'
return f'{node.name}_s'
def generic_visit(self, node):
return node
def svexpr(expr, aux_funcs=None):
sv_visit = SVExpressionVisitor(aux_funcs)
return sv_visit.visit(expr)
|
the-stack_106_29063 | #! /usr/bin/env python3
# Read from stdin, spit out C header or body.
import argparse
import copy
import fileinput
import re
from collections import namedtuple
Enumtype = namedtuple('Enumtype', ['name', 'value'])
type2size = {
'pad': 1,
'struct channel_id': 32,
'struct short_channel_id': 8,
'struct ipv6': 16,
'secp256k1_ecdsa_signature': 64,
'struct preimage': 32,
'struct pubkey': 33,
'struct sha256': 32,
'struct bitcoin_blkid': 32,
'struct bitcoin_txid': 32,
'struct secret': 32,
'u64': 8,
'u32': 4,
'u16': 2,
'u8': 1,
'bool': 1
}
# These struct array helpers require a context to allocate from.
varlen_structs = [
'peer_features',
'gossip_getnodes_entry',
'failed_htlc',
'utxo',
'bitcoin_tx',
'wirestring',
]
class FieldType(object):
def __init__(self, name):
self.name = name
def is_assignable(self):
return self.name in ['u8', 'u16', 'u32', 'u64', 'bool'] or self.name.startswith('enum ')
# We only accelerate the u8 case: it's common and trivial.
def has_array_helper(self):
return self.name in ['u8']
# Returns base size
@staticmethod
def _typesize(typename):
if typename in type2size:
return type2size[typename]
elif typename.startswith('struct ') or typename.startswith('enum '):
# We allow unknown structures/enums, for extensibility (can only happen
# if explicitly specified in csv)
return 0
else:
raise ValueError('Unknown typename {}'.format(typename))
# Full (message, fieldname)-mappings
typemap = {
('update_fail_htlc', 'reason'): FieldType('u8'),
('node_announcement', 'alias'): FieldType('u8'),
('update_add_htlc', 'onion_routing_packet'): FieldType('u8'),
('update_fulfill_htlc', 'payment_preimage'): FieldType('struct preimage'),
('error', 'data'): FieldType('u8'),
('shutdown', 'scriptpubkey'): FieldType('u8'),
('node_announcement', 'rgb_color'): FieldType('u8'),
('node_announcement', 'addresses'): FieldType('u8'),
('node_announcement', 'ipv6'): FieldType('struct ipv6'),
('announcement_signatures', 'short_channel_id'): FieldType('struct short_channel_id'),
('channel_announcement', 'short_channel_id'): FieldType('struct short_channel_id'),
('channel_update', 'short_channel_id'): FieldType('struct short_channel_id'),
('revoke_and_ack', 'per_commitment_secret'): FieldType('struct secret'),
('channel_reestablish_option_data_loss_protect', 'your_last_per_commitment_secret'): FieldType('struct secret')
}
# Partial names that map to a datatype
partialtypemap = {
'signature': FieldType('secp256k1_ecdsa_signature'),
'features': FieldType('u8'),
'channel_id': FieldType('struct channel_id'),
'chain_hash': FieldType('struct bitcoin_blkid'),
'funding_txid': FieldType('struct bitcoin_txid'),
'pad': FieldType('pad'),
}
# Size to typename match
sizetypemap = {
33: FieldType('struct pubkey'),
32: FieldType('struct sha256'),
8: FieldType('u64'),
4: FieldType('u32'),
2: FieldType('u16'),
1: FieldType('u8')
}
# It would be nicer if we had put '*u8' in spec and disallowed bare lenvar.
# In practice we only recognize lenvar when it's the previous field.
# size := baresize | arraysize
# baresize := simplesize | lenvar
# simplesize := number | type
# arraysize := length '*' type
# length := lenvar | number
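# Illustrative examples of the grammar above (hypothetical csv values): '8' and
# 'u64' are simple sizes (a byte count or a type name), 'alen' alone reuses the
# immediately preceding length field, and 'alen*1' is an array of 1-byte
# elements whose count comes from that 'alen' field.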
class Field(object):
def __init__(self, message, name, size, comments, prevname):
self.message = message
self.comments = comments
self.name = name
self.is_len_var = False
self.lenvar = None
self.num_elems = 1
self.optional = False
# ? means optional field (not supported for arrays)
if size.startswith('?'):
self.optional = True
size = size[1:]
# If it's an arraysize, swallow prefix.
elif '*' in size:
number = size.split('*')[0]
if number == prevname:
self.lenvar = number
else:
self.num_elems = int(number)
size = size.split('*')[1]
elif options.bolt and size == prevname:
# Raw length field, implies u8.
self.lenvar = size
size = '1'
# Bolts use just a number: Guess type based on size.
if options.bolt:
base_size = int(size)
self.fieldtype = Field._guess_type(message, self.name, base_size)
# There are some arrays which we have to guess, based on sizes.
tsize = FieldType._typesize(self.fieldtype.name)
if base_size % tsize != 0:
raise ValueError('Invalid size {} for {}.{} not a multiple of {}'
.format(base_size,
self.message,
self.name,
tsize))
self.num_elems = int(base_size / tsize)
else:
# Real typename.
self.fieldtype = FieldType(size)
def basetype(self):
base = self.fieldtype.name
if base.startswith('struct '):
base = base[7:]
elif base.startswith('enum '):
base = base[5:]
return base
def is_padding(self):
return self.name.startswith('pad')
# Padding is always treated as an array.
def is_array(self):
return self.num_elems > 1 or self.is_padding()
def is_variable_size(self):
return self.lenvar is not None
def needs_ptr_to_ptr(self):
return self.is_variable_size() or self.optional
def is_assignable(self):
if self.is_array() or self.needs_ptr_to_ptr():
return False
return self.fieldtype.is_assignable()
def has_array_helper(self):
return self.fieldtype.has_array_helper()
# Returns FieldType
@staticmethod
def _guess_type(message, fieldname, base_size):
# Check for full (message, fieldname)-matches
if (message, fieldname) in typemap:
return typemap[(message, fieldname)]
# Check for partial field names
for k, v in partialtypemap.items():
if k in fieldname:
return v
# Check for size matches
if base_size in sizetypemap:
return sizetypemap[base_size]
raise ValueError('Unknown size {} for {}'.format(base_size, fieldname))
fromwire_impl_templ = """bool fromwire_{name}({ctx}const void *p{args})
{{
{fields}
\tconst u8 *cursor = p;
\tsize_t plen = tal_count(p);
\tif (fromwire_u16(&cursor, &plen) != {enum.name})
\t\treturn false;
{subcalls}
\treturn cursor != NULL;
}}
"""
fromwire_header_templ = """bool fromwire_{name}({ctx}const void *p{args});
"""
towire_header_templ = """u8 *towire_{name}(const tal_t *ctx{args});
"""
towire_impl_templ = """u8 *towire_{name}(const tal_t *ctx{args})
{{
{field_decls}
\tu8 *p = tal_arr(ctx, u8, 0);
\ttowire_u16(&p, {enumname});
{subcalls}
\treturn memcheck(p, tal_count(p));
}}
"""
printwire_header_templ = """void printwire_{name}(const char *fieldname, const u8 *cursor);
"""
printwire_impl_templ = """void printwire_{name}(const char *fieldname, const u8 *cursor)
{{
\tsize_t plen = tal_count(cursor);
\tif (fromwire_u16(&cursor, &plen) != {enum.name}) {{
\t\tprintf("WRONG TYPE?!\\n");
\t\treturn;
\t}}
{subcalls}
\tif (plen != 0)
\t\tprintf("EXTRA: %s\\n", tal_hexstr(NULL, cursor, plen));
}}
"""
class CCode(object):
"""Simple class to create indented C code"""
def __init__(self):
self.indent = 1
self.single_indent = False
self.code = []
def append(self, lines):
for line in lines.split('\n'):
            # Let us do the indenting please!
assert '\t' not in line
# Special case: } by itself is pre-unindented.
if line == '}':
self.indent -= 1
self.code.append("\t" * self.indent + line)
continue
self.code.append("\t" * self.indent + line)
if self.single_indent:
self.indent -= 1
self.single_indent = False
if line.endswith('{'):
self.indent += 1
elif line.endswith('}'):
self.indent -= 1
elif line.startswith('for') or line.startswith('if'):
self.indent += 1
self.single_indent = True
def __str__(self):
assert self.indent == 1
assert not self.single_indent
return '\n'.join(self.code)
class Message(object):
def __init__(self, name, enum, comments):
self.name = name
self.enum = enum
self.comments = comments
self.fields = []
self.has_variable_fields = False
def checkLenField(self, field):
# Optional fields don't have a len.
if field.optional:
return
for f in self.fields:
if f.name == field.lenvar:
if f.fieldtype.name != 'u16':
raise ValueError('Field {} has non-u16 length variable {} (type {})'
.format(field.name, field.lenvar, f.fieldtype.name))
if f.is_array() or f.needs_ptr_to_ptr():
raise ValueError('Field {} has non-simple length variable {}'
.format(field.name, field.lenvar))
f.is_len_var = True
f.lenvar_for = field
return
raise ValueError('Field {} unknown length variable {}'
.format(field.name, field.lenvar))
def addField(self, field):
# We assume field lengths are 16 bit, to avoid overflow issues and
# massive allocations.
if field.is_variable_size():
self.checkLenField(field)
self.has_variable_fields = True
elif field.basetype() in varlen_structs or field.optional:
self.has_variable_fields = True
self.fields.append(field)
def print_fromwire_array(self, subcalls, basetype, f, name, num_elems):
if f.has_array_helper():
subcalls.append('fromwire_{}_array(&cursor, &plen, {}, {});'
.format(basetype, name, num_elems))
else:
subcalls.append('for (size_t i = 0; i < {}; i++)'
.format(num_elems))
if f.fieldtype.is_assignable():
subcalls.append('({})[i] = fromwire_{}(&cursor, &plen);'
.format(name, basetype))
elif basetype in varlen_structs:
subcalls.append('({})[i] = fromwire_{}(ctx, &cursor, &plen);'
.format(name, basetype))
else:
subcalls.append('fromwire_{}(&cursor, &plen, {} + i);'
.format(basetype, name))
def print_fromwire(self, is_header):
ctx_arg = 'const tal_t *ctx, ' if self.has_variable_fields else ''
args = []
for f in self.fields:
if f.is_len_var or f.is_padding():
continue
elif f.is_array():
args.append(', {} {}[{}]'.format(f.fieldtype.name, f.name, f.num_elems))
else:
ptrs = '*'
                # If we're handling a variable array, we need a ptr-to-ptr.
if f.needs_ptr_to_ptr():
ptrs += '*'
# If each type is a variable length, we need a ptr to that.
if f.basetype() in varlen_structs:
ptrs += '*'
args.append(', {} {}{}'.format(f.fieldtype.name, ptrs, f.name))
template = fromwire_header_templ if is_header else fromwire_impl_templ
fields = ['\t{} {};\n'.format(f.fieldtype.name, f.name) for f in self.fields if f.is_len_var]
subcalls = CCode()
for f in self.fields:
basetype = f.basetype()
for c in f.comments:
subcalls.append('/*{} */'.format(c))
if f.is_padding():
subcalls.append('fromwire_pad(&cursor, &plen, {});'
.format(f.num_elems))
elif f.is_array():
self.print_fromwire_array(subcalls, basetype, f, f.name,
f.num_elems)
elif f.is_variable_size():
subcalls.append("//2nd case {name}".format(name=f.name))
typename = f.fieldtype.name
# If structs are varlen, need array of ptrs to them.
if basetype in varlen_structs:
typename += ' *'
subcalls.append('*{} = {} ? tal_arr(ctx, {}, {}) : NULL;'
.format(f.name, f.lenvar, typename, f.lenvar))
self.print_fromwire_array(subcalls, basetype, f, '*' + f.name,
f.lenvar)
else:
if f.optional:
subcalls.append("if (!fromwire_bool(&cursor, &plen))\n"
"*{} = NULL;\n"
"else {{\n"
"*{} = tal(ctx, {});\n"
"fromwire_{}(&cursor, &plen, *{});\n"
"}}"
.format(f.name, f.name, f.fieldtype.name,
basetype, f.name))
elif f.is_assignable():
subcalls.append("//3rd case {name}".format(name=f.name))
if f.is_len_var:
subcalls.append('{} = fromwire_{}(&cursor, &plen);'
.format(f.name, basetype))
else:
subcalls.append('*{} = fromwire_{}(&cursor, &plen);'
.format(f.name, basetype))
elif basetype in varlen_structs:
subcalls.append('*{} = fromwire_{}(ctx, &cursor, &plen);'
.format(f.name, basetype))
else:
subcalls.append('fromwire_{}(&cursor, &plen, {});'
.format(basetype, f.name))
return template.format(
name=self.name,
ctx=ctx_arg,
args=''.join(args),
fields=''.join(fields),
enum=self.enum,
subcalls=str(subcalls)
)
def print_towire_array(self, subcalls, basetype, f, num_elems):
if f.has_array_helper():
subcalls.append('towire_{}_array(&p, {}, {});'
.format(basetype, f.name, num_elems))
else:
subcalls.append('for (size_t i = 0; i < {}; i++)'
.format(num_elems))
if f.fieldtype.is_assignable() or basetype in varlen_structs:
subcalls.append('towire_{}(&p, {}[i]);'
.format(basetype, f.name))
else:
subcalls.append('towire_{}(&p, {} + i);'
.format(basetype, f.name))
def print_towire(self, is_header):
template = towire_header_templ if is_header else towire_impl_templ
args = []
for f in self.fields:
if f.is_padding() or f.is_len_var:
continue
if f.is_array():
args.append(', const {} {}[{}]'.format(f.fieldtype.name, f.name, f.num_elems))
elif f.is_assignable():
args.append(', {} {}'.format(f.fieldtype.name, f.name))
elif f.is_variable_size() and f.basetype() in varlen_structs:
args.append(', const {} **{}'.format(f.fieldtype.name, f.name))
else:
args.append(', const {} *{}'.format(f.fieldtype.name, f.name))
field_decls = []
for f in self.fields:
if f.is_len_var:
field_decls.append('\t{0} {1} = tal_count({2});'.format(
f.fieldtype.name, f.name, f.lenvar_for.name
))
subcalls = CCode()
for f in self.fields:
basetype = f.fieldtype.name
if basetype.startswith('struct '):
basetype = basetype[7:]
elif basetype.startswith('enum '):
basetype = basetype[5:]
for c in f.comments:
subcalls.append('/*{} */'.format(c))
if f.is_padding():
subcalls.append('towire_pad(&p, {});'
.format(f.num_elems))
elif f.is_array():
self.print_towire_array(subcalls, basetype, f, f.num_elems)
elif f.is_variable_size():
self.print_towire_array(subcalls, basetype, f, f.lenvar)
else:
if f.optional:
subcalls.append("if (!{})\n"
"towire_bool(&p, false);\n"
"else {{\n"
"towire_bool(&p, true);\n"
"towire_{}(&p, {});\n"
"}}".format(f.name, basetype, f.name))
else:
subcalls.append('towire_{}(&p, {});'
.format(basetype, f.name))
return template.format(
name=self.name,
args=''.join(args),
enumname=self.enum.name,
field_decls='\n'.join(field_decls),
subcalls=str(subcalls),
)
def add_truncate_check(self, subcalls):
# Report if truncated, otherwise print.
subcalls.append('if (!cursor) {\n'
'printf("**TRUNCATED**\\n");\n'
'return;\n'
'}')
def print_printwire_array(self, subcalls, basetype, f, num_elems):
if f.has_array_helper():
subcalls.append('printwire_{}_array(tal_fmt(NULL, "%s.{}", fieldname), &cursor, &plen, {});'
.format(basetype, f.name, num_elems))
else:
subcalls.append('printf("[");')
subcalls.append('for (size_t i = 0; i < {}; i++) {{'
.format(num_elems))
subcalls.append('{} v;'.format(f.fieldtype.name))
if f.fieldtype.is_assignable():
subcalls.append('v = fromwire_{}(&cursor, plen);'
.format(f.fieldtype.name, basetype))
else:
# We don't handle this yet!
assert(basetype not in varlen_structs)
subcalls.append('fromwire_{}(&cursor, &plen, &v);'
.format(basetype))
self.add_truncate_check(subcalls)
subcalls.append('printwire_{}(tal_fmt(NULL, "%s.{}", fieldname), &v);'
.format(basetype, f.name))
subcalls.append('}')
subcalls.append('printf("]");')
def print_printwire(self, is_header):
template = printwire_header_templ if is_header else printwire_impl_templ
fields = ['\t{} {};\n'.format(f.fieldtype.name, f.name) for f in self.fields if f.is_len_var]
subcalls = CCode()
for f in self.fields:
basetype = f.basetype()
for c in f.comments:
subcalls.append('/*{} */'.format(c))
if f.is_len_var:
subcalls.append('{} {} = fromwire_{}(&cursor, &plen);'
.format(f.fieldtype.name, f.name, basetype))
self.add_truncate_check(subcalls)
continue
subcalls.append('printf("{}=");'.format(f.name))
if f.is_padding():
subcalls.append('printwire_pad(tal_fmt(NULL, "%s.{}", fieldname), &cursor, &plen, {});'
.format(f.name, f.num_elems))
self.add_truncate_check(subcalls)
elif f.is_array():
self.print_printwire_array(subcalls, basetype, f, f.num_elems)
self.add_truncate_check(subcalls)
elif f.is_variable_size():
self.print_printwire_array(subcalls, basetype, f, f.lenvar)
self.add_truncate_check(subcalls)
else:
if f.optional:
subcalls.append("if (fromwire_bool(&cursor, &plen)) {")
if f.is_assignable():
subcalls.append('{} {} = fromwire_{}(&cursor, &plen);'
.format(f.fieldtype.name, f.name, basetype))
else:
# Don't handle these yet.
assert(basetype not in varlen_structs)
subcalls.append('{} {};'.
format(f.fieldtype.name, f.name))
subcalls.append('fromwire_{}(&cursor, &plen, &{});'
.format(basetype, f.name))
self.add_truncate_check(subcalls)
subcalls.append('printwire_{}(tal_fmt(NULL, "%s.{}", fieldname), &{});'
.format(basetype, f.name, f.name))
if f.optional:
subcalls.append("} else {")
self.add_truncate_check(subcalls)
subcalls.append("}")
return template.format(
name=self.name,
fields=''.join(fields),
enum=self.enum,
subcalls=str(subcalls)
)
def find_message(messages, name):
for m in messages:
if m.name == name:
return m
return None
def find_message_with_option(messages, optional_messages, name, option):
fullname = name + "_" + option.replace('-', '_')
base = find_message(messages, name)
if not base:
raise ValueError('Unknown message {}'.format(name))
m = find_message(optional_messages, fullname)
if not m:
# Add a new option.
m = copy.deepcopy(base)
m.name = fullname
optional_messages.append(m)
return m
parser = argparse.ArgumentParser(description='Generate C from CSV')
parser.add_argument('--header', action='store_true', help="Create wire header")
parser.add_argument('--bolt', action='store_true', help="Generate wire-format for BOLT")
parser.add_argument('--printwire', action='store_true', help="Create print routines")
parser.add_argument('headerfilename', help='The filename of the header')
parser.add_argument('enumname', help='The name of the enum to produce')
parser.add_argument('files', nargs='*', help='Files to read in (or stdin)')
options = parser.parse_args()
# Maps message names to messages
messages = []
messages_with_option = []
comments = []
includes = []
prevfield = None
# Read csv lines. A single comma (two fields) defines a message and its wire
# type number; more fields define a message field: offset, name, size[, option].
for line in fileinput.input(options.files):
# #include gets inserted into header
if line.startswith('#include '):
includes.append(line)
continue
by_comments = line.rstrip().split('#')
# Emit a comment if they included one
if by_comments[1:]:
comments.append(' '.join(by_comments[1:]))
parts = by_comments[0].split(',')
if parts == ['']:
continue
if len(parts) == 2:
# eg commit_sig,132
messages.append(Message(parts[0], Enumtype("WIRE_" + parts[0].upper(), parts[1]), comments))
comments = []
prevfield = None
else:
if len(parts) == 4:
# eg commit_sig,0,channel-id,8 OR
# commit_sig,0,channel-id,u64
m = find_message(messages, parts[0])
if m is None:
raise ValueError('Unknown message {}'.format(parts[0]))
elif len(parts) == 5:
# eg.
# channel_reestablish,48,your_last_per_commitment_secret,32,option209
m = find_message_with_option(messages, messages_with_option, parts[0], parts[4])
else:
raise ValueError('Line {} malformed'.format(line.rstrip()))
f = Field(m.name, parts[2], parts[3], comments, prevfield)
m.addField(f)
# If it used prevfield as lenvar, keep that for next
# time (multiple fields can use the same lenvar).
if not f.lenvar:
prevfield = parts[2]
comments = []
header_template = """/* This file was generated by generate-wire.py */
/* Do not modify this file! Modify the _csv file it was generated from. */
#ifndef LIGHTNING_{idem}
#define LIGHTNING_{idem}
#include <ccan/tal/tal.h>
#include <wire/wire.h>
{includes}
enum {enumname} {{
{enums}}};
const char *{enumname}_name(int e);
{func_decls}
#endif /* LIGHTNING_{idem} */
"""
impl_template = """/* This file was generated by generate-wire.py */
/* Do not modify this file! Modify the _csv file it was generated from. */
#include <{headerfilename}>
#include <ccan/mem/mem.h>
#include <ccan/tal/str/str.h>
#include <stdio.h>
const char *{enumname}_name(int e)
{{
\tstatic char invalidbuf[sizeof("INVALID ") + STR_MAX_CHARS(e)];
\tswitch ((enum {enumname})e) {{
\t{cases}
\t}}
\tsnprintf(invalidbuf, sizeof(invalidbuf), "INVALID %i", e);
\treturn invalidbuf;
}}
{func_decls}
"""
print_header_template = """/* This file was generated by generate-wire.py */
/* Do not modify this file! Modify the _csv file it was generated from. */
#ifndef LIGHTNING_{idem}
#define LIGHTNING_{idem}
#include <ccan/tal/tal.h>
#include <devtools/print_wire.h>
{includes}
void print{enumname}_message(const u8 *msg);
{func_decls}
#endif /* LIGHTNING_{idem} */
"""
print_template = """/* This file was generated by generate-wire.py */
/* Do not modify this file! Modify the _csv file it was generated from. */
#include "{headerfilename}"
#include <ccan/mem/mem.h>
#include <ccan/tal/str/str.h>
#include <common/utils.h>
#include <stdio.h>
void print{enumname}_message(const u8 *msg)
{{
\tswitch ((enum {enumname})fromwire_peektype(msg)) {{
\t{printcases}
\t}}
\tprintf("UNKNOWN: %s\\n", tal_hex(msg, msg));
}}
{func_decls}
"""
idem = re.sub(r'[^A-Z]+', '_', options.headerfilename.upper())
if options.printwire:
if options.header:
template = print_header_template
else:
template = print_template
elif options.header:
template = header_template
else:
template = impl_template
# Dump out enum, sorted by value order.
enums = ""
for m in messages:
for c in m.comments:
enums += '\t/*{} */\n'.format(c)
enums += '\t{} = {},\n'.format(m.enum.name, m.enum.value)
includes = '\n'.join(includes)
cases = ['case {enum.name}: return "{enum.name}";'.format(enum=m.enum) for m in messages]
printcases = ['case {enum.name}: printf("{enum.name}:\\n"); printwire_{name}("{name}", msg); return;'.format(enum=m.enum, name=m.name) for m in messages]
if options.printwire:
decls = [m.print_printwire(options.header) for m in messages + messages_with_option]
else:
fromwire_decls = [m.print_fromwire(options.header) for m in messages + messages_with_option]
    towire_decls = [m.print_towire(options.header) for m in messages + messages_with_option]
decls = fromwire_decls + towire_decls
print(template.format(
headerfilename=options.headerfilename,
cases='\n\t'.join(cases),
printcases='\n\t'.join(printcases),
idem=idem,
includes=includes,
enumname=options.enumname,
enums=enums,
func_decls='\n'.join(decls)))
|
the-stack_106_29064 | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from argparse import ArgumentParser
from common import platform_util
class BaseBenchmarkUtil(object):
"""Base benchmark util class"""
MODEL_INITIALIZER = "model_init"
def __init__(self):
self._common_arg_parser = None
self._platform_util = platform_util.PlatformUtil()
def define_args(self):
"""define args for the benchmark interface shared by FP32 and int8
models"""
self._common_arg_parser = ArgumentParser(
add_help=False, description="Parse args for base benchmark "
"interface")
self._common_arg_parser.add_argument(
"-f", "--framework",
help="Specify the name of the deep learning framework to use.",
dest="framework", default=None, required=True)
self._common_arg_parser.add_argument(
"-r", "--model-source-dir",
help="Specify the models source directory from your local machine",
nargs="?", dest="model_source_dir")
self._common_arg_parser.add_argument(
"-p", "--precision",
help="Specify the model precision to use: fp32, int8, or bfloat16",
required=True, choices=["fp32", "int8", "bfloat16"],
dest="precision")
self._common_arg_parser.add_argument(
"-mo", "--mode", help="Specify the type training or inference ",
required=True, choices=["training", "inference"], dest="mode")
self._common_arg_parser.add_argument(
"-m", "--model-name", required=True,
help="model name to run benchmarks for", dest="model_name")
self._common_arg_parser.add_argument(
"-b", "--batch-size",
help="Specify the batch size. If this parameter is not specified "
"or is -1, the largest ideal batch size for the model will "
"be used",
dest="batch_size", type=int, default=-1)
self._common_arg_parser.add_argument(
"-d", "--data-location",
help="Specify the location of the data. If this parameter is not "
"specified, the benchmark will use random/dummy data.",
dest="data_location", default=None)
self._common_arg_parser.add_argument(
"-i", "--socket-id",
help="Specify which socket to use. Only one socket will be used "
"when this value is set. If used in conjunction with "
"--num-cores, all cores will be allocated on the single "
"socket.",
dest="socket_id", type=int, default=-1)
self._common_arg_parser.add_argument(
"-n", "--num-cores",
help="Specify the number of cores to use. If the parameter is not"
" specified or is -1, all cores will be used.",
dest="num_cores", type=int, default=-1)
self._common_arg_parser.add_argument(
"-a", "--num-intra-threads", type=int,
help="Specify the number of threads within the layer",
dest="num_intra_threads", default=None)
self._common_arg_parser.add_argument(
"-e", "--num-inter-threads", type=int,
help="Specify the number threads between layers",
dest="num_inter_threads", default=None)
self._common_arg_parser.add_argument(
"-c", "--checkpoint",
help="Specify the location of trained model checkpoint directory. "
"If mode=training model/weights will be written to this "
"location. If mode=inference assumes that the location points"
" to a model that has already been trained.",
dest="checkpoint", default=None)
self._common_arg_parser.add_argument(
"-g", "--in-graph", help="Full path to the input graph ",
dest="input_graph", default=None)
self._common_arg_parser.add_argument(
"-k", "--benchmark-only",
help="For benchmark measurement only. If neither --benchmark-only "
"or --accuracy-only are specified, it will default to run "
"benchmarking.",
dest="benchmark_only", action="store_true")
self._common_arg_parser.add_argument(
"--accuracy-only",
help="For accuracy measurement only. If neither --benchmark-only "
"or --accuracy-only are specified, it will default to run "
"benchmarking.",
dest="accuracy_only", action="store_true")
self._common_arg_parser.add_argument(
"--output-results",
help="Writes inference output to a file, when used in conjunction "
"with --accuracy-only and --mode=inference.",
dest="output_results", action="store_true")
self._common_arg_parser.add_argument(
"-v", "--verbose", help="Print verbose information.",
dest="verbose", action="store_true")
self._common_arg_parser.add_argument(
"--output-dir", help="Folder to dump output into.",
default="/models/benchmarks/common/tensorflow/logs")
# Allow for additional command line args after --
self._common_arg_parser.add_argument(
"model_args", nargs="*",
help="Additional command line arguments (prefix flag start with"
" '--').")
def check_for_link(self, arg_name, path):
"""
Throws an error if the specified path is a link. os.islink returns
True for sym links. For files, we also look at the number of links in
os.stat() to determine if it's a hard link.
"""
if os.path.islink(path) or \
(os.path.isfile(path) and os.stat(path).st_nlink > 1):
raise ValueError("The {} cannot be a link.".format(arg_name))
def validate_args(self, args):
"""validate the args """
# check model source directory exists
model_source_dir = args.model_source_dir
if model_source_dir is not None:
if not os.path.exists(model_source_dir) or \
not os.path.isdir(model_source_dir):
raise IOError("The model source directory {} "
"does not exist or is not a directory.".
format(model_source_dir))
self.check_for_link("model source directory", model_source_dir)
# check checkpoint location
checkpoint_dir = args.checkpoint
if checkpoint_dir is not None:
if not os.path.exists(checkpoint_dir):
raise IOError("The checkpoint location {} does not exist.".
format(checkpoint_dir))
elif not os.path.isdir(checkpoint_dir):
raise IOError("The checkpoint location {} is not a directory.".
format(checkpoint_dir))
self.check_for_link("checkpoint directory", checkpoint_dir)
# check if input graph file exists
input_graph = args.input_graph
if input_graph is not None:
if not os.path.exists(input_graph):
raise IOError("The input graph {} does not exist.".
format(input_graph))
if not os.path.isfile(input_graph):
raise IOError("The input graph {} must be a file.".
format(input_graph))
self.check_for_link("input graph", input_graph)
# check model_name exists
if not args.model_name:
raise ValueError("The model name is not valid")
# check batch size
batch_size = args.batch_size
if batch_size == 0 or batch_size < -1:
raise ValueError("The batch size {} is not valid.".format(
batch_size))
# check data location exist
data_dir = args.data_location
if data_dir is not None:
if not os.path.exists(data_dir):
raise IOError("The data location {} does not exist.".format(
data_dir))
self.check_for_link("data location", data_dir)
# check if socket id is in socket number range
num_sockets = self._platform_util.num_cpu_sockets()
if args.socket_id != -1 and \
(args.socket_id >= num_sockets or args.socket_id < -1):
raise ValueError("Socket id must be within socket number range: "
"[0, {}].".format(num_sockets - 1))
# check number of cores
num_logical_cores_per_socket = \
self._platform_util.num_cores_per_socket() * \
self._platform_util.num_threads_per_core()
# if a socket_id is specified, only count cores from one socket
system_num_cores = num_logical_cores_per_socket if \
args.socket_id != -1 else num_logical_cores_per_socket * \
self._platform_util.num_cpu_sockets()
num_cores = args.num_cores
if (num_cores <= 0) and (num_cores != -1):
raise ValueError(
"Core number must be greater than 0 or -1. The default value "
"is -1 which means using all the cores in the sockets")
elif num_cores > system_num_cores:
raise ValueError("Number of cores exceeds system core number: {}".
format(system_num_cores))
# check no.of intra threads > 0
num_intra_threads = args.num_intra_threads
if num_intra_threads and num_intra_threads <= 0:
raise ValueError("Number of intra threads "
"value should be greater than 0")
# check no.of inter threads > 0
num_inter_threads = args.num_inter_threads
if num_inter_threads and num_inter_threads <= 0:
raise ValueError("Number of inter threads "
"value should be greater than 0")
if args.output_results and (args.mode != "inference" or not args.accuracy_only):
raise ValueError("--output-results can only be used when running "
"with --mode=inference and --accuracy-only")
elif args.output_results and (args.model_name != "resnet50" or args.precision != "fp32"):
raise ValueError("--output-results is currently only supported for resnet50 FP32 inference.")
def initialize_model(self, args, unknown_args):
"""Create model initializer for the specified model"""
model_initializer = None
model_init_file = None
if args.model_name: # not empty
current_path = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
# find the path to the model_init.py file
filename = "{}.py".format(self.MODEL_INITIALIZER)
model_init_file = os.path.join(current_path, args.use_case,
args.framework, args.model_name,
args.mode, args.precision,
filename)
package = ".".join([args.use_case, args.framework,
args.model_name, args.mode, args.precision])
model_init_module = __import__(
package + "." + self.MODEL_INITIALIZER, fromlist=["*"])
model_initializer = model_init_module.ModelInitializer(
args, unknown_args, self._platform_util)
if model_initializer is None:
raise ImportError("Unable to locate {}.".format(model_init_file))
return model_initializer
|
the-stack_106_29065 | import os
from aws_cdk import core as cdk
from infra.features_ingestion_stack import FeatureIngestionStack
project_name = os.getenv("SAGEMAKER_PROJECT_NAME")
project_id = os.getenv("SAGEMAKER_PROJECT_ID")
app = cdk.App()
bucket_name = os.getenv("PROJECT_BUCKET")
region = os.getenv("AWS_REGION")
synth = cdk.DefaultStackSynthesizer(
file_assets_bucket_name=bucket_name, bucket_prefix="feature_ingestion/"
)
fs_stack = FeatureIngestionStack(
app,
f"{project_name}-FeatureStore",
configuration_path="configurations",
synthesizer=synth,
)
cdk.Tags.of(fs_stack).add(key="sagemaker:project-id", value=project_id)
cdk.Tags.of(fs_stack).add(key="sagemaker:project-name", value=project_name)
app.synth()
|
the-stack_106_29067 | import numpy as np
import pytest
import ezaero.vlm.steady as vlm_steady
INFINITE_WING = {
'wing': vlm_steady.WingParams(cr=1.0, ct=1.0, bp=10000, theta=0.0,
delta=0.0),
'mesh': vlm_steady.MeshParams(m=2, n=400)
}
@pytest.mark.parametrize('alpha', np.array((-2, -1, 0, 1, 2, 5)) * np.pi / 180)
def test_cl_for_infinite_wing(alpha):
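    # For a wing of (effectively) infinite span, thin-airfoil/lifting-line
    # theory predicts a lift-curve slope of 2*pi per radian, i.e. CL ~ 2*pi*alpha.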
flcond = vlm_steady.FlightConditions(ui=50.0, alpha=alpha, rho=1.0)
sim = vlm_steady.run_simulation(
wing=INFINITE_WING['wing'],
mesh=INFINITE_WING['mesh'],
flcond=flcond
)
cl = sim['cl_wing']
assert cl == pytest.approx(2 * np.pi * alpha, rel=5e-3)
def test_cl_slope_for_infinite_wing():
cls = []
alphas = [1 * np.pi / 180, 2 * np.pi / 180]
for alpha in alphas:
flcond = vlm_steady.FlightConditions(ui=50.0, alpha=alpha, rho=1.0)
sim = vlm_steady.run_simulation(
wing=INFINITE_WING['wing'],
mesh=INFINITE_WING['mesh'],
flcond=flcond
)
cls.append(sim['cl_wing'])
slope = (cls[1] - cls[0]) / (alphas[1] - alphas[0])
assert slope == pytest.approx(2 * np.pi, abs=1e-2)
|
the-stack_106_29068 | # ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""Provide a layer of abstraction for the ngraph++ runtime environment."""
import logging
from typing import List, Union
import numpy as np
from ngraph.impl import Function, Node, Shape, serialize, util
from ngraph.impl.runtime import Backend, Tensor
from ngraph.utils.types import get_dtype, NumericData
from ngraph.exceptions import UserInputError
log = logging.getLogger(__name__)
def runtime(backend_name='CPU'): # type: (str) -> 'Runtime'
"""Create a Runtime object (helper factory).
Use signature to parameterize runtime as needed.
"""
return Runtime(backend_name)
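# Rough usage sketch (assumes `model` is an already built ngraph Node or
# Function and `x` a matching numpy array):
#   rt = runtime(backend_name='CPU')
#   compute = rt.computation(model, *parameter_nodes)
#   output = compute(x)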
class Runtime:
"""Represents the ngraph++ runtime environment."""
def __init__(self, backend_name): # type: (str) -> None
self.backend_name = backend_name
self.backend = Backend.create(backend_name)
def __repr__(self): # type: () -> str
return '<Runtime: Backend=\'{}\'>'.format(self.backend_name)
def computation(self, node_or_function, *inputs):
# type: (Union[Node, Function], *Node) -> 'Computation'
"""Return a callable Computation object."""
if isinstance(node_or_function, Node):
ng_function = Function(node_or_function, inputs, node_or_function.name)
return Computation(self, ng_function)
elif isinstance(node_or_function, Function):
return Computation(self, node_or_function)
else:
raise TypeError('Runtime.computation must be called with an nGraph Function object '
'or an nGraph node object an optionally Parameter node objects. '
'Called with: %s', node_or_function)
class Computation(object):
"""ngraph callable computation object."""
def __init__(self, runtime, ng_function):
# type: (Runtime, Function) -> None
self.runtime = runtime
self.function = ng_function
self.parameters = ng_function.get_parameters()
self.tensor_views = [] # type: List[Tensor]
for parameter in self.parameters:
shape = parameter.get_shape()
element_type = parameter.get_element_type()
self.tensor_views.append(runtime.backend.create_tensor(element_type, shape))
def __repr__(self): # type: () -> str
params_string = ', '.join([param.name for param in self.parameters])
return '<Computation: {}({})>'.format(self.function.get_name(), params_string)
def __call__(self, *input_values): # type: (*NumericData) -> NumericData
"""Run computation on input values and return result."""
for tensor_view, value in zip(self.tensor_views, input_values):
if not isinstance(value, np.ndarray):
value = np.array(value)
Computation._write_ndarray_to_tensor_view(value, tensor_view)
result_element_type = self.function.get_output_element_type(0)
result_shape = self.function.get_output_shape(0)
result_dtype = get_dtype(result_element_type)
result_view = self.runtime.backend.create_tensor(result_element_type, result_shape)
result_arr = np.empty(result_shape, dtype=result_dtype)
self.runtime.backend.call(self.function, [result_view], self.tensor_views)
Computation._read_tensor_view_to_ndarray(result_view, result_arr)
result_arr = result_arr.reshape(result_shape)
return result_arr
def serialize(self, indent=0): # type: (int) -> str
"""Serialize function (compute graph) to a JSON string.
:param indent: set indent of serialized output
:return: serialized model
"""
return serialize(self.function, indent)
@staticmethod
def _get_buffer_size(element_type, element_count): # type: (Tensor, int) -> int
return int((element_type.bitwidth / 8.0) * element_count)
@staticmethod
def _write_ndarray_to_tensor_view(value, tensor_view):
# type: (np.ndarray, Tensor) -> None
tensor_view_dtype = get_dtype(tensor_view.element_type)
if list(tensor_view.shape) != list(value.shape) and len(value.shape) > 0:
raise UserInputError('Provided tensor\'s shape: %s does not match the expected: %s.',
list(value.shape), list(tensor_view.shape))
if value.dtype != tensor_view_dtype:
log.warning(
'Attempting to write a %s value to a %s tensor. Will attempt type conversion.',
value.dtype,
tensor_view.element_type)
value = value.astype(tensor_view_dtype)
buffer_size = Computation._get_buffer_size(
tensor_view.element_type, tensor_view.element_count)
nparray = np.ascontiguousarray(value)
tensor_view.write(util.numpy_to_c(nparray), 0, buffer_size)
@staticmethod
def _read_tensor_view_to_ndarray(tensor_view, output):
# type: (Tensor, np.ndarray) -> None
buffer_size = Computation._get_buffer_size(
tensor_view.element_type, tensor_view.element_count)
tensor_view.read(util.numpy_to_c(output), 0, buffer_size)
|
the-stack_106_29071 | import io
import os
import textwrap
import warnings
import unittest
import pathlib
from typing import Type, Iterable
from collections.abc import Mapping
from collections import UserDict
from compat import support, os_helper
from backports import configparser
def nice_literals(str):
"Remove b and u prefixes from reprs"
return str.replace("b'", "'").replace("u'", "'")
class SortedDict(UserDict):
def items(self):
return sorted(self.data.items())
def keys(self):
return sorted(self.data.keys())
def values(self):
return [i[1] for i in self.items()]
def iteritems(self):
return iter(self.items())
def iterkeys(self):
return iter(self.keys())
def itervalues(self):
return iter(self.values())
__iter__ = iterkeys
class CfgParserTestCaseClass(object):
allow_no_value = False
delimiters = ('=', ':') # type: Iterable[str]
comment_prefixes = (';', '#') # type: Iterable[str]
inline_comment_prefixes = (';', '#') # type: Iterable[str]
empty_lines_in_values = True
dict_type = configparser._default_dict # type: Type[Mapping]
strict = False
default_section = configparser.DEFAULTSECT
interpolation = configparser._UNSET
def newconfig(self, defaults=None):
arguments = dict(
defaults=defaults,
allow_no_value=self.allow_no_value,
delimiters=self.delimiters,
comment_prefixes=self.comment_prefixes,
inline_comment_prefixes=self.inline_comment_prefixes,
empty_lines_in_values=self.empty_lines_in_values,
dict_type=self.dict_type,
strict=self.strict,
default_section=self.default_section,
interpolation=self.interpolation,
)
instance = self.config_class(**arguments)
return instance
def fromstring(self, string, defaults=None):
cf = self.newconfig(defaults)
cf.read_string(string)
return cf
class BasicTestCase(CfgParserTestCaseClass):
def basic_test(self, cf):
E = [
'Commented Bar',
'Foo Bar',
'Internationalized Stuff',
'Long Line',
'Section\\with$weird%characters[\t',
'Spaces',
'Spacey Bar',
'Spacey Bar From The Beginning',
'Types',
'This One Has A ] In It',
]
if self.allow_no_value:
E.append('NoValue')
E.sort()
F = [('baz', 'qwe'), ('foo', 'bar3')]
# API access
L = cf.sections()
L.sort()
eq = self.assertEqual
eq(L, E)
L = cf.items('Spacey Bar From The Beginning')
L.sort()
eq(L, F)
# mapping access
L = [section for section in cf]
L.sort()
E.append(self.default_section)
E.sort()
eq(L, E)
L = cf['Spacey Bar From The Beginning'].items()
L = sorted(list(L))
eq(L, F)
L = cf.items()
L = sorted(list(L))
self.assertEqual(len(L), len(E))
for name, section in L:
eq(name, section.name)
eq(cf.defaults(), cf[self.default_section])
# The use of spaces in the section names serves as a
# regression test for SourceForge bug #583248:
# http://www.python.org/sf/583248
# API access
eq(cf.get('Foo Bar', 'foo'), 'bar1')
eq(cf.get('Spacey Bar', 'foo'), 'bar2')
eq(cf.get('Spacey Bar From The Beginning', 'foo'), 'bar3')
eq(cf.get('Spacey Bar From The Beginning', 'baz'), 'qwe')
eq(cf.get('Commented Bar', 'foo'), 'bar4')
eq(cf.get('Commented Bar', 'baz'), 'qwe')
eq(cf.get('Spaces', 'key with spaces'), 'value')
eq(cf.get('Spaces', 'another with spaces'), 'splat!')
eq(cf.getint('Types', 'int'), 42)
eq(cf.get('Types', 'int'), "42")
self.assertAlmostEqual(cf.getfloat('Types', 'float'), 0.44)
eq(cf.get('Types', 'float'), "0.44")
eq(cf.getboolean('Types', 'boolean'), False)
eq(cf.get('Types', '123'), 'strange but acceptable')
eq(cf.get('This One Has A ] In It', 'forks'), 'spoons')
if self.allow_no_value:
eq(cf.get('NoValue', 'option-without-value'), None)
# test vars= and fallback=
eq(cf.get('Foo Bar', 'foo', fallback='baz'), 'bar1')
eq(cf.get('Foo Bar', 'foo', vars={'foo': 'baz'}), 'baz')
with self.assertRaises(configparser.NoSectionError):
cf.get('No Such Foo Bar', 'foo')
with self.assertRaises(configparser.NoOptionError):
cf.get('Foo Bar', 'no-such-foo')
eq(cf.get('No Such Foo Bar', 'foo', fallback='baz'), 'baz')
eq(cf.get('Foo Bar', 'no-such-foo', fallback='baz'), 'baz')
eq(cf.get('Spacey Bar', 'foo', fallback=None), 'bar2')
eq(cf.get('No Such Spacey Bar', 'foo', fallback=None), None)
eq(cf.getint('Types', 'int', fallback=18), 42)
eq(cf.getint('Types', 'no-such-int', fallback=18), 18)
eq(cf.getint('Types', 'no-such-int', fallback="18"), "18") # sic!
with self.assertRaises(configparser.NoOptionError):
cf.getint('Types', 'no-such-int')
self.assertAlmostEqual(cf.getfloat('Types', 'float', fallback=0.0), 0.44)
self.assertAlmostEqual(cf.getfloat('Types', 'no-such-float', fallback=0.0), 0.0)
eq(cf.getfloat('Types', 'no-such-float', fallback="0.0"), "0.0") # sic!
with self.assertRaises(configparser.NoOptionError):
cf.getfloat('Types', 'no-such-float')
eq(cf.getboolean('Types', 'boolean', fallback=True), False)
eq(cf.getboolean('Types', 'no-such-boolean', fallback="yes"), "yes") # sic!
eq(cf.getboolean('Types', 'no-such-boolean', fallback=True), True)
with self.assertRaises(configparser.NoOptionError):
cf.getboolean('Types', 'no-such-boolean')
eq(cf.getboolean('No Such Types', 'boolean', fallback=True), True)
if self.allow_no_value:
eq(cf.get('NoValue', 'option-without-value', fallback=False), None)
eq(cf.get('NoValue', 'no-such-option-without-value', fallback=False), False)
# mapping access
eq(cf['Foo Bar']['foo'], 'bar1')
eq(cf['Spacey Bar']['foo'], 'bar2')
section = cf['Spacey Bar From The Beginning']
eq(section.name, 'Spacey Bar From The Beginning')
self.assertIs(section.parser, cf)
with self.assertRaises(AttributeError):
section.name = 'Name is read-only'
with self.assertRaises(AttributeError):
section.parser = 'Parser is read-only'
eq(section['foo'], 'bar3')
eq(section['baz'], 'qwe')
eq(cf['Commented Bar']['foo'], 'bar4')
eq(cf['Commented Bar']['baz'], 'qwe')
eq(cf['Spaces']['key with spaces'], 'value')
eq(cf['Spaces']['another with spaces'], 'splat!')
eq(
cf['Long Line']['foo'],
'this line is much, much longer than my editor\nlikes it.',
)
if self.allow_no_value:
eq(cf['NoValue']['option-without-value'], None)
# test vars= and fallback=
eq(cf['Foo Bar'].get('foo', 'baz'), 'bar1')
eq(cf['Foo Bar'].get('foo', fallback='baz'), 'bar1')
eq(cf['Foo Bar'].get('foo', vars={'foo': 'baz'}), 'baz')
with self.assertRaises(KeyError):
cf['No Such Foo Bar']['foo']
with self.assertRaises(KeyError):
cf['Foo Bar']['no-such-foo']
with self.assertRaises(KeyError):
cf['No Such Foo Bar'].get('foo', fallback='baz')
eq(cf['Foo Bar'].get('no-such-foo', 'baz'), 'baz')
eq(cf['Foo Bar'].get('no-such-foo', fallback='baz'), 'baz')
eq(cf['Foo Bar'].get('no-such-foo'), None)
eq(cf['Spacey Bar'].get('foo', None), 'bar2')
eq(cf['Spacey Bar'].get('foo', fallback=None), 'bar2')
with self.assertRaises(KeyError):
cf['No Such Spacey Bar'].get('foo', None)
eq(cf['Types'].getint('int', 18), 42)
eq(cf['Types'].getint('int', fallback=18), 42)
eq(cf['Types'].getint('no-such-int', 18), 18)
eq(cf['Types'].getint('no-such-int', fallback=18), 18)
eq(cf['Types'].getint('no-such-int', "18"), "18") # sic!
eq(cf['Types'].getint('no-such-int', fallback="18"), "18") # sic!
eq(cf['Types'].getint('no-such-int'), None)
self.assertAlmostEqual(cf['Types'].getfloat('float', 0.0), 0.44)
self.assertAlmostEqual(cf['Types'].getfloat('float', fallback=0.0), 0.44)
self.assertAlmostEqual(cf['Types'].getfloat('no-such-float', 0.0), 0.0)
self.assertAlmostEqual(cf['Types'].getfloat('no-such-float', fallback=0.0), 0.0)
eq(cf['Types'].getfloat('no-such-float', "0.0"), "0.0") # sic!
eq(cf['Types'].getfloat('no-such-float', fallback="0.0"), "0.0") # sic!
eq(cf['Types'].getfloat('no-such-float'), None)
eq(cf['Types'].getboolean('boolean', True), False)
eq(cf['Types'].getboolean('boolean', fallback=True), False)
eq(cf['Types'].getboolean('no-such-boolean', "yes"), "yes") # sic!
eq(cf['Types'].getboolean('no-such-boolean', fallback="yes"), "yes") # sic!
eq(cf['Types'].getboolean('no-such-boolean', True), True)
eq(cf['Types'].getboolean('no-such-boolean', fallback=True), True)
eq(cf['Types'].getboolean('no-such-boolean'), None)
if self.allow_no_value:
eq(cf['NoValue'].get('option-without-value', False), None)
eq(cf['NoValue'].get('option-without-value', fallback=False), None)
eq(cf['NoValue'].get('no-such-option-without-value', False), False)
eq(cf['NoValue'].get('no-such-option-without-value', fallback=False), False)
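# Note the asymmetry exercised above: the mapping (section proxy) API behaves
# like dict.get() -- a missing option yields None or the given fallback,
# which may even be passed positionally -- whereas the classic
# getint()/getfloat()/getboolean() raise NoOptionError when no fallback= is
# supplied.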
# Make sure the right things happen for remove_section() and
# remove_option(); added to include check for SourceForge bug #123324.
cf[self.default_section]['this_value'] = '1'
cf[self.default_section]['that_value'] = '2'
# API access
self.assertTrue(cf.remove_section('Spaces'))
self.assertFalse(cf.has_option('Spaces', 'key with spaces'))
self.assertFalse(cf.remove_section('Spaces'))
self.assertFalse(cf.remove_section(self.default_section))
self.assertTrue(
cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report existence of option",
)
self.assertFalse(
cf.has_option('Foo Bar', 'foo'), "remove_option() failed to remove option"
)
self.assertFalse(
cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report non-existence of option"
" that was removed",
)
self.assertTrue(cf.has_option('Foo Bar', 'this_value'))
self.assertFalse(cf.remove_option('Foo Bar', 'this_value'))
self.assertTrue(cf.remove_option(self.default_section, 'this_value'))
self.assertFalse(cf.has_option('Foo Bar', 'this_value'))
self.assertFalse(cf.remove_option(self.default_section, 'this_value'))
with self.assertRaises(configparser.NoSectionError) as cm:
cf.remove_option('No Such Section', 'foo')
self.assertEqual(cm.exception.args, ('No Such Section',))
eq(
cf.get('Long Line', 'foo'),
'this line is much, much longer than my editor\nlikes it.',
)
# mapping access
del cf['Types']
self.assertFalse('Types' in cf)
with self.assertRaises(KeyError):
del cf['Types']
with self.assertRaises(ValueError):
del cf[self.default_section]
del cf['Spacey Bar']['foo']
self.assertFalse('foo' in cf['Spacey Bar'])
with self.assertRaises(KeyError):
del cf['Spacey Bar']['foo']
self.assertTrue('that_value' in cf['Spacey Bar'])
with self.assertRaises(KeyError):
del cf['Spacey Bar']['that_value']
del cf[self.default_section]['that_value']
self.assertFalse('that_value' in cf['Spacey Bar'])
with self.assertRaises(KeyError):
del cf[self.default_section]['that_value']
with self.assertRaises(KeyError):
del cf['No Such Section']['foo']
# Don't add new asserts below in this method as most of the options
# and sections are now removed.
def test_basic(self):
config_string = """\
[Foo Bar]
foo{0[0]}bar1
[Spacey Bar]
foo {0[0]} bar2
[Spacey Bar From The Beginning]
foo {0[0]} bar3
baz {0[0]} qwe
[Commented Bar]
foo{0[1]} bar4 {1[1]} comment
baz{0[0]}qwe {1[0]}another one
[Long Line]
foo{0[1]} this line is much, much longer than my editor
likes it.
[Section\\with$weird%characters[\t]
[Internationalized Stuff]
foo[bg]{0[1]} Bulgarian
foo{0[0]}Default
foo[en]{0[0]}English
foo[de]{0[0]}Deutsch
[Spaces]
key with spaces {0[1]} value
another with spaces {0[0]} splat!
[Types]
int {0[1]} 42
float {0[0]} 0.44
boolean {0[0]} NO
123 {0[1]} strange but acceptable
[This One Has A ] In It]
forks {0[0]} spoons
""".format(
self.delimiters, self.comment_prefixes
)
if self.allow_no_value:
config_string += "[NoValue]\n" "option-without-value\n"
cf = self.fromstring(config_string)
self.basic_test(cf)
if self.strict:
with self.assertRaises(configparser.DuplicateOptionError):
cf.read_string(
textwrap.dedent(
"""\
[Duplicate Options Here]
option {0[0]} with a value
option {0[1]} with another value
""".format(
self.delimiters
)
)
)
with self.assertRaises(configparser.DuplicateSectionError):
cf.read_string(
textwrap.dedent(
"""\
[And Now For Something]
completely different {0[0]} True
[And Now For Something]
the larch {0[1]} 1
""".format(
self.delimiters
)
)
)
else:
cf.read_string(
textwrap.dedent(
"""\
[Duplicate Options Here]
option {0[0]} with a value
option {0[1]} with another value
""".format(
self.delimiters
)
)
)
cf.read_string(
textwrap.dedent(
"""\
[And Now For Something]
completely different {0[0]} True
[And Now For Something]
the larch {0[1]} 1
""".format(
self.delimiters
)
)
)
def test_basic_from_dict(self):
config = {
"Foo Bar": {"foo": "bar1"},
"Spacey Bar": {"foo": "bar2"},
"Spacey Bar From The Beginning": {"foo": "bar3", "baz": "qwe"},
"Commented Bar": {"foo": "bar4", "baz": "qwe"},
"Long Line": {
"foo": "this line is much, much longer than my editor\nlikes " "it."
},
"Section\\with$weird%characters[\t": {},
"Internationalized Stuff": {
"foo[bg]": "Bulgarian",
"foo": "Default",
"foo[en]": "English",
"foo[de]": "Deutsch",
},
"Spaces": {"key with spaces": "value", "another with spaces": "splat!"},
"Types": {
"int": 42,
"float": 0.44,
"boolean": False,
123: "strange but acceptable",
},
"This One Has A ] In It": {"forks": "spoons"},
}
if self.allow_no_value:
config.update({"NoValue": {"option-without-value": None}})
cf = self.newconfig()
cf.read_dict(config)
self.basic_test(cf)
if self.strict:
with self.assertRaises(configparser.DuplicateSectionError):
cf.read_dict({'1': {'key': 'value'}, 1: {'key2': 'value2'}})
with self.assertRaises(configparser.DuplicateOptionError):
cf.read_dict(
{
"Duplicate Options Here": {
'option': 'with a value',
'OPTION': 'with another value',
}
}
)
else:
cf.read_dict({'section': {'key': 'value'}, 'SECTION': {'key2': 'value2'}})
cf.read_dict(
{
"Duplicate Options Here": {
'option': 'with a value',
'OPTION': 'with another value',
}
}
)
def test_case_sensitivity(self):
cf = self.newconfig()
cf.add_section("A")
cf.add_section("a")
cf.add_section("B")
L = cf.sections()
L.sort()
eq = self.assertEqual
eq(L, ["A", "B", "a"])
cf.set("a", "B", "value")
eq(cf.options("a"), ["b"])
eq(
cf.get("a", "b"),
"value",
"could not locate option, expecting case-insensitive option names",
)
with self.assertRaises(configparser.NoSectionError):
# section names are case-sensitive
cf.set("b", "A", "value")
self.assertTrue(cf.has_option("a", "b"))
self.assertFalse(cf.has_option("b", "b"))
cf.set("A", "A-B", "A-B value")
for opt in ("a-b", "A-b", "a-B", "A-B"):
self.assertTrue(
cf.has_option("A", opt),
"has_option() returned false for option which should exist",
)
eq(cf.options("A"), ["a-b"])
eq(cf.options("a"), ["b"])
cf.remove_option("a", "B")
eq(cf.options("a"), [])
# SF bug #432369:
cf = self.fromstring(
"[MySection]\nOption{0} first line \n\tsecond line \n".format(
self.delimiters[0]
)
)
eq(cf.options("MySection"), ["option"])
eq(cf.get("MySection", "Option"), "first line\nsecond line")
# SF bug #561822:
cf = self.fromstring(
"[section]\n" "nekey{0}nevalue\n".format(self.delimiters[0]),
defaults={"key": "value"},
)
self.assertTrue(cf.has_option("section", "Key"))
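# What this pins down: option names are run through optionxform() (str.lower
# by default), so 'B'/'b' and 'A-B'/'a-b' name the same option, while section
# names are compared verbatim and stay case-sensitive -- hence
# cf.set("b", ...) above fails with NoSectionError even though [B] exists.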
def test_case_sensitivity_mapping_access(self):
cf = self.newconfig()
cf["A"] = {}
cf["a"] = {"B": "value"}
cf["B"] = {}
L = [section for section in cf]
L.sort()
eq = self.assertEqual
eq(L, sorted(["A", "B", self.default_section, "a"]))
eq(list(cf["a"].keys()), ["b"])
eq(
cf["a"]["b"],
"value",
"could not locate option, expecting case-insensitive option names",
)
with self.assertRaises(KeyError):
# section names are case-sensitive
cf["b"]["A"] = "value"
self.assertTrue("b" in cf["a"])
cf["A"]["A-B"] = "A-B value"
for opt in ("a-b", "A-b", "a-B", "A-B"):
self.assertTrue(
opt in cf["A"],
"has_option() returned false for option which should exist",
)
eq(list(cf["A"].keys()), ["a-b"])
eq(list(cf["a"].keys()), ["b"])
del cf["a"]["B"]
eq(len(cf["a"].keys()), 0)
# SF bug #432369:
cf = self.fromstring(
"[MySection]\nOption{0} first line \n\tsecond line \n".format(
self.delimiters[0]
)
)
eq(list(cf["MySection"].keys()), ["option"])
eq(cf["MySection"]["Option"], "first line\nsecond line")
# SF bug #561822:
cf = self.fromstring(
"[section]\n" "nekey{0}nevalue\n".format(self.delimiters[0]),
defaults={"key": "value"},
)
self.assertTrue("Key" in cf["section"])
def test_default_case_sensitivity(self):
cf = self.newconfig({"foo": "Bar"})
self.assertEqual(
cf.get(self.default_section, "Foo"),
"Bar",
"could not locate option, expecting case-insensitive option names",
)
cf = self.newconfig({"Foo": "Bar"})
self.assertEqual(
cf.get(self.default_section, "Foo"),
"Bar",
"could not locate option, expecting case-insensitive defaults",
)
def test_parse_errors(self):
cf = self.newconfig()
self.parse_error(
cf,
configparser.ParsingError,
"[Foo]\n" "{0}val-without-opt-name\n".format(self.delimiters[0]),
)
self.parse_error(
cf,
configparser.ParsingError,
"[Foo]\n" "{0}val-without-opt-name\n".format(self.delimiters[1]),
)
e = self.parse_error(
cf, configparser.MissingSectionHeaderError, "No Section!\n"
)
self.assertEqual(e.args, ('<???>', 1, "No Section!\n"))
if not self.allow_no_value:
e = self.parse_error(
cf, configparser.ParsingError, "[Foo]\n wrong-indent\n"
)
self.assertEqual(e.args, ('<???>',))
# read_file on a real file
tricky = support.findfile("cfgparser.3")
if self.delimiters[0] == '=':
error = configparser.ParsingError
expected = (tricky,)
else:
error = configparser.MissingSectionHeaderError
expected = (
tricky,
1,
' # INI with as many tricky parts as possible\n',
)
with open(tricky, encoding='utf-8') as f:
e = self.parse_error(cf, error, f)
self.assertEqual(e.args, expected)
def parse_error(self, cf, exc, src):
if hasattr(src, 'readline'):
sio = src
else:
sio = io.StringIO(src)
with self.assertRaises(exc) as cm:
cf.read_file(sio)
return cm.exception
def test_query_errors(self):
cf = self.newconfig()
self.assertEqual(
cf.sections(), [], "new ConfigParser should have no defined sections"
)
self.assertFalse(
cf.has_section("Foo"),
"new ConfigParser should have no acknowledged " "sections",
)
with self.assertRaises(configparser.NoSectionError):
cf.options("Foo")
with self.assertRaises(configparser.NoSectionError):
cf.set("foo", "bar", "value")
e = self.get_error(cf, configparser.NoSectionError, "foo", "bar")
self.assertEqual(e.args, ("foo",))
cf.add_section("foo")
e = self.get_error(cf, configparser.NoOptionError, "foo", "bar")
self.assertEqual(e.args, ("bar", "foo"))
def get_error(self, cf, exc, section, option):
try:
cf.get(section, option)
except exc as e:
return e
else:
exc_name = exc.__name__
if hasattr(exc, '__qualname__'):
exc_name = exc.__qualname__
self.fail("expected exception type %s.%s" % (exc.__module__, exc_name))
def test_boolean(self):
cf = self.fromstring(
"[BOOLTEST]\n"
"T1{equals}1\n"
"T2{equals}TRUE\n"
"T3{equals}True\n"
"T4{equals}oN\n"
"T5{equals}yes\n"
"F1{equals}0\n"
"F2{equals}FALSE\n"
"F3{equals}False\n"
"F4{equals}oFF\n"
"F5{equals}nO\n"
"E1{equals}2\n"
"E2{equals}foo\n"
"E3{equals}-1\n"
"E4{equals}0.1\n"
"E5{equals}FALSE AND MORE".format(equals=self.delimiters[0])
)
for x in range(1, 5):
self.assertTrue(cf.getboolean('BOOLTEST', 't%d' % x))
self.assertFalse(cf.getboolean('BOOLTEST', 'f%d' % x))
self.assertRaises(ValueError, cf.getboolean, 'BOOLTEST', 'e%d' % x)
def test_weird_errors(self):
cf = self.newconfig()
cf.add_section("Foo")
with self.assertRaises(configparser.DuplicateSectionError) as cm:
cf.add_section("Foo")
e = cm.exception
self.assertEqual(str(e), "Section {0!r} already exists".format('Foo'))
self.assertEqual(e.args, ("Foo", None, None))
if self.strict:
with self.assertRaises(configparser.DuplicateSectionError) as cm:
cf.read_string(
textwrap.dedent(
"""\
[Foo]
will this be added{equals}True
[Bar]
what about this{equals}True
[Foo]
oops{equals}this won't
""".format(
equals=self.delimiters[0]
)
),
source='<foo-bar>',
)
e = cm.exception
self.assertEqual(
str(e),
"While reading from {0!r} [line 5]: "
"section {1!r} already exists".format('<foo-bar>', 'Foo'),
)
self.assertEqual(e.args, ("Foo", '<foo-bar>', 5))
with self.assertRaises(configparser.DuplicateOptionError) as cm:
cf.read_dict({'Bar': {'opt': 'val', 'OPT': 'is really `opt`'}})
e = cm.exception
self.assertEqual(
str(e),
"While reading from {0!r}: option {1!r} "
"in section {2!r} already exists".format('<dict>', 'opt', 'Bar'),
)
self.assertEqual(e.args, ("Bar", "opt", "<dict>", None))
def test_write(self):
config_string = (
"[Long Line]\n"
"foo{0[0]} this line is much, much longer than my editor\n"
" likes it.\n"
"[{default_section}]\n"
"foo{0[1]} another very\n"
" long line\n"
"[Long Line - With Comments!]\n"
"test {0[1]} we {comment} can\n"
" also {comment} place\n"
" comments {comment} in\n"
" multiline {comment} values"
"\n".format(
self.delimiters,
comment=self.comment_prefixes[0],
default_section=self.default_section,
)
)
if self.allow_no_value:
config_string += "[Valueless]\n" "option-without-value\n"
cf = self.fromstring(config_string)
for space_around_delimiters in (True, False):
output = io.StringIO()
cf.write(output, space_around_delimiters=space_around_delimiters)
delimiter = self.delimiters[0]
if space_around_delimiters:
delimiter = " {0} ".format(delimiter)
expect_string = (
"[{default_section}]\n"
"foo{equals}another very\n"
"\tlong line\n"
"\n"
"[Long Line]\n"
"foo{equals}this line is much, much longer than my editor\n"
"\tlikes it.\n"
"\n"
"[Long Line - With Comments!]\n"
"test{equals}we\n"
"\talso\n"
"\tcomments\n"
"\tmultiline\n"
"\n".format(equals=delimiter, default_section=self.default_section)
)
if self.allow_no_value:
expect_string += "[Valueless]\n" "option-without-value\n" "\n"
self.assertEqual(output.getvalue(), expect_string)
def test_set_string_types(self):
cf = self.fromstring(
"[sect]\n" "option1{eq}foo\n".format(eq=self.delimiters[0])
)
# Check that we don't get an exception when setting values in
# an existing section using strings:
class mystr(str):
pass
cf.set("sect", "option1", "splat")
cf.set("sect", "option1", mystr("splat"))
cf.set("sect", "option2", "splat")
cf.set("sect", "option2", mystr("splat"))
cf.set("sect", "option1", "splat")
cf.set("sect", "option2", "splat")
def test_read_returns_file_list(self):
if self.delimiters[0] != '=':
self.skipTest('incompatible format')
file1 = support.findfile("cfgparser.1")
# check when we pass a mix of readable and non-readable files:
cf = self.newconfig()
parsed_files = cf.read([file1, "nonexistent-file"], encoding="utf-8")
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only a filename:
cf = self.newconfig()
parsed_files = cf.read(file1, encoding="utf-8")
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only a Path object:
cf = self.newconfig()
parsed_files = cf.read(pathlib.Path(file1), encoding="utf-8")
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass both a filename and a Path object:
cf = self.newconfig()
parsed_files = cf.read([pathlib.Path(file1), file1], encoding="utf-8")
self.assertEqual(parsed_files, [file1, file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only missing files:
cf = self.newconfig()
parsed_files = cf.read(["nonexistent-file"], encoding="utf-8")
self.assertEqual(parsed_files, [])
# check when we pass no files:
cf = self.newconfig()
parsed_files = cf.read([], encoding="utf-8")
self.assertEqual(parsed_files, [])
def test_read_returns_file_list_with_bytestring_path(self):
if self.delimiters[0] != '=':
self.skipTest('incompatible format')
file1_bytestring = support.findfile("cfgparser.1").encode()
# check when passing an existing bytestring path
cf = self.newconfig()
parsed_files = cf.read(file1_bytestring, encoding="utf-8")
self.assertEqual(parsed_files, [file1_bytestring])
# check when passing a non-existent bytestring path
cf = self.newconfig()
parsed_files = cf.read(b'nonexistent-file', encoding="utf-8")
self.assertEqual(parsed_files, [])
# check when passing both an existing and a non-existent bytestring path
cf = self.newconfig()
parsed_files = cf.read(
[file1_bytestring, b'nonexistent-file'], encoding="utf-8"
)
self.assertEqual(parsed_files, [file1_bytestring])
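# Taken together: read() silently skips files it cannot open and returns only
# the paths it actually parsed, as str for string/Path arguments and as bytes
# for bytestring arguments.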
# shared by subclasses
def get_interpolation_config(self):
return self.fromstring(
"[Foo]\n"
"bar{equals}something %(with1)s interpolation (1 step)\n"
"bar9{equals}something %(with9)s lots of interpolation (9 steps)\n"
"bar10{equals}something %(with10)s lots of interpolation (10 steps)\n"
"bar11{equals}something %(with11)s lots of interpolation (11 steps)\n"
"with11{equals}%(with10)s\n"
"with10{equals}%(with9)s\n"
"with9{equals}%(with8)s\n"
"with8{equals}%(With7)s\n"
"with7{equals}%(WITH6)s\n"
"with6{equals}%(with5)s\n"
"With5{equals}%(with4)s\n"
"WITH4{equals}%(with3)s\n"
"with3{equals}%(with2)s\n"
"with2{equals}%(with1)s\n"
"with1{equals}with\n"
"\n"
"[Mutual Recursion]\n"
"foo{equals}%(bar)s\n"
"bar{equals}%(foo)s\n"
"\n"
"[Interpolation Error]\n"
# no definition for 'reference'
"name{equals}%(reference)s\n".format(equals=self.delimiters[0])
)
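# Shared fixture: RawConfigParser returns these values verbatim, while
# ConfigParser with basic interpolation expands the %(...)s chains up to
# configparser.MAX_INTERPOLATION_DEPTH (10) steps -- 'bar10' resolves,
# 'bar11' raises InterpolationDepthError.  The mixed-case references
# (With7, WITH6, ...) also rely on referenced option names going through
# optionxform().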
def check_items_config(self, expected):
cf = self.fromstring(
"""
[section]
name {0[0]} %(value)s
key{0[1]} |%(name)s|
getdefault{0[1]} |%(default)s|
""".format(
self.delimiters
),
defaults={"default": "<default>"},
)
L = list(cf.items("section", vars={'value': 'value'}))
L.sort()
self.assertEqual(L, expected)
with self.assertRaises(configparser.NoSectionError):
cf.items("no such section")
def test_popitem(self):
cf = self.fromstring(
"""
[section1]
name1 {0[0]} value1
[section2]
name2 {0[0]} value2
[section3]
name3 {0[0]} value3
""".format(
self.delimiters
),
defaults={"default": "<default>"},
)
self.assertEqual(cf.popitem()[0], 'section1')
self.assertEqual(cf.popitem()[0], 'section2')
self.assertEqual(cf.popitem()[0], 'section3')
with self.assertRaises(KeyError):
cf.popitem()
def test_clear(self):
cf = self.newconfig({"foo": "Bar"})
self.assertEqual(
cf.get(self.default_section, "Foo"),
"Bar",
"could not locate option, expecting case-insensitive option names",
)
cf['zing'] = {'option1': 'value1', 'option2': 'value2'}
self.assertEqual(cf.sections(), ['zing'])
self.assertEqual(set(cf['zing'].keys()), set(['option1', 'option2', 'foo']))
cf.clear()
self.assertEqual(set(cf.sections()), set())
self.assertEqual(set(cf[self.default_section].keys()), set(['foo']))
def test_setitem(self):
cf = self.fromstring(
"""
[section1]
name1 {0[0]} value1
[section2]
name2 {0[0]} value2
[section3]
name3 {0[0]} value3
""".format(
self.delimiters
),
defaults={"nameD": "valueD"},
)
self.assertEqual(set(cf['section1'].keys()), set(['name1', 'named']))
self.assertEqual(set(cf['section2'].keys()), set(['name2', 'named']))
self.assertEqual(set(cf['section3'].keys()), set(['name3', 'named']))
self.assertEqual(cf['section1']['name1'], 'value1')
self.assertEqual(cf['section2']['name2'], 'value2')
self.assertEqual(cf['section3']['name3'], 'value3')
self.assertEqual(cf.sections(), ['section1', 'section2', 'section3'])
cf['section2'] = {'name22': 'value22'}
self.assertEqual(set(cf['section2'].keys()), set(['name22', 'named']))
self.assertEqual(cf['section2']['name22'], 'value22')
self.assertNotIn('name2', cf['section2'])
self.assertEqual(cf.sections(), ['section1', 'section2', 'section3'])
cf['section3'] = {}
self.assertEqual(set(cf['section3'].keys()), set(['named']))
self.assertNotIn('name3', cf['section3'])
self.assertEqual(cf.sections(), ['section1', 'section2', 'section3'])
# For bpo-32108, assigning default_section to itself.
cf[self.default_section] = cf[self.default_section]
self.assertNotEqual(set(cf[self.default_section].keys()), set())
cf[self.default_section] = {}
self.assertEqual(set(cf[self.default_section].keys()), set())
self.assertEqual(set(cf['section1'].keys()), set(['name1']))
self.assertEqual(set(cf['section2'].keys()), set(['name22']))
self.assertEqual(set(cf['section3'].keys()), set())
self.assertEqual(cf.sections(), ['section1', 'section2', 'section3'])
# For bpo-32108, assigning section to itself.
cf['section2'] = cf['section2']
self.assertEqual(set(cf['section2'].keys()), {'name22'})
def test_invalid_multiline_value(self):
if self.allow_no_value:
self.skipTest('if no_value is allowed, ParsingError is not raised')
invalid = textwrap.dedent(
"""\
[DEFAULT]
test {0} test
invalid""".format(
self.delimiters[0]
)
)
cf = self.newconfig()
with self.assertRaises(configparser.ParsingError):
cf.read_string(invalid)
self.assertEqual(cf.get('DEFAULT', 'test'), 'test')
self.assertEqual(cf['DEFAULT']['test'], 'test')
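# Everything parsed before the offending 'invalid' line is kept: read_string()
# raised, yet 'test' is already available through both access styles.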
class StrictTestCase(BasicTestCase, unittest.TestCase):
config_class = configparser.RawConfigParser
strict = True
class ConfigParserTestCase(BasicTestCase, unittest.TestCase):
config_class = configparser.ConfigParser
def test_interpolation(self):
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "bar"), "something with interpolation (1 step)")
eq(cf.get("Foo", "bar9"), "something with lots of interpolation (9 steps)")
eq(cf.get("Foo", "bar10"), "something with lots of interpolation (10 steps)")
e = self.get_error(cf, configparser.InterpolationDepthError, "Foo", "bar11")
if self.interpolation == configparser._UNSET:
self.assertEqual(
e.args,
(
"bar11",
"Foo",
"something %(with11)s lots of interpolation (11 steps)",
),
)
elif isinstance(self.interpolation, configparser.LegacyInterpolation):
self.assertEqual(
e.args,
(
"bar11",
"Foo",
"something %(with11)s lots of interpolation (11 steps)",
),
)
def test_interpolation_missing_value(self):
cf = self.get_interpolation_config()
e = self.get_error(
cf,
configparser.InterpolationMissingOptionError,
"Interpolation Error",
"name",
)
self.assertEqual(e.reference, "reference")
self.assertEqual(e.section, "Interpolation Error")
self.assertEqual(e.option, "name")
if self.interpolation == configparser._UNSET:
self.assertEqual(
e.args, ('name', 'Interpolation Error', '%(reference)s', 'reference')
)
elif isinstance(self.interpolation, configparser.LegacyInterpolation):
self.assertEqual(
e.args, ('name', 'Interpolation Error', '%(reference)s', 'reference')
)
def test_items(self):
self.check_items_config(
[
('default', '<default>'),
('getdefault', '|<default>|'),
('key', '|value|'),
('name', 'value'),
]
)
def test_safe_interpolation(self):
# See http://www.python.org/sf/511737
cf = self.fromstring(
"[section]\n"
"option1{eq}xxx\n"
"option2{eq}%(option1)s/xxx\n"
"ok{eq}%(option1)s/%%s\n"
"not_ok{eq}%(option2)s/%%s".format(eq=self.delimiters[0])
)
self.assertEqual(cf.get("section", "ok"), "xxx/%s")
if self.interpolation == configparser._UNSET:
self.assertEqual(cf.get("section", "not_ok"), "xxx/xxx/%s")
elif isinstance(self.interpolation, configparser.LegacyInterpolation):
with self.assertRaises(TypeError):
cf.get("section", "not_ok")
def test_set_malformatted_interpolation(self):
cf = self.fromstring(
"[sect]\n" "option1{eq}foo\n".format(eq=self.delimiters[0])
)
self.assertEqual(cf.get('sect', "option1"), "foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "%foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "foo%")
self.assertRaises(ValueError, cf.set, "sect", "option1", "f%oo")
self.assertEqual(cf.get('sect', "option1"), "foo")
# bug #5741: double percents are *not* malformed
cf.set("sect", "option2", "foo%%bar")
self.assertEqual(cf.get("sect", "option2"), "foo%bar")
def test_set_nonstring_types(self):
cf = self.fromstring(
"[sect]\n" "option1{eq}foo\n".format(eq=self.delimiters[0])
)
# Check that we get a TypeError when setting non-string values
# in an existing section:
self.assertRaises(TypeError, cf.set, "sect", "option1", 1)
self.assertRaises(TypeError, cf.set, "sect", "option1", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option1", object())
self.assertRaises(TypeError, cf.set, "sect", "option2", 1)
self.assertRaises(TypeError, cf.set, "sect", "option2", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option2", object())
self.assertRaises(TypeError, cf.set, "sect", 123, "invalid opt name!")
self.assertRaises(TypeError, cf.add_section, 123)
def test_add_section_default(self):
cf = self.newconfig()
self.assertRaises(ValueError, cf.add_section, self.default_section)
def test_defaults_keyword(self):
"""bpo-23835 fix for ConfigParser"""
cf = self.newconfig(defaults={1: 2.4})
self.assertEqual(cf[self.default_section]['1'], '2.4')
self.assertAlmostEqual(cf[self.default_section].getfloat('1'), 2.4)
cf = self.newconfig(defaults={"A": 5.2})
self.assertEqual(cf[self.default_section]['a'], '5.2')
self.assertAlmostEqual(cf[self.default_section].getfloat('a'), 5.2)
class ConfigParserTestCaseNoInterpolation(BasicTestCase, unittest.TestCase):
config_class = configparser.ConfigParser
interpolation = None
ini = textwrap.dedent(
"""
[numbers]
one = 1
two = %(one)s * 2
three = ${common:one} * 3
[hexen]
sixteen = ${numbers:two} * 8
"""
).strip()
def assertMatchesIni(self, cf):
self.assertEqual(cf['numbers']['one'], '1')
self.assertEqual(cf['numbers']['two'], '%(one)s * 2')
self.assertEqual(cf['numbers']['three'], '${common:one} * 3')
self.assertEqual(cf['hexen']['sixteen'], '${numbers:two} * 8')
def test_no_interpolation(self):
cf = self.fromstring(self.ini)
self.assertMatchesIni(cf)
def test_empty_case(self):
cf = self.newconfig()
self.assertIsNone(cf.read_string(""))
def test_none_as_default_interpolation(self):
class CustomConfigParser(configparser.ConfigParser):
_DEFAULT_INTERPOLATION = None
cf = CustomConfigParser()
cf.read_string(self.ini)
self.assertMatchesIni(cf)
class ConfigParserTestCaseLegacyInterpolation(ConfigParserTestCase, unittest.TestCase):
config_class = configparser.ConfigParser
interpolation = configparser.LegacyInterpolation()
def test_set_malformatted_interpolation(self):
cf = self.fromstring(
"[sect]\n" "option1{eq}foo\n".format(eq=self.delimiters[0])
)
self.assertEqual(cf.get('sect', "option1"), "foo")
cf.set("sect", "option1", "%foo")
self.assertEqual(cf.get('sect', "option1"), "%foo")
cf.set("sect", "option1", "foo%")
self.assertEqual(cf.get('sect', "option1"), "foo%")
cf.set("sect", "option1", "f%oo")
self.assertEqual(cf.get('sect', "option1"), "f%oo")
# bug #5741: double percents are *not* malformed
cf.set("sect", "option2", "foo%%bar")
self.assertEqual(cf.get("sect", "option2"), "foo%%bar")
class ConfigParserTestCaseNonStandardDelimiters(
ConfigParserTestCase, unittest.TestCase
):
delimiters = (':=', '$')
comment_prefixes = ('//', '"')
inline_comment_prefixes = ('//', '"')
class ConfigParserTestCaseNonStandardDefaultSection(
ConfigParserTestCase, unittest.TestCase
):
default_section = 'general'
class MultilineValuesTestCase(BasicTestCase, unittest.TestCase):
config_class = configparser.ConfigParser
wonderful_spam = (
"I'm having spam spam spam spam "
"spam spam spam beaked beans spam "
"spam spam and spam!"
).replace(' ', '\t\n')
def setUp(self):
cf = self.newconfig()
for i in range(100):
s = 'section{0}'.format(i)
cf.add_section(s)
for j in range(10):
cf.set(s, 'lovely_spam{}'.format(j), self.wonderful_spam)
with open(os_helper.TESTFN, 'w', encoding="utf-8") as f:
cf.write(f)
def tearDown(self):
os.unlink(os_helper.TESTFN)
def test_dominating_multiline_values(self):
# We're reading from file because this is where the code changed
# during performance updates in Python 3.2
cf_from_file = self.newconfig()
with open(os_helper.TESTFN, encoding="utf-8") as f:
cf_from_file.read_file(f)
self.assertEqual(
cf_from_file.get('section8', 'lovely_spam4'),
self.wonderful_spam.replace('\t\n', '\n'),
)
class RawConfigParserTestCase(BasicTestCase, unittest.TestCase):
config_class = configparser.RawConfigParser
def test_interpolation(self):
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "bar"), "something %(with1)s interpolation (1 step)")
eq(cf.get("Foo", "bar9"), "something %(with9)s lots of interpolation (9 steps)")
eq(
cf.get("Foo", "bar10"),
"something %(with10)s lots of interpolation (10 steps)",
)
eq(
cf.get("Foo", "bar11"),
"something %(with11)s lots of interpolation (11 steps)",
)
def test_items(self):
self.check_items_config(
[
('default', '<default>'),
('getdefault', '|%(default)s|'),
('key', '|%(name)s|'),
('name', '%(value)s'),
]
)
def test_set_nonstring_types(self):
cf = self.newconfig()
cf.add_section('non-string')
cf.set('non-string', 'int', 1)
cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13])
cf.set('non-string', 'dict', {'pi': 3.14159})
self.assertEqual(cf.get('non-string', 'int'), 1)
self.assertEqual(cf.get('non-string', 'list'), [0, 1, 1, 2, 3, 5, 8, 13])
self.assertEqual(cf.get('non-string', 'dict'), {'pi': 3.14159})
cf.add_section(123)
cf.set(123, 'this is sick', True)
self.assertEqual(cf.get(123, 'this is sick'), True)
if cf._dict is configparser._default_dict:
# would not work for SortedDict; only checking for the most common
# default dictionary (dict)
cf.optionxform = lambda x: x
cf.set('non-string', 1, 1)
self.assertEqual(cf.get('non-string', 1), 1)
def test_defaults_keyword(self):
"""bpo-23835 legacy behavior for RawConfigParser"""
with self.assertRaises(AttributeError) as ctx:
self.newconfig(defaults={1: 2.4})
err = ctx.exception
self.assertEqual(str(err), "'int' object has no attribute 'lower'")
cf = self.newconfig(defaults={"A": 5.2})
self.assertAlmostEqual(cf[self.default_section]['a'], 5.2)
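# Legacy behaviour of RawConfigParser (contrast with ConfigParser's bpo-23835
# fix above): defaults are stored without string conversion, so the float
# value survives as-is and a non-string key fails inside optionxform() with
# AttributeError.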
class RawConfigParserTestCaseNonStandardDelimiters(RawConfigParserTestCase):
delimiters = (':=', '$')
comment_prefixes = ('//', '"')
inline_comment_prefixes = ('//', '"')
class RawConfigParserTestSambaConf(CfgParserTestCaseClass, unittest.TestCase):
config_class = configparser.RawConfigParser
comment_prefixes = ('#', ';', '----')
inline_comment_prefixes = ('//',)
empty_lines_in_values = False
def test_reading(self):
smbconf = support.findfile("cfgparser.2")
# check when we pass a mix of readable and non-readable files:
cf = self.newconfig()
parsed_files = cf.read([smbconf, "nonexistent-file"], encoding='utf-8')
self.assertEqual(parsed_files, [smbconf])
sections = [
'global',
'homes',
'printers',
'print$',
'pdf-generator',
'tmp',
'Agustin',
]
self.assertEqual(cf.sections(), sections)
self.assertEqual(cf.get("global", "workgroup"), "MDKGROUP")
self.assertEqual(cf.getint("global", "max log size"), 50)
self.assertEqual(cf.get("global", "hosts allow"), "127.")
self.assertEqual(cf.get("tmp", "echo command"), "cat %s; rm %s")
class ConfigParserTestCaseExtendedInterpolation(BasicTestCase, unittest.TestCase):
config_class = configparser.ConfigParser
interpolation = configparser.ExtendedInterpolation()
default_section = 'common'
strict = True
def fromstring(self, string, defaults=None, optionxform=None):
cf = self.newconfig(defaults)
if optionxform:
cf.optionxform = optionxform
cf.read_string(string)
return cf
def test_extended_interpolation(self):
cf = self.fromstring(
textwrap.dedent(
"""
[common]
favourite Beatle = Paul
favourite color = green
[tom]
favourite band = ${favourite color} day
favourite pope = John ${favourite Beatle} II
sequel = ${favourite pope}I
[ambv]
favourite Beatle = George
son of Edward VII = ${favourite Beatle} V
son of George V = ${son of Edward VII}I
[stanley]
favourite Beatle = ${ambv:favourite Beatle}
favourite pope = ${tom:favourite pope}
favourite color = black
favourite state of mind = paranoid
favourite movie = soylent ${common:favourite color}
favourite song = ${favourite color} sabbath - ${favourite state of mind}
"""
).strip()
)
eq = self.assertEqual
eq(cf['common']['favourite Beatle'], 'Paul')
eq(cf['common']['favourite color'], 'green')
eq(cf['tom']['favourite Beatle'], 'Paul')
eq(cf['tom']['favourite color'], 'green')
eq(cf['tom']['favourite band'], 'green day')
eq(cf['tom']['favourite pope'], 'John Paul II')
eq(cf['tom']['sequel'], 'John Paul III')
eq(cf['ambv']['favourite Beatle'], 'George')
eq(cf['ambv']['favourite color'], 'green')
eq(cf['ambv']['son of Edward VII'], 'George V')
eq(cf['ambv']['son of George V'], 'George VI')
eq(cf['stanley']['favourite Beatle'], 'George')
eq(cf['stanley']['favourite color'], 'black')
eq(cf['stanley']['favourite state of mind'], 'paranoid')
eq(cf['stanley']['favourite movie'], 'soylent green')
eq(cf['stanley']['favourite pope'], 'John Paul II')
eq(cf['stanley']['favourite song'], 'black sabbath - paranoid')
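# ExtendedInterpolation syntax in a nutshell: ${option} resolves within the
# current section (plus values inherited from the default section, 'common'
# here), and ${section:option} reaches into another section explicitly --
# e.g. ${ambv:favourite Beatle} pulls 'George' into [stanley].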
def test_endless_loop(self):
cf = self.fromstring(
textwrap.dedent(
"""
[one for you]
ping = ${one for me:pong}
[one for me]
pong = ${one for you:ping}
[selfish]
me = ${me}
"""
).strip()
)
with self.assertRaises(configparser.InterpolationDepthError):
cf['one for you']['ping']
with self.assertRaises(configparser.InterpolationDepthError):
cf['selfish']['me']
def test_strange_options(self):
cf = self.fromstring(
"""
[dollars]
$var = $$value
$var2 = ${$var}
${sick} = cannot interpolate me
[interpolated]
$other = ${dollars:$var}
$trying = ${dollars:${sick}}
"""
)
self.assertEqual(cf['dollars']['$var'], '$value')
self.assertEqual(cf['interpolated']['$other'], '$value')
self.assertEqual(cf['dollars']['${sick}'], 'cannot interpolate me')
exception_class = configparser.InterpolationMissingOptionError
with self.assertRaises(exception_class) as cm:
cf['interpolated']['$trying']
self.assertEqual(cm.exception.reference, 'dollars:${sick')
self.assertEqual(cm.exception.args[2], '${dollars:${sick}}') # rawval
def test_case_sensitivity_basic(self):
ini = textwrap.dedent(
"""
[common]
optionlower = value
OptionUpper = Value
[Common]
optionlower = a better ${common:optionlower}
OptionUpper = A Better ${common:OptionUpper}
[random]
foolower = ${common:optionlower} redefined
FooUpper = ${Common:OptionUpper} Redefined
"""
).strip()
cf = self.fromstring(ini)
eq = self.assertEqual
eq(cf['common']['optionlower'], 'value')
eq(cf['common']['OptionUpper'], 'Value')
eq(cf['Common']['optionlower'], 'a better value')
eq(cf['Common']['OptionUpper'], 'A Better Value')
eq(cf['random']['foolower'], 'value redefined')
eq(cf['random']['FooUpper'], 'A Better Value Redefined')
def test_case_sensitivity_conflicts(self):
ini = textwrap.dedent(
"""
[common]
option = value
Option = Value
[Common]
option = a better ${common:option}
Option = A Better ${common:Option}
[random]
foo = ${common:option} redefined
Foo = ${Common:Option} Redefined
"""
).strip()
with self.assertRaises(configparser.DuplicateOptionError):
cf = self.fromstring(ini)
# raw options
cf = self.fromstring(ini, optionxform=lambda opt: opt)
eq = self.assertEqual
eq(cf['common']['option'], 'value')
eq(cf['common']['Option'], 'Value')
eq(cf['Common']['option'], 'a better value')
eq(cf['Common']['Option'], 'A Better Value')
eq(cf['random']['foo'], 'value redefined')
eq(cf['random']['Foo'], 'A Better Value Redefined')
def test_other_errors(self):
cf = self.fromstring(
"""
[interpolation fail]
case1 = ${where's the brace
case2 = ${does_not_exist}
case3 = ${wrong_section:wrong_value}
case4 = ${i:like:colon:characters}
case5 = $100 for Fail No 5!
"""
)
with self.assertRaises(configparser.InterpolationSyntaxError):
cf['interpolation fail']['case1']
with self.assertRaises(configparser.InterpolationMissingOptionError):
cf['interpolation fail']['case2']
with self.assertRaises(configparser.InterpolationMissingOptionError):
cf['interpolation fail']['case3']
with self.assertRaises(configparser.InterpolationSyntaxError):
cf['interpolation fail']['case4']
with self.assertRaises(configparser.InterpolationSyntaxError):
cf['interpolation fail']['case5']
with self.assertRaises(ValueError):
cf['interpolation fail']['case6'] = "BLACK $ABBATH"
class ConfigParserTestCaseNoValue(ConfigParserTestCase):
allow_no_value = True
class ConfigParserTestCaseTrickyFile(CfgParserTestCaseClass, unittest.TestCase):
config_class = configparser.ConfigParser
delimiters = ['=']
comment_prefixes = ['#']
allow_no_value = True
def test_cfgparser_dot_3(self):
tricky = support.findfile("cfgparser.3")
cf = self.newconfig()
self.assertEqual(len(cf.read(tricky, encoding='utf-8')), 1)
self.assertEqual(
cf.sections(),
[
'strange',
'corruption',
'yeah, sections can be ' 'indented as well',
'another one!',
'no values here',
'tricky interpolation',
'more interpolation',
],
)
self.assertEqual(
cf.getint(self.default_section, 'go', vars={'interpolate': '-1'}), -1
)
with self.assertRaises(ValueError):
# no interpolation will happen
cf.getint(self.default_section, 'go', raw=True, vars={'interpolate': '-1'})
self.assertEqual(len(cf.get('strange', 'other').split('\n')), 4)
self.assertEqual(len(cf.get('corruption', 'value').split('\n')), 10)
longname = 'yeah, sections can be indented as well'
self.assertFalse(cf.getboolean(longname, 'are they subsections'))
self.assertEqual(cf.get(longname, 'lets use some Unicode'), '็ไปฎๅ')
self.assertEqual(len(cf.items('another one!')), 5) # 4 in section and
# `go` from DEFAULT
with self.assertRaises(configparser.InterpolationMissingOptionError):
cf.items('no values here')
self.assertEqual(cf.get('tricky interpolation', 'lets'), 'do this')
self.assertEqual(
cf.get('tricky interpolation', 'lets'), cf.get('tricky interpolation', 'go')
)
self.assertEqual(cf.get('more interpolation', 'lets'), 'go shopping')
def test_unicode_failure(self):
tricky = support.findfile("cfgparser.3")
cf = self.newconfig()
with self.assertRaises(UnicodeDecodeError):
cf.read(tricky, encoding='ascii')
class Issue7005TestCase(unittest.TestCase):
"""Test output when None is set() as a value and allow_no_value == False.
http://bugs.python.org/issue7005
"""
expected_output = "[section]\noption = None\n\n"
def prepare(self, config_class):
# This is the default, but that's the point.
cp = config_class(allow_no_value=False)
cp.add_section("section")
cp.set("section", "option", None)
sio = io.StringIO()
cp.write(sio)
return sio.getvalue()
def test_none_as_value_stringified(self):
cp = configparser.ConfigParser(allow_no_value=False)
cp.add_section("section")
with self.assertRaises(TypeError):
cp.set("section", "option", None)
def test_none_as_value_stringified_raw(self):
output = self.prepare(configparser.RawConfigParser)
self.assertEqual(output, self.expected_output)
class SortedTestCase(RawConfigParserTestCase):
dict_type = SortedDict
def test_sorted(self):
cf = self.fromstring(
"[b]\n" "o4=1\n" "o3=2\n" "o2=3\n" "o1=4\n" "[a]\n" "k=v\n"
)
output = io.StringIO()
cf.write(output)
self.assertEqual(
output.getvalue(),
"[a]\n" "k = v\n\n" "[b]\n" "o1 = 4\n" "o2 = 3\n" "o3 = 2\n" "o4 = 1\n\n",
)
class CompatibleTestCase(CfgParserTestCaseClass, unittest.TestCase):
config_class = configparser.RawConfigParser
comment_prefixes = '#;'
inline_comment_prefixes = ';'
def test_comment_handling(self):
config_string = textwrap.dedent(
"""\
[Commented Bar]
baz=qwe ; a comment
foo: bar # not a comment!
# but this is a comment
; another comment
quirk: this;is not a comment
; a space must precede an inline comment
"""
)
cf = self.fromstring(config_string)
self.assertEqual(cf.get('Commented Bar', 'foo'), 'bar # not a comment!')
self.assertEqual(cf.get('Commented Bar', 'baz'), 'qwe')
self.assertEqual(cf.get('Commented Bar', 'quirk'), 'this;is not a comment')
class CopyTestCase(BasicTestCase, unittest.TestCase):
config_class = configparser.ConfigParser
def fromstring(self, string, defaults=None):
cf = self.newconfig(defaults)
cf.read_string(string)
cf_copy = self.newconfig()
cf_copy.read_dict(cf)
# we have to clean up option duplicates that appeared because of
# the magic DEFAULTSECT behaviour.
for section in cf_copy.values():
if section.name == self.default_section:
continue
for default, value in cf[self.default_section].items():
if section[default] == value:
del section[default]
return cf_copy
class FakeFile(object):
def __init__(self):
file_path = support.findfile("cfgparser.1")
with open(file_path, encoding="utf-8") as f:
self.lines = f.readlines()
self.lines.reverse()
def readline(self):
if len(self.lines):
return self.lines.pop()
return ''
def readline_generator(f):
"""As advised in Doc/library/configparser.rst."""
line = f.readline()
while line:
yield line
line = f.readline()
class ReadFileTestCase(unittest.TestCase):
def test_file(self):
file_paths = [support.findfile("cfgparser.1")]
try:
file_paths.append(file_paths[0].encode('utf8'))
except UnicodeEncodeError:
pass # unfortunately we can't test bytes on this path
for file_path in file_paths:
parser = configparser.ConfigParser()
with open(file_path, encoding="utf-8") as f:
parser.read_file(f)
self.assertIn("Foo Bar", parser)
self.assertIn("foo", parser["Foo Bar"])
self.assertEqual(parser["Foo Bar"]["foo"], "newbar")
def test_iterable(self):
lines = (
textwrap.dedent(
"""
[Foo Bar]
foo=newbar"""
)
.strip()
.split('\n')
)
parser = configparser.ConfigParser()
parser.read_file(lines)
self.assertIn("Foo Bar", parser)
self.assertIn("foo", parser["Foo Bar"])
self.assertEqual(parser["Foo Bar"]["foo"], "newbar")
def test_readline_generator(self):
"""Issue #11670."""
parser = configparser.ConfigParser()
with self.assertRaises(TypeError):
parser.read_file(FakeFile())
parser.read_file(readline_generator(FakeFile()))
self.assertIn("Foo Bar", parser)
self.assertIn("foo", parser["Foo Bar"])
self.assertEqual(parser["Foo Bar"]["foo"], "newbar")
def test_source_as_bytes(self):
"""Issue #18260."""
lines = (
textwrap.dedent(
"""
[badbad]
[badbad]"""
)
.strip()
.split('\n')
)
parser = configparser.ConfigParser()
with self.assertRaises(configparser.DuplicateSectionError) as dse:
parser.read_file(lines, source=b"badbad")
self.assertEqual(
nice_literals(str(dse.exception)),
"While reading from 'badbad' [line 2]: section 'badbad' " "already exists",
)
lines = (
textwrap.dedent(
"""
[badbad]
bad = bad
bad = bad"""
)
.strip()
.split('\n')
)
parser = configparser.ConfigParser()
with self.assertRaises(configparser.DuplicateOptionError) as dse:
parser.read_file(lines, source=b"badbad")
self.assertEqual(
nice_literals(str(dse.exception)),
"While reading from 'badbad' [line 3]: option 'bad' in section "
"'badbad' already exists",
)
lines = (
textwrap.dedent(
"""
[badbad]
= bad"""
)
.strip()
.split('\n')
)
parser = configparser.ConfigParser()
with self.assertRaises(configparser.ParsingError) as dse:
parser.read_file(lines, source=b"badbad")
self.assertEqual(
nice_literals(str(dse.exception)),
"Source contains parsing errors: 'badbad'\n\t[line 2]: '= bad'",
)
lines = (
textwrap.dedent(
"""
[badbad
bad = bad"""
)
.strip()
.split('\n')
)
parser = configparser.ConfigParser()
with self.assertRaises(configparser.MissingSectionHeaderError) as dse:
parser.read_file(lines, source=b"badbad")
self.assertEqual(
nice_literals(str(dse.exception)),
"File contains no section headers.\nfile: 'badbad', line: 1\n" "'[badbad'",
)
class CoverageOneHundredTestCase(unittest.TestCase):
"""Covers edge cases in the codebase."""
def test_duplicate_option_error(self):
error = configparser.DuplicateOptionError('section', 'option')
self.assertEqual(error.section, 'section')
self.assertEqual(error.option, 'option')
self.assertEqual(error.source, None)
self.assertEqual(error.lineno, None)
self.assertEqual(error.args, ('section', 'option', None, None))
self.assertEqual(
str(error),
"Option {0!r} in section {1!r} already "
"exists".format('option', 'section'),
)
def test_interpolation_depth_error(self):
error = configparser.InterpolationDepthError('option', 'section', 'rawval')
self.assertEqual(error.args, ('option', 'section', 'rawval'))
self.assertEqual(error.option, 'option')
self.assertEqual(error.section, 'section')
def test_parsing_error(self):
with self.assertRaises(ValueError) as cm:
configparser.ParsingError()
self.assertEqual(str(cm.exception), "Required argument `source' not " "given.")
with self.assertRaises(ValueError) as cm:
configparser.ParsingError(source='source', filename='filename')
self.assertEqual(
str(cm.exception),
"Cannot specify both `filename' " "and `source'. Use `source'.",
)
error = configparser.ParsingError(filename='source')
self.assertEqual(error.source, 'source')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DeprecationWarning)
self.assertEqual(error.filename, 'source')
error.filename = 'filename'
self.assertEqual(error.source, 'filename')
for warning in w:
self.assertTrue(warning.category is DeprecationWarning)
def test_interpolation_validation(self):
parser = configparser.ConfigParser()
parser.read_string(
"""
[section]
invalid_percent = %
invalid_reference = %(()
invalid_variable = %(does_not_exist)s
"""
)
with self.assertRaises(configparser.InterpolationSyntaxError) as cm:
parser['section']['invalid_percent']
self.assertEqual(
str(cm.exception),
"'%' must be followed by '%' or " "'(', found: {0!r}".format('%'),
)
with self.assertRaises(configparser.InterpolationSyntaxError) as cm:
parser['section']['invalid_reference']
self.assertEqual(
str(cm.exception),
"bad interpolation variable " "reference {0!r}".format('%(()'),
)
def test_readfp_deprecation(self):
sio = io.StringIO(
"""
[section]
option = value
"""
)
parser = configparser.ConfigParser()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DeprecationWarning)
parser.readfp(sio, filename='StringIO')
for warning in w:
self.assertTrue(warning.category is DeprecationWarning)
self.assertEqual(len(parser), 2)
self.assertEqual(parser['section']['option'], 'value')
def test_safeconfigparser_deprecation(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DeprecationWarning)
configparser.SafeConfigParser()
for warning in w:
self.assertTrue(warning.category is DeprecationWarning)
def test_sectionproxy_repr(self):
parser = configparser.ConfigParser()
parser.read_string(
"""
[section]
key = value
"""
)
self.assertEqual(repr(parser['section']), '<Section: section>')
def test_inconsistent_converters_state(self):
parser = configparser.ConfigParser()
import decimal
parser.converters['decimal'] = decimal.Decimal
parser.read_string(
"""
[s1]
one = 1
[s2]
two = 2
"""
)
self.assertIn('decimal', parser.converters)
self.assertEqual(parser.getdecimal('s1', 'one'), 1)
self.assertEqual(parser.getdecimal('s2', 'two'), 2)
self.assertEqual(parser['s1'].getdecimal('one'), 1)
self.assertEqual(parser['s2'].getdecimal('two'), 2)
del parser.getdecimal
with self.assertRaises(AttributeError):
parser.getdecimal('s1', 'one')
self.assertIn('decimal', parser.converters)
del parser.converters['decimal']
self.assertNotIn('decimal', parser.converters)
with self.assertRaises(AttributeError):
parser.getdecimal('s1', 'one')
with self.assertRaises(AttributeError):
parser['s1'].getdecimal('one')
with self.assertRaises(AttributeError):
parser['s2'].getdecimal('two')
class ExceptionPicklingTestCase(unittest.TestCase):
"""Tests for issue #13760: ConfigParser exceptions are not picklable."""
def test_error(self):
import pickle
e1 = configparser.Error('value')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(repr(e1), repr(e2))
def test_nosectionerror(self):
import pickle
e1 = configparser.NoSectionError('section')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(repr(e1), repr(e2))
def test_nooptionerror(self):
import pickle
e1 = configparser.NoOptionError('option', 'section')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_duplicatesectionerror(self):
import pickle
e1 = configparser.DuplicateSectionError('section', 'source', 123)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.source, e2.source)
self.assertEqual(e1.lineno, e2.lineno)
self.assertEqual(repr(e1), repr(e2))
def test_duplicateoptionerror(self):
import pickle
e1 = configparser.DuplicateOptionError('section', 'option', 'source', 123)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(e1.source, e2.source)
self.assertEqual(e1.lineno, e2.lineno)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationerror(self):
import pickle
e1 = configparser.InterpolationError('option', 'section', 'msg')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationmissingoptionerror(self):
import pickle
e1 = configparser.InterpolationMissingOptionError(
'option', 'section', 'rawval', 'reference'
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(e1.reference, e2.reference)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationsyntaxerror(self):
import pickle
e1 = configparser.InterpolationSyntaxError('option', 'section', 'msg')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationdeptherror(self):
import pickle
e1 = configparser.InterpolationDepthError('option', 'section', 'rawval')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_parsingerror(self):
import pickle
e1 = configparser.ParsingError('source')
e1.append(1, 'line1')
e1.append(2, 'line2')
e1.append(3, 'line3')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.source, e2.source)
self.assertEqual(e1.errors, e2.errors)
self.assertEqual(repr(e1), repr(e2))
e1 = configparser.ParsingError(filename='filename')
e1.append(1, 'line1')
e1.append(2, 'line2')
e1.append(3, 'line3')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.source, e2.source)
self.assertEqual(e1.errors, e2.errors)
self.assertEqual(repr(e1), repr(e2))
def test_missingsectionheadererror(self):
import pickle
e1 = configparser.MissingSectionHeaderError('filename', 123, 'line')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(e1, proto)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.line, e2.line)
self.assertEqual(e1.source, e2.source)
self.assertEqual(e1.lineno, e2.lineno)
self.assertEqual(repr(e1), repr(e2))
class InlineCommentStrippingTestCase(unittest.TestCase):
"""Tests for issue #14590: ConfigParser doesn't strip inline comment when
delimiter occurs earlier without preceding space.."""
def test_stripping(self):
cfg = configparser.ConfigParser(inline_comment_prefixes=(';', '#', '//'))
cfg.read_string(
"""
[section]
k1 = v1;still v1
k2 = v2 ;a comment
k3 = v3 ; also a comment
k4 = v4;still v4 ;a comment
k5 = v5;still v5 ; also a comment
k6 = v6;still v6; and still v6 ;a comment
k7 = v7;still v7; and still v7 ; also a comment
[multiprefix]
k1 = v1;still v1 #a comment ; yeah, pretty much
k2 = v2 // this already is a comment ; continued
k3 = v3;#//still v3# and still v3 ; a comment
"""
)
s = cfg['section']
self.assertEqual(s['k1'], 'v1;still v1')
self.assertEqual(s['k2'], 'v2')
self.assertEqual(s['k3'], 'v3')
self.assertEqual(s['k4'], 'v4;still v4')
self.assertEqual(s['k5'], 'v5;still v5')
self.assertEqual(s['k6'], 'v6;still v6; and still v6')
self.assertEqual(s['k7'], 'v7;still v7; and still v7')
s = cfg['multiprefix']
self.assertEqual(s['k1'], 'v1;still v1')
self.assertEqual(s['k2'], 'v2')
self.assertEqual(s['k3'], 'v3;#//still v3# and still v3')
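# The rule pinned down here: an inline comment prefix only starts a comment
# when it is preceded by whitespace, so 'v1;still v1' keeps its ';' while
# 'v2 ;a comment' is truncated to 'v2'; any of the configured prefixes
# (';', '#', '//') may end the value once that condition is met.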
class ExceptionContextTestCase(unittest.TestCase):
"""Test that implementation details doesn't leak
through raising exceptions."""
def test_get_basic_interpolation(self):
parser = configparser.ConfigParser()
parser.read_string(
"""
[Paths]
home_dir: /Users
my_dir: %(home_dir1)s/lumberjack
my_pictures: %(my_dir)s/Pictures
"""
)
cm = self.assertRaises(configparser.InterpolationMissingOptionError)
with cm:
parser.get('Paths', 'my_dir')
self.assertIs(cm.exception.__suppress_context__, True)
def test_get_extended_interpolation(self):
parser = configparser.ConfigParser(
interpolation=configparser.ExtendedInterpolation()
)
parser.read_string(
"""
[Paths]
home_dir: /Users
my_dir: ${home_dir1}/lumberjack
my_pictures: ${my_dir}/Pictures
"""
)
cm = self.assertRaises(configparser.InterpolationMissingOptionError)
with cm:
parser.get('Paths', 'my_dir')
self.assertIs(cm.exception.__suppress_context__, True)
def test_missing_options(self):
parser = configparser.ConfigParser()
parser.read_string(
"""
[Paths]
home_dir: /Users
"""
)
with self.assertRaises(configparser.NoSectionError) as cm:
parser.options('test')
self.assertIs(cm.exception.__suppress_context__, True)
def test_missing_section(self):
config = configparser.ConfigParser()
with self.assertRaises(configparser.NoSectionError) as cm:
config.set('Section1', 'an_int', '15')
self.assertIs(cm.exception.__suppress_context__, True)
def test_remove_option(self):
config = configparser.ConfigParser()
with self.assertRaises(configparser.NoSectionError) as cm:
config.remove_option('Section1', 'an_int')
self.assertIs(cm.exception.__suppress_context__, True)
class ConvertersTestCase(BasicTestCase, unittest.TestCase):
"""Introduced in 3.5, issue #18159."""
config_class = configparser.ConfigParser
def newconfig(self, defaults=None):
instance = super(ConvertersTestCase, self).newconfig(defaults=defaults)
instance.converters['list'] = lambda v: [
e.strip() for e in v.split() if e.strip()
]
return instance
def test_converters(self):
cfg = self.newconfig()
self.assertIn('boolean', cfg.converters)
self.assertIn('list', cfg.converters)
self.assertIsNone(cfg.converters['int'])
self.assertIsNone(cfg.converters['float'])
self.assertIsNone(cfg.converters['boolean'])
self.assertIsNotNone(cfg.converters['list'])
self.assertEqual(len(cfg.converters), 4)
with self.assertRaises(ValueError):
cfg.converters[''] = lambda v: v
with self.assertRaises(ValueError):
cfg.converters[None] = lambda v: v
cfg.read_string(
"""
[s]
str = string
int = 1
float = 0.5
list = a b c d e f g
bool = yes
"""
)
s = cfg['s']
self.assertEqual(s['str'], 'string')
self.assertEqual(s['int'], '1')
self.assertEqual(s['float'], '0.5')
self.assertEqual(s['list'], 'a b c d e f g')
self.assertEqual(s['bool'], 'yes')
self.assertEqual(cfg.get('s', 'str'), 'string')
self.assertEqual(cfg.get('s', 'int'), '1')
self.assertEqual(cfg.get('s', 'float'), '0.5')
self.assertEqual(cfg.get('s', 'list'), 'a b c d e f g')
self.assertEqual(cfg.get('s', 'bool'), 'yes')
self.assertEqual(cfg.get('s', 'str'), 'string')
self.assertEqual(cfg.getint('s', 'int'), 1)
self.assertEqual(cfg.getfloat('s', 'float'), 0.5)
self.assertEqual(cfg.getlist('s', 'list'), ['a', 'b', 'c', 'd', 'e', 'f', 'g'])
self.assertEqual(cfg.getboolean('s', 'bool'), True)
self.assertEqual(s.get('str'), 'string')
self.assertEqual(s.getint('int'), 1)
self.assertEqual(s.getfloat('float'), 0.5)
self.assertEqual(s.getlist('list'), ['a', 'b', 'c', 'd', 'e', 'f', 'g'])
self.assertEqual(s.getboolean('bool'), True)
with self.assertRaises(AttributeError):
cfg.getdecimal('s', 'float')
with self.assertRaises(AttributeError):
s.getdecimal('float')
import decimal
cfg.converters['decimal'] = decimal.Decimal
self.assertIn('decimal', cfg.converters)
self.assertIsNotNone(cfg.converters['decimal'])
self.assertEqual(len(cfg.converters), 5)
dec0_5 = decimal.Decimal('0.5')
self.assertEqual(cfg.getdecimal('s', 'float'), dec0_5)
self.assertEqual(s.getdecimal('float'), dec0_5)
del cfg.converters['decimal']
self.assertNotIn('decimal', cfg.converters)
self.assertEqual(len(cfg.converters), 4)
with self.assertRaises(AttributeError):
cfg.getdecimal('s', 'float')
with self.assertRaises(AttributeError):
s.getdecimal('float')
with self.assertRaises(KeyError):
del cfg.converters['decimal']
with self.assertRaises(KeyError):
del cfg.converters['']
with self.assertRaises(KeyError):
del cfg.converters[None]
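# Outside of the test suite, the converters machinery exercised above is
# typically used like this (illustrative sketch, not part of the tests):
#
#     parser = configparser.ConfigParser(converters={'list': str.split})
#     parser.read_string("[s]\nnames = a b c\n")
#     parser.getlist('s', 'names')       # ['a', 'b', 'c']
#     parser['s'].getlist('names')       # ['a', 'b', 'c']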
class BlatantOverrideConvertersTestCase(unittest.TestCase):
"""What if somebody overrode a getboolean()? We want to make sure that in
this case the automatic converters do not kick in."""
config = """
[one]
one = false
two = false
three = long story short
[two]
one = false
two = false
three = four
"""
def test_converters_at_init(self):
cfg = configparser.ConfigParser(converters={'len': len})
cfg.read_string(self.config)
self._test_len(cfg)
self.assertIsNotNone(cfg.converters['len'])
def test_inheritance(self):
class StrangeConfigParser(configparser.ConfigParser):
gettysburg = 'a historic borough in south central Pennsylvania'
def getboolean(
self,
section,
option,
raw=False,
vars=None,
fallback=configparser._UNSET,
):
if section == option:
return True
return super(StrangeConfigParser, self).getboolean(
section, option, raw=raw, vars=vars, fallback=fallback
)
def getlen(
self,
section,
option,
raw=False,
vars=None,
fallback=configparser._UNSET,
):
return self._get_conv(
section, option, len, raw=raw, vars=vars, fallback=fallback
)
cfg = StrangeConfigParser()
cfg.read_string(self.config)
self._test_len(cfg)
self.assertIsNone(cfg.converters['len'])
self.assertTrue(cfg.getboolean('one', 'one'))
self.assertTrue(cfg.getboolean('two', 'two'))
self.assertFalse(cfg.getboolean('one', 'two'))
self.assertFalse(cfg.getboolean('two', 'one'))
cfg.converters['boolean'] = cfg._convert_to_boolean
self.assertFalse(cfg.getboolean('one', 'one'))
self.assertFalse(cfg.getboolean('two', 'two'))
self.assertFalse(cfg.getboolean('one', 'two'))
self.assertFalse(cfg.getboolean('two', 'one'))
def _test_len(self, cfg):
self.assertEqual(len(cfg.converters), 4)
self.assertIn('boolean', cfg.converters)
self.assertIn('len', cfg.converters)
self.assertNotIn('tysburg', cfg.converters)
self.assertIsNone(cfg.converters['int'])
self.assertIsNone(cfg.converters['float'])
self.assertIsNone(cfg.converters['boolean'])
self.assertEqual(cfg.getlen('one', 'one'), 5)
self.assertEqual(cfg.getlen('one', 'two'), 5)
self.assertEqual(cfg.getlen('one', 'three'), 16)
self.assertEqual(cfg.getlen('two', 'one'), 5)
self.assertEqual(cfg.getlen('two', 'two'), 5)
self.assertEqual(cfg.getlen('two', 'three'), 4)
self.assertEqual(cfg.getlen('two', 'four', fallback=0), 0)
with self.assertRaises(configparser.NoOptionError):
cfg.getlen('two', 'four')
self.assertEqual(cfg['one'].getlen('one'), 5)
self.assertEqual(cfg['one'].getlen('two'), 5)
self.assertEqual(cfg['one'].getlen('three'), 16)
self.assertEqual(cfg['two'].getlen('one'), 5)
self.assertEqual(cfg['two'].getlen('two'), 5)
self.assertEqual(cfg['two'].getlen('three'), 4)
self.assertEqual(cfg['two'].getlen('four', 0), 0)
self.assertEqual(cfg['two'].getlen('four'), None)
def test_instance_assignment(self):
cfg = configparser.ConfigParser()
cfg.getboolean = lambda section, option: True
cfg.getlen = lambda section, option: len(cfg[section][option])
cfg.read_string(self.config)
self.assertEqual(len(cfg.converters), 3)
self.assertIn('boolean', cfg.converters)
self.assertNotIn('len', cfg.converters)
self.assertIsNone(cfg.converters['int'])
self.assertIsNone(cfg.converters['float'])
self.assertIsNone(cfg.converters['boolean'])
self.assertTrue(cfg.getboolean('one', 'one'))
self.assertTrue(cfg.getboolean('two', 'two'))
self.assertTrue(cfg.getboolean('one', 'two'))
self.assertTrue(cfg.getboolean('two', 'one'))
cfg.converters['boolean'] = cfg._convert_to_boolean
self.assertFalse(cfg.getboolean('one', 'one'))
self.assertFalse(cfg.getboolean('two', 'two'))
self.assertFalse(cfg.getboolean('one', 'two'))
self.assertFalse(cfg.getboolean('two', 'one'))
self.assertEqual(cfg.getlen('one', 'one'), 5)
self.assertEqual(cfg.getlen('one', 'two'), 5)
self.assertEqual(cfg.getlen('one', 'three'), 16)
self.assertEqual(cfg.getlen('two', 'one'), 5)
self.assertEqual(cfg.getlen('two', 'two'), 5)
self.assertEqual(cfg.getlen('two', 'three'), 4)
# If a getter impl is assigned straight to the instance, it won't
# be available on the section proxies.
with self.assertRaises(AttributeError):
self.assertEqual(cfg['one'].getlen('one'), 5)
with self.assertRaises(AttributeError):
self.assertEqual(cfg['two'].getlen('one'), 5)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, configparser, not_exported={"Error"})
if __name__ == '__main__':
unittest.main()
|
the-stack_106_29074 | """
Classes for reading FASTA and FASTQ files
"""
__all__ = ['FastaReader', 'FastqReader']
import io
from xopen import xopen
from ._core import fastq_iter as _fastq_iter, Sequence
from ._util import shorten as _shorten
from .exceptions import FastaFormatError
class BinaryFileReader:
"""
A mixin for readers that ensures that a file or a path can be passed in to the constructor.
"""
_close_on_exit = False
paired = False
mode = 'rb'
def __init__(self, file, opener=xopen, _close_file=None):
"""
The file is a path or a file-like object. In both cases, the file may
be compressed (.gz, .bz2, .xz).
"""
if isinstance(file, str):
file = opener(file, self.mode)
self._close_on_exit = True
elif _close_file:
self._close_on_exit = True
self._file = file
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, getattr(self._file, "name", self._file))
def close(self):
if self._close_on_exit and self._file is not None:
self._file.close()
self._file = None
def __enter__(self):
if self._file is None:
raise ValueError("I/O operation on closed BinaryFileReader")
return self
def __exit__(self, *args):
self.close()
class FastaReader(BinaryFileReader):
"""
Reader for FASTA files.
"""
def __init__(self, file, keep_linebreaks=False, sequence_class=Sequence, opener=xopen, _close_file=None):
"""
file is a path or a file-like object. In both cases, the file may
be compressed (.gz, .bz2, .xz).
keep_linebreaks -- whether to keep newline characters in the sequence
"""
super().__init__(file, opener=opener, _close_file=_close_file)
self.sequence_class = sequence_class
self.delivers_qualities = False
self._delimiter = '\n' if keep_linebreaks else ''
def __iter__(self):
"""
        Iterate over the records in the FASTA file, yielding one Sequence object per entry.
"""
name = None
seq = []
f = io.TextIOWrapper(self._file)
for i, line in enumerate(f):
# strip() also removes DOS line breaks
line = line.strip()
if not line:
continue
if line and line[0] == '>':
if name is not None:
yield self.sequence_class(name, self._delimiter.join(seq), None)
name = line[1:]
seq = []
elif line and line[0] == '#':
continue
elif name is not None:
seq.append(line)
else:
raise FastaFormatError(
"Expected '>' at beginning of record, but got {!r}."
.format(_shorten(line)), line=i)
if name is not None:
yield self.sequence_class(name, self._delimiter.join(seq), None)
# Prevent TextIOWrapper from closing the underlying file
f.detach()
class FastqReader(BinaryFileReader):
"""
Reader for FASTQ files. Does not support multi-line FASTQ files.
"""
def __init__(self, file, sequence_class=Sequence, buffer_size=1048576, opener=xopen, _close_file=None):
"""
file is a filename or a file-like object.
If file is a filename, then .gz files are supported.
"""
super().__init__(file, opener=opener, _close_file=_close_file)
self.sequence_class = sequence_class
self.delivers_qualities = True
self.buffer_size = buffer_size
# The first value yielded by _fastq_iter indicates
# whether the file has repeated headers
self._iter = _fastq_iter(self._file, self.sequence_class, self.buffer_size)
try:
self.two_headers = next(self._iter)
assert self.two_headers in (True, False)
except StopIteration:
# Empty file
self.two_headers = False
self._iter = iter(())
except Exception:
self.close()
raise
def __iter__(self):
return self._iter
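# Minimal usage sketch (assumes a small local file "example.fasta" exists;
# kept as comments so importing this module stays side-effect free):
#
#     with FastaReader("example.fasta") as reader:
#         for record in reader:
#             print(record.name, len(record.sequence))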
|
the-stack_106_29075 | import logging
# import sentry_sdk
#
# from sentry_sdk.integrations.django import DjangoIntegration
# from sentry_sdk.integrations.logging import LoggingIntegration
# from sentry_sdk.integrations.celery import CeleryIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["yooky.com"])
print(ALLOWED_HOSTS)
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = env("DJANGO_EMAIL_HOST", default='')
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = env("DJANGO_EMAIL_PORT")
EMAIL_HOST_USER = env("DJANGO_EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = env("DJANGO_EMAIL_HOST_PASSWORD")
EMAIL_USE_TLS = env("DJANGO_EMAIL_USE_TLS")
DEFAULT_FROM_EMAIL = env("DJANGO_EMAIL_FROM")
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
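# The env() lookups in this module assume a production environment that
# defines, at minimum, variables along these lines (values are placeholders,
# not real credentials):
#
#   DJANGO_SECRET_KEY=change-me
#   DJANGO_ALLOWED_HOSTS=yooky.com
#   DATABASE_URL=postgres://user:pass@host:5432/dbname
#   REDIS_URL=redis://redis:6379/0
#   DJANGO_EMAIL_HOST=smtp.example.com
#   DJANGO_EMAIL_PORT=587
#   DJANGO_EMAIL_HOST_USER=mailer
#   DJANGO_EMAIL_HOST_PASSWORD=change-me
#   DJANGO_EMAIL_USE_TLS=True
#   DJANGO_EMAIL_FROM=noreply@example.com
#   DJANGO_ADMIN_URL=admin/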
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool("COMPRESS_ENABLED", default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL
# Collectfast
# ------------------------------------------------------------------------------
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# # Sentry
# # ------------------------------------------------------------------------------
# SENTRY_DSN = env("SENTRY_DSN")
# SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
#
# sentry_logging = LoggingIntegration(
# level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
# event_level=logging.ERROR, # Send errors as events
# )
# sentry_sdk.init(
# dsn=SENTRY_DSN,
# integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
# )
#
# # Your stuff...
# # ------------------------------------------------------------------------------
|
the-stack_106_29080 | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pylab as plt
from gradient_2d import numerical_gradient
def gradient_descent(f, init_x, lr=0.01, step_num=100):
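    """Minimize f with plain gradient descent.

    Returns the final point together with the history of visited points,
    so that the optimization path can be plotted below.
    """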
x = init_x
x_history = []
for i in range(step_num):
        x_history.append(x.copy())
grad = numerical_gradient(f, x)
x -= lr * grad
return x, np.array(x_history)
def function_2(x):
return x[0] ** 2 + x[1] ** 2
init_x = np.array([-3.0, 4.0])
lr = 0.1
step_num = 20
x, x_history = gradient_descent(function_2, init_x, lr=lr, step_num=step_num)
plt.plot([-5, 5], [0, 0], '--b')
plt.plot([0, 0], [-5, 5], '--b')
plt.plot(x_history[:, 0], x_history[:, 1], 'o')
plt.xlim(-3.5, 3.5)
plt.ylim(-4.5, 4.5)
plt.xlabel("X0")
plt.ylabel("X1")
plt.show() |
the-stack_106_29081 | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="size",
parent_name="icicle.marker.colorbar.tickfont",
**kwargs,
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 1),
**kwargs,
)
|
the-stack_106_29085 | # model settings
model = dict(
type='CenterNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_agnostic=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
hm_center_ratio=0.27,
center_ratio=0.01,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
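# NOTE: the `data` dict below references train_pipeline / test_pipeline, which
# were missing from the original snippet. The definitions here are an assumed,
# typical TTFNet/CenterNet-style pipeline; the image scale and augmentations
# are guesses, not values taken from the original config.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]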
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'ttf53_beta001_1lr_log_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
the-stack_106_29086 | import os
import config_module
import sqlite3 #for creating databases for new galleries
import requests
import string
import random
import hashlib
def create_new_gallery(gallery_id):
if(str(gallery_id) not in os.listdir(config_module.library_path)):
gallery_path = os.path.join(config_module.library_path, str(gallery_id))
os.mkdir(gallery_path)
db_conn = sqlite3.connect(os.path.join(gallery_path, "gallery.db"))
cur = db_conn.cursor()
cur.execute('CREATE TABLE "photos" ("id" INTEGER UNIQUE, "date" INTEGER, "checksum" TEXT, "size" INTEGER, PRIMARY KEY("id" AUTOINCREMENT));')
db_conn.commit()
db_conn.close()
return 0
else:
print("Error creating directory for new gallery (id {}). Path already exists.".format(gallery_id))
return 1
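# Sketch of a typical flow combining the helpers in this module (paths and
# the gallery id are made up; copy_to_tmp and md5_of_file are defined below):
#
#     create_new_gallery(42)          # creates library_path/42/gallery.db
#     local = copy_to_tmp('/incoming/IMG_0001.JPG', _generate_id())
#     checksum = md5_of_file(local)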
def check_photo_ext(filename):
if(os.path.splitext(filename)[1].lower() in [".jpg", ".jpeg", ".png", ".heic"]): return True
else: return False
def _generate_id(size=32, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def download(url, local_name):  # DEPRECATED because of local server
print("Downloading:", url)
os.makedirs(config_module.downloads_path, exist_ok=True)
r = requests.get(url, allow_redirects=True)
local_filepath = os.path.join(config_module.downloads_path, local_name+os.path.splitext(url)[1])
with open(local_filepath, 'wb') as file:
file.write(r.content)
return local_filepath
def copy_to_tmp(origin_path, local_name):
print("Copying:", origin_path)
os.makedirs(config_module.downloads_path, exist_ok=True)
local_filepath = os.path.join(config_module.downloads_path, local_name+os.path.splitext(origin_path)[1])
local_filepath = os.path.splitext(local_filepath)[0]+os.path.splitext(local_filepath)[1].lower() #using splitext to convert .JPG -> .jpg, etc.
os.rename(origin_path, local_filepath)
return local_filepath
def md5_of_file(path):
hash_md5 = hashlib.md5()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest() |
the-stack_106_29088 | import lightbulb
from . import music_plugin
from nrk.utils.Equalizers import Equalizers
@music_plugin.command
@lightbulb.add_checks(lightbulb.guild_only)
@lightbulb.option('filter_name', 'Select EQ filter name', choices=("flat", "boost", "metal", "piano"))
@lightbulb.command(
'eq', "Add a filter to the current song"
)
@lightbulb.implements(lightbulb.PrefixCommand, lightbulb.SlashCommand)
async def eq(ctx: lightbulb.Context) -> None:
node = await ctx.bot.lavalink.get_guild_node(ctx.guild_id)
filter_name = ctx.options.filter_name
if not node or not node.queue:
await ctx.respond("No song playing right now")
return
if filter_name == "flat":
await ctx.bot.lavalink.equalize_all(ctx.guild_id, Equalizers().flat())
await ctx.respond(f"Applied the filter `{filter_name}` successfully")
elif filter_name == "boost":
await ctx.bot.lavalink.equalize_all(ctx.guild_id, Equalizers().boost())
await ctx.respond(f"Applied the filter `{filter_name}` successfully")
elif filter_name == "metal":
await ctx.bot.lavalink.equalize_all(ctx.guild_id, Equalizers().metal())
await ctx.respond(f"Applied the filter `{filter_name}` successfully")
elif filter_name == "piano":
await ctx.bot.lavalink.equalize_all(ctx.guild_id, Equalizers().piano())
await ctx.respond(f"Applied the filter `{filter_name}` successfully")
else:
pass
|
the-stack_106_29093 | import tkinter as tk
from tkinter_gui_builder.widgets.widget_utils.widget_events import WidgetEvents
class Spinbox(tk.Spinbox, WidgetEvents):
def __init__(self, master=None, cnf={}, **kw):
super(tk.Spinbox, self).__init__(master, 'spinbox', cnf, kw)
super(WidgetEvents, self).__init__()
def set_text(self, text):
# handle case if the widget is disabled
entry_state = self['state']
if entry_state == 'disabled':
self.config(state='normal')
self.delete(0, tk.END)
self.insert(0, text)
if entry_state == 'disabled':
self.config(state='disabled')
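# Minimal usage sketch (assumes a Tk root is available; run manually):
#
#     root = tk.Tk()
#     spin = Spinbox(root, from_=0, to=10, state='disabled')
#     spin.pack()
#     spin.set_text("5")   # works even while the widget is disabled
#     root.mainloop()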
|
the-stack_106_29094 | import datetime
import os
from ...utils import paths
try:
from google.cloud import storage # type:ignore
except ImportError:
pass
from typing import Optional
def blob_writer(
source_file_name: str,
target_path: str,
date: Optional[datetime.date] = None,
add_extention: str = '',
**kwargs):
# default the date to today
if date is None:
date = datetime.datetime.today()
# get the project name
project = kwargs.get('project')
if project is None:
raise Exception('blob_writer must have project defined')
    # split the target path into its parts (bucket, path, filename, extension)
bucket, gcs_path, filename, extention = paths.get_parts(target_path)
# get a reference to the gcs bucket
client = storage.Client(project=project)
gcs_bucket = client.get_bucket(bucket)
# avoid collisions
collision_tests = 0
maybe_colliding_filename = paths.date_format(f"{gcs_path}{filename}-{collision_tests:04d}{extention}{add_extention}", date)
blob = gcs_bucket.blob(maybe_colliding_filename)
while blob.exists():
collision_tests += 1
maybe_colliding_filename = paths.date_format(f"{gcs_path}{filename}-{collision_tests:04d}{extention}{add_extention}", date)
blob = gcs_bucket.blob(maybe_colliding_filename)
# save the blob
blob.upload_from_filename(source_file_name)
return maybe_colliding_filename
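# Example call (sketch -- the bucket, project and path layout are hypothetical
# and depend on how paths.get_parts / paths.date_format are implemented):
#
#     blob_writer(
#         source_file_name='/tmp/report.csv',
#         target_path='my-bucket/exports/report.csv',
#         project='my-gcp-project',
#     )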
|
the-stack_106_29095 | # Copyright 2018 Samuel Payne [email protected]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import os
import warnings
import datetime
from cptac.dataset import Dataset
from cptac.dataframe_tools import *
from cptac.exceptions import FailedReindexWarning, PublicationEmbargoWarning, ReindexMapError
class PdcCcrcc(Dataset):
def __init__(self, version="latest", no_internet=False):
"""Load all of the dataframes as values in the self._data dict variable, with names as keys, and format them properly.
Parameters:
        version (str, optional): The version number to load, or the string "latest" to just load the latest available version. Default is "latest".
no_internet (bool, optional): Whether to skip the index update step because it requires an internet connection. This will be skipped automatically if there is no internet at all, but you may want to manually skip it if you have a spotty internet connection. Default is False.
"""
# Set some needed variables, and pass them to the parent Dataset class __init__ function
# This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.
valid_versions = ["1.0"]
data_files = {
"1.0": [
"clinical.tsv.gz",
"phosphoproteome.tsv.gz",
"proteome.tsv.gz",
]
}
# Call the parent class __init__ function
super().__init__(cancer_type="pdcccrcc", version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet, attempt_update_index=False)
# Load the data into dataframes in the self._data dict
loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}"
for file_path in self._data_files_paths: # Loops through files variable
# Print a loading message. We add a dot every time, so the user knows it's not frozen.
loading_msg = loading_msg + "."
print(loading_msg, end='\r')
path_elements = file_path.split(os.sep) # Get a list of the levels of the path
file_name = path_elements[-1] # The last element will be the name of the file. We'll use this to identify files for parsing in the if/elif statements below
if file_name == "clinical.tsv.gz":
df = pd.read_csv(file_path, sep="\t", index_col=0)
self._data["clinical"] = df
if file_name == "phosphoproteome.tsv.gz":
df = pd.read_csv(file_path, sep="\t")
df = df.set_index(["case_submitter_id", "aliquot_submitter_id"])
self._data["phosphoproteomics"] = df
if file_name == "proteome.tsv.gz":
df = pd.read_csv(file_path, sep="\t")
df = df.set_index(["case_submitter_id", "aliquot_submitter_id"])
self._data["proteomics"] = df
print(' ' * len(loading_msg), end='\r') # Erase the loading message
formatting_msg = "Formatting dataframes..."
print(formatting_msg, end='\r')
# NOTE: The code below will not work properly until you have all the
# dataframes formatted properly and loaded into the self._data
# dictionary. That's why they're commented out for now. Go ahead and
# uncomment them when all the data tables are ready. Note that some of
# the lines are marked as just examples, though, and you'll still need
# to adapt them to your specific situation.
# ALSO: This section makes use of several useful functions from the
# dataframe_tools.py file, such as "unionize_indices",
# "generate_sample_status_col", and so on. If you want more information
# about these functions, open that file and look at the docstring at
# the beginning of each functions, which is a triple-quoted string that
# gives an overview of what the function does, a description of what
# each parameter should be, and a description of the returned value. If
# you're using a function in a Jupyter Notebook or Python interpreter,
# you can also get the docstring using the Python "help" function, which
# just checks if the function has a docstring and then prints it if it
# does. An example usage would be "help(reformat_normal_patient_ids)".
# You can use the help function for any function from any library, not
# just cptac; docstrings are a common standard.
# Get a union of all dataframes' indices, with duplicates removed
###FILL: If there are any tables whose index values you don't want
### included in the master index, pass them to the optional 'exclude'
### parameter of the unionize_indices function. This was useful, for
### example, when some datasets' followup data files included samples
### from cohorts that weren't in any data tables besides the followup
### table, so we excluded the followup table from the master index since
### there wasn't any point in creating empty representative rows for
### those samples just because they existed in the followup table.
# master_index = unionize_indices(self._data)
# Use the master index to reindex the clinical dataframe, so the clinical dataframe has a record of every sample in the dataset. Rows that didn't exist before (such as the rows for normal samples) are filled with NaN.
# new_clinical = self._data["clinical"]
# new_clinical = new_clinical.reindex(master_index)
# Add a column called Sample_Tumor_Normal to the clinical dataframe indicating whether each sample was a tumor or normal sample. Use a function from dataframe_tools to generate it.
###FILL: Your dataset should have some way that it marks the Patient IDs
### of normal samples. The example code below is for a dataset that
### marks them by putting an 'N' at the beginning of each one. You will
### need to write a lambda function that takes a given Patient_ID string
### and returns a bool indicating whether it corresponds to a normal
### sample. Pass that lambda function to the 'normal_test' parameter of
### the generate_sample_status_col function when you call it. See
### cptac/dataframe_tools.py for further function documentation.
###START EXAMPLE CODE###################################################
# sample_status_col = generate_sample_status_col(new_clinical, normal_test=lambda sample: sample[0] == 'N')
###END EXAMPLE CODE#####################################################
# new_clinical.insert(0, "Sample_Tumor_Normal", sample_status_col)
# Replace the clinical dataframe in the data dictionary with our new and improved version!
# self._data['clinical'] = new_clinical
# Edit the format of the Patient_IDs to have normal samples marked the same way as in other datasets.
###FILL: You may need to use the code below to reformat the patient IDs
### in your dataset. This applies if all of the normal samples are
### already marked in the original data files in some way, but just not
### in the way we want (e.g. they have an "N" at the beginning of the
### sample ID, instead of a ".N" at the end). Be aware that the case
### with some datasets such as PDAC is different; instead of the normal
### samples already being marked, just not in the way we want, they're
### actually contained in a separate table, with no special marking on
### the sample ids. In those cases you wouldn't use the
### reformat_normal_patient_ids function, and would instead just mark
### the samples in the normal tables with the ".N" before appending them
### to the tumor tables.
### If you do use this function: the standard normal ID format is to
### have the string '.N' appended to the end of the normal patient IDs,
### e.g. the normal patient ID corresponding to C3L-00378 would be
### C3L-00378.N (this way we can easily match two samples from the same
### patient). The example code below is for a dataset where all the
### normal samples have an "N" prepended to the patient IDs. The
### reformat_normal_patient_ids function erases that and puts a ".N" at
### the end. See cptac/dataframe_tools.py for further function
### documentation.
###START EXAMPLE CODE###################################################
# self._data = reformat_normal_patient_ids(self._data, existing_identifier="N", existing_identifier_location="start")
###END EXAMPLE CODE#####################################################
# Call function from dataframe_tools.py to sort all tables first by sample status, and then by the index
# self._data = sort_all_rows(self._data)
# Call function from dataframe_tools.py to standardize the names of the index and column axes
# self._data = standardize_axes_names(self._data)
print(" " * len(formatting_msg), end='\r') # Erase the formatting message
|
the-stack_106_29104 | import graphene
import pytest
from saleor.discount import DiscountValueType, VoucherType
from saleor.graphql.discount.enums import (
DiscountValueTypeEnum, VoucherTypeEnum)
from tests.api.utils import get_graphql_content
def test_voucher_query(staff_api_client, voucher, permission_manage_discounts):
query = """
query vouchers {
vouchers(first: 1) {
edges {
node {
type
name
code
usageLimit
used
startDate
discountValueType
discountValue
}
}
}
}
"""
response = staff_api_client.post_graphql(
query, permissions=[permission_manage_discounts])
content = get_graphql_content(response)
data = content['data']['vouchers']['edges'][0]['node']
assert data['type'] == voucher.type.upper()
assert data['name'] == voucher.name
assert data['code'] == voucher.code
assert data['usageLimit'] == voucher.usage_limit
assert data['used'] == voucher.used
assert data['startDate'] == voucher.start_date.isoformat()
assert data['discountValueType'] == voucher.discount_value_type.upper()
assert data['discountValue'] == voucher.discount_value
def test_sale_query(staff_api_client, sale, permission_manage_discounts):
query = """
query sales {
sales(first: 1) {
edges {
node {
type
name
value
startDate
}
}
}
}
"""
response = staff_api_client.post_graphql(
query, permissions=[permission_manage_discounts])
content = get_graphql_content(response)
data = content['data']['sales']['edges'][0]['node']
assert data['type'] == sale.type.upper()
assert data['name'] == sale.name
assert data['value'] == sale.value
assert data['startDate'] == sale.start_date.isoformat()
def test_create_voucher(staff_api_client, permission_manage_discounts):
query = """
mutation voucherCreate(
$type: VoucherTypeEnum, $name: String, $code: String,
$discountValueType: DiscountValueTypeEnum,
$discountValue: Decimal, $minAmountSpent: Decimal) {
voucherCreate(input: {
name: $name, type: $type, code: $code,
discountValueType: $discountValueType, discountValue: $discountValue,
minAmountSpent: $minAmountSpent}) {
errors {
field
message
}
voucher {
type
minAmountSpent {
amount
}
name
code
discountValueType
}
}
}
"""
variables = {
'name': 'test voucher',
'type': VoucherTypeEnum.VALUE.name,
'code': 'testcode123',
'discountValueType': DiscountValueTypeEnum.FIXED.name,
'discountValue': 10.12,
'minAmountSpent': 1.12}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts])
content = get_graphql_content(response)
data = content['data']['voucherCreate']['voucher']
assert data['type'] == VoucherType.VALUE.upper()
assert data['minAmountSpent']['amount'] == 1.12
assert data['name'] == 'test voucher'
assert data['code'] == 'testcode123'
assert data['discountValueType'] == DiscountValueType.FIXED.upper()
def test_update_voucher(
staff_api_client, voucher, permission_manage_discounts):
query = """
mutation voucherUpdate($code: String,
$discountValueType: DiscountValueTypeEnum, $id: ID!) {
voucherUpdate(id: $id, input: {
code: $code, discountValueType: $discountValueType}) {
errors {
field
message
}
voucher {
code
discountValueType
}
}
}
"""
# Set discount value type to 'fixed' and change it in mutation
voucher.discount_value_type = DiscountValueType.FIXED
voucher.save()
assert voucher.code != 'testcode123'
variables = {
'id': graphene.Node.to_global_id('Voucher', voucher.id),
'code': 'testcode123',
'discountValueType': DiscountValueTypeEnum.PERCENTAGE.name}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts])
content = get_graphql_content(response)
data = content['data']['voucherUpdate']['voucher']
assert data['code'] == 'testcode123'
assert data['discountValueType'] == DiscountValueType.PERCENTAGE.upper()
def test_voucher_delete_mutation(
staff_api_client, voucher, permission_manage_discounts):
query = """
mutation DeleteVoucher($id: ID!) {
voucherDelete(id: $id) {
voucher {
name
id
}
errors {
field
message
}
}
}
"""
variables = {'id': graphene.Node.to_global_id('Voucher', voucher.id)}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts])
content = get_graphql_content(response)
data = content['data']['voucherDelete']
assert data['voucher']['name'] == voucher.name
with pytest.raises(voucher._meta.model.DoesNotExist):
voucher.refresh_from_db()
def test_create_sale(staff_api_client, permission_manage_discounts):
query = """
mutation saleCreate(
$type: DiscountValueTypeEnum, $name: String, $value: Decimal) {
saleCreate(input: {name: $name, type: $type, value: $value}) {
errors {
field
message
}
sale {
type
name
value
}
}
}
"""
variables = {
'name': 'test sale',
'type': DiscountValueTypeEnum.FIXED.name,
'value': '10.12'}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts])
content = get_graphql_content(response)
data = content['data']['saleCreate']['sale']
assert data['type'] == DiscountValueType.FIXED.upper()
assert data['name'] == 'test sale'
assert data['value'] == 10.12
def test_update_sale(staff_api_client, sale, permission_manage_discounts):
query = """
mutation saleUpdate($type: DiscountValueTypeEnum, $id: ID!) {
saleUpdate(id: $id, input: {type: $type}) {
errors {
field
message
}
sale {
type
}
}
}
"""
# Set discount value type to 'fixed' and change it in mutation
sale.type = DiscountValueType.FIXED
sale.save()
variables = {
'id': graphene.Node.to_global_id('Sale', sale.id),
'type': DiscountValueTypeEnum.PERCENTAGE.name}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts])
content = get_graphql_content(response)
data = content['data']['saleUpdate']['sale']
assert data['type'] == DiscountValueType.PERCENTAGE.upper()
def test_sale_delete_mutation(
staff_api_client, sale, permission_manage_discounts):
query = """
mutation DeleteSale($id: ID!) {
saleDelete(id: $id) {
sale {
name
id
}
errors {
field
message
}
}
}
"""
variables = {'id': graphene.Node.to_global_id('Sale', sale.id)}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts])
content = get_graphql_content(response)
data = content['data']['saleDelete']
assert data['sale']['name'] == sale.name
with pytest.raises(sale._meta.model.DoesNotExist):
sale.refresh_from_db()
def test_validate_voucher(
voucher, staff_api_client, permission_manage_discounts):
query = """
mutation voucherUpdate(
$id: ID!, $type: VoucherTypeEnum) {
voucherUpdate(
id: $id, input: {type: $type}) {
errors {
field
message
}
}
}
"""
# apparently can't do so via pytest parametrize
# as it parses VoucherTypeEnum into str format
fields = (
(VoucherTypeEnum.CATEGORY, 'categories'),
(VoucherTypeEnum.PRODUCT, 'products'),
(VoucherTypeEnum.COLLECTION, 'collections'))
staff_api_client.user.user_permissions.add(permission_manage_discounts)
for voucher_type, field_name in fields:
variables = {
'type': voucher_type.name,
'id': graphene.Node.to_global_id('Voucher', voucher.id)}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content['data']['voucherUpdate']['errors'][0]
assert data['field'] == field_name
assert data['message'] == 'This field is required.'
|
the-stack_106_29105 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
from __future__ import print_function
__version__ = '2.0.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import argparse
import ast # Exposed through the API.
import contextlib
import cpplint
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import multiprocessing
import os # Somewhat exposed through the API.
import random
import re # Exposed through the API.
import signal
import six
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback
import unittest # Exposed through the API.
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners as owners_db
import owners_client
import owners_finder
import presubmit_canned_checks
import rdb_wrapper
import scm
import subprocess2 as subprocess # Exposed through the API.
if sys.version_info.major == 2:
# TODO(1009814): Expose urllib2 only through urllib_request and urllib_error
import urllib2 # Exposed through the API.
import urlparse
import urllib2 as urllib_request
import urllib2 as urllib_error
else:
import urllib.parse as urlparse
import urllib.request as urllib_request
import urllib.error as urllib_error
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
def time_time():
# Use this so that it can be mocked in tests without interfering with python
# system machinery.
return time.time()
class PresubmitFailure(Exception):
pass
class CommandData(object):
def __init__(self, name, cmd, kwargs, message, python3=False):
self.name = name
self.cmd = cmd
self.stdin = kwargs.get('stdin', None)
self.kwargs = kwargs.copy()
self.kwargs['stdout'] = subprocess.PIPE
self.kwargs['stderr'] = subprocess.STDOUT
self.kwargs['stdin'] = subprocess.PIPE
self.message = message
self.info = None
self.python3 = python3
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate().
class SigintHandler(object):
sigint_returncodes = {-signal.SIGINT, # Unix
-1073741510, # Windows
}
def __init__(self):
self.__lock = threading.Lock()
self.__processes = set()
self.__got_sigint = False
self.__previous_signal = signal.signal(signal.SIGINT, self.interrupt)
def __on_sigint(self):
self.__got_sigint = True
while self.__processes:
try:
self.__processes.pop().terminate()
except OSError:
pass
def interrupt(self, signal_num, frame):
with self.__lock:
self.__on_sigint()
self.__previous_signal(signal_num, frame)
def got_sigint(self):
with self.__lock:
return self.__got_sigint
def wait(self, p, stdin):
with self.__lock:
if self.__got_sigint:
p.terminate()
self.__processes.add(p)
stdout, stderr = p.communicate(stdin)
code = p.returncode
with self.__lock:
self.__processes.discard(p)
if code in self.sigint_returncodes:
self.__on_sigint()
return stdout, stderr
sigint_handler = SigintHandler()
class Timer(object):
def __init__(self, timeout, fn):
self.completed = False
self._fn = fn
self._timer = threading.Timer(timeout, self._onTimer) if timeout else None
def __enter__(self):
if self._timer:
self._timer.start()
return self
def __exit__(self, _type, _value, _traceback):
if self._timer:
self._timer.cancel()
def _onTimer(self):
self._fn()
self.completed = True
class ThreadPool(object):
def __init__(self, pool_size=None, timeout=None):
self.timeout = timeout
self._pool_size = pool_size or multiprocessing.cpu_count()
self._messages = []
self._messages_lock = threading.Lock()
self._tests = []
self._tests_lock = threading.Lock()
self._nonparallel_tests = []
def _GetCommand(self, test):
vpython = 'vpython'
if test.python3:
vpython += '3'
if sys.platform == 'win32':
vpython += '.bat'
cmd = test.cmd
if cmd[0] == 'python':
cmd = list(cmd)
cmd[0] = vpython
elif cmd[0].endswith('.py'):
cmd = [vpython] + cmd
# On Windows, scripts on the current directory take precedence over PATH, so
# that when testing depot_tools on Windows, calling `vpython.bat` will
# execute the copy of vpython of the depot_tools under test instead of the
# one in the bot.
# As a workaround, we run the tests from the parent directory instead.
if (cmd[0] == vpython and
'cwd' in test.kwargs and
os.path.basename(test.kwargs['cwd']) == 'depot_tools'):
test.kwargs['cwd'] = os.path.dirname(test.kwargs['cwd'])
cmd[1] = os.path.join('depot_tools', cmd[1])
return cmd
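  # Example of what _GetCommand produces (illustrative; python3=True on
  # Windows):
  #   ['python', 'run_tests.py'] -> ['vpython3.bat', 'run_tests.py']
  #   ['do_check.py', '--fast']  -> ['vpython3.bat', 'do_check.py', '--fast']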
def _RunWithTimeout(self, cmd, stdin, kwargs):
p = subprocess.Popen(cmd, **kwargs)
with Timer(self.timeout, p.terminate) as timer:
stdout, _ = sigint_handler.wait(p, stdin)
if timer.completed:
stdout = 'Process timed out after %ss\n%s' % (self.timeout, stdout)
    return p.returncode, stdout.decode('utf-8', 'ignore')
def CallCommand(self, test):
"""Runs an external program.
This function converts invocation of .py files and invocations of 'python'
to vpython invocations.
"""
cmd = self._GetCommand(test)
try:
start = time_time()
returncode, stdout = self._RunWithTimeout(cmd, test.stdin, test.kwargs)
duration = time_time() - start
except Exception:
duration = time_time() - start
return test.message(
'%s\n%s exec failure (%4.2fs)\n%s' % (
test.name, ' '.join(cmd), duration, traceback.format_exc()))
if returncode != 0:
return test.message(
'%s\n%s (%4.2fs) failed\n%s' % (
test.name, ' '.join(cmd), duration, stdout))
if test.info:
return test.info('%s\n%s (%4.2fs)' % (test.name, ' '.join(cmd), duration))
def AddTests(self, tests, parallel=True):
if parallel:
self._tests.extend(tests)
else:
self._nonparallel_tests.extend(tests)
def RunAsync(self):
self._messages = []
def _WorkerFn():
while True:
test = None
with self._tests_lock:
if not self._tests:
break
test = self._tests.pop()
result = self.CallCommand(test)
if result:
with self._messages_lock:
self._messages.append(result)
def _StartDaemon():
t = threading.Thread(target=_WorkerFn)
t.daemon = True
t.start()
return t
while self._nonparallel_tests:
test = self._nonparallel_tests.pop()
result = self.CallCommand(test)
if result:
self._messages.append(result)
if self._tests:
threads = [_StartDaemon() for _ in range(self._pool_size)]
for worker in threads:
worker.join()
return self._messages
def normpath(path):
'''Version of os.path.normpath that also changes backward slashes to
forward slashes when not running on Windows.
'''
# This is safe to always do because the Windows version of os.path.normpath
# will replace forward slashes with backward slashes.
path = path.replace(os.sep, '/')
return os.path.normpath(path)
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
def prompt_should_continue(prompt_string):
sys.stdout.write(prompt_string)
sys.stdout.flush()
response = sys.stdin.readline().strip().lower()
return response in ('y', 'yes')
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = long_text.rstrip()
def handle(self):
sys.stdout.write(self._message)
sys.stdout.write('\n')
for index, item in enumerate(self._items):
sys.stdout.write(' ')
# Write separately in case it's unicode.
sys.stdout.write(str(item))
if index < len(self._items) - 1:
sys.stdout.write(' \\')
sys.stdout.write('\n')
if self._long_text:
sys.stdout.write('\n***************\n')
# Write separately in case it's unicode.
sys.stdout.write(str(self._long_text))
sys.stdout.write('\n***************\n')
def json_format(self):
return {
'message': self._message,
'items': [str(item) for item in self._items],
'long_text': self._long_text,
'fatal': self.fatal
}
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
"""A hard presubmit error."""
fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
"""An warning that prompts the user if they want to continue."""
should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
"""Just print something to the screen -- but it's not even a warning."""
pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
"""A warning that should be included in the review request email."""
def __init__(self, *args, **kwargs):
super(_MailTextResult, self).__init__()
raise NotImplementedError()
class GerritAccessor(object):
"""Limited Gerrit functionality for canned presubmit checks to work.
To avoid excessive Gerrit calls, caches the results.
"""
def __init__(self, url=None, project=None, branch=None):
self.host = urlparse.urlparse(url).netloc if url else None
self.project = project
self.branch = branch
self.cache = {}
self.code_owners_enabled = None
def _FetchChangeDetail(self, issue):
# Separate function to be easily mocked in tests.
try:
return gerrit_util.GetChangeDetail(
self.host, str(issue),
['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise Exception('Either Gerrit issue %s doesn\'t exist, or '
'no credentials to fetch issue details' % issue)
raise
def GetChangeInfo(self, issue):
"""Returns labels and all revisions (patchsets) for this issue.
The result is a dictionary according to Gerrit REST Api.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
However, API isn't very clear what's inside, so see tests for example.
"""
assert issue
cache_key = int(issue)
if cache_key not in self.cache:
self.cache[cache_key] = self._FetchChangeDetail(issue)
return self.cache[cache_key]
def GetChangeDescription(self, issue, patchset=None):
"""If patchset is none, fetches current patchset."""
info = self.GetChangeInfo(issue)
# info is a reference to cache. We'll modify it here adding description to
# it to the right patchset, if it is not yet there.
# Find revision info for the patchset we want.
if patchset is not None:
for rev, rev_info in info['revisions'].items():
if str(rev_info['_number']) == str(patchset):
break
else:
raise Exception('patchset %s doesn\'t exist in issue %s' % (
patchset, issue))
else:
rev = info['current_revision']
rev_info = info['revisions'][rev]
return rev_info['commit']['message']
def GetDestRef(self, issue):
ref = self.GetChangeInfo(issue)['branch']
if not ref.startswith('refs/'):
# NOTE: it is possible to create 'refs/x' branch,
# aka 'refs/heads/refs/x'. However, this is ill-advised.
ref = 'refs/heads/%s' % ref
return ref
def _GetApproversForLabel(self, issue, label):
change_info = self.GetChangeInfo(issue)
label_info = change_info.get('labels', {}).get(label, {})
values = label_info.get('values', {}).keys()
if not values:
return []
max_value = max(int(v) for v in values)
return [v for v in label_info.get('all', [])
if v.get('value', 0) == max_value]
def IsBotCommitApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Bot-Commit'))
def IsOwnersOverrideApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Owners-Override'))
def GetChangeOwner(self, issue):
return self.GetChangeInfo(issue)['owner']['email']
def GetChangeReviewers(self, issue, approving_only=True):
changeinfo = self.GetChangeInfo(issue)
if approving_only:
reviewers = self._GetApproversForLabel(issue, 'Code-Review')
else:
reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
return [r.get('email') for r in reviewers]
def UpdateDescription(self, description, issue):
gerrit_util.SetCommitMessage(self.host, issue, description, notify='NONE')
def IsCodeOwnersEnabledOnRepo(self):
if self.code_owners_enabled is None:
self.code_owners_enabled = gerrit_util.IsCodeOwnersEnabledOnRepo(
self.host, self.project)
return self.code_owners_enabled
class OutputApi(object):
"""An instance of OutputApi gets passed to presubmit scripts so that they
can output various types of results.
"""
PresubmitResult = _PresubmitResult
PresubmitError = _PresubmitError
PresubmitPromptWarning = _PresubmitPromptWarning
PresubmitNotifyResult = _PresubmitNotifyResult
MailTextResult = _MailTextResult
def __init__(self, is_committing):
self.is_committing = is_committing
self.more_cc = []
def AppendCC(self, cc):
"""Appends a user to cc for this change."""
self.more_cc.append(cc)
def PresubmitPromptOrNotify(self, *args, **kwargs):
"""Warn the user when uploading, but only notify if committing."""
if self.is_committing:
return self.PresubmitNotifyResult(*args, **kwargs)
return self.PresubmitPromptWarning(*args, **kwargs)
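# Illustrative sketch of how a repository's PRESUBMIT.py typically consumes
# OutputApi (the hook name and check below are examples, not part of this
# module):
#
#   def CheckChangeOnUpload(input_api, output_api):
#     results = []
#     if not input_api.AffectedFiles():
#       results.append(output_api.PresubmitNotifyResult('No files affected.'))
#     return results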
class InputApi(object):
"""An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
# Method could be a function
# pylint: disable=no-self-use
# File extensions that are considered source files from a style guide
# perspective. Don't modify this list from a presubmit script!
#
# Files without an extension aren't included in the list. If you want to
# filter them as source files, add r'(^|.*?[\\\/])[^.]+$' to the allow list.
# Note that ALL CAPS files are skipped in DEFAULT_FILES_TO_SKIP below.
DEFAULT_FILES_TO_CHECK = (
# C++ and friends
r'.+\.c$', r'.+\.cc$', r'.+\.cpp$', r'.+\.h$', r'.+\.m$', r'.+\.mm$',
r'.+\.inl$', r'.+\.asm$', r'.+\.hxx$', r'.+\.hpp$', r'.+\.s$', r'.+\.S$',
# Scripts
r'.+\.js$', r'.+\.py$', r'.+\.sh$', r'.+\.rb$', r'.+\.pl$', r'.+\.pm$',
# Other
r'.+\.java$', r'.+\.mk$', r'.+\.am$', r'.+\.css$', r'.+\.mojom$',
r'.+\.fidl$'
)
# Path regexp that should be excluded from being considered containing source
# files. Don't modify this list from a presubmit script!
DEFAULT_FILES_TO_SKIP = (
r'testing_support[\\\/]google_appengine[\\\/].*',
r'.*\bexperimental[\\\/].*',
# Exclude third_party/.* but NOT third_party/{WebKit,blink}
# (crbug.com/539768 and crbug.com/836555).
r'.*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*',
# Output directories (just in case)
r'.*\bDebug[\\\/].*',
r'.*\bRelease[\\\/].*',
r'.*\bxcodebuild[\\\/].*',
r'.*\bout[\\\/].*',
# All caps files like README and LICENCE.
r'.*\b[A-Z0-9_]{2,}$',
# SCM (can happen in dual SCM configuration). (Slightly over aggressive)
r'(|.*[\\\/])\.git[\\\/].*',
r'(|.*[\\\/])\.svn[\\\/].*',
# There is no point in processing a patch file.
r'.+\.diff$',
r'.+\.patch$',
)
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_WHITE_LIST(self):
return self.DEFAULT_FILES_TO_CHECK
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_WHITE_LIST.setter
def DEFAULT_WHITE_LIST(self, value):
self.DEFAULT_FILES_TO_CHECK = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_ALLOW_LIST(self):
return self.DEFAULT_FILES_TO_CHECK
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_ALLOW_LIST.setter
def DEFAULT_ALLOW_LIST(self, value):
self.DEFAULT_FILES_TO_CHECK = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_BLACK_LIST(self):
return self.DEFAULT_FILES_TO_SKIP
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_BLACK_LIST.setter
def DEFAULT_BLACK_LIST(self, value):
self.DEFAULT_FILES_TO_SKIP = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_BLOCK_LIST(self):
return self.DEFAULT_FILES_TO_SKIP
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_BLOCK_LIST.setter
def DEFAULT_BLOCK_LIST(self, value):
self.DEFAULT_FILES_TO_SKIP = value
def __init__(self, change, presubmit_path, is_committing,
verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""Builds an InputApi object.
Args:
change: A presubmit.Change object.
presubmit_path: The path to the presubmit script being processed.
is_committing: True if the change is about to be committed.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
# Version number of the presubmit_support script.
self.version = [int(x) for x in __version__.split('.')]
self.change = change
self.is_committing = is_committing
self.gerrit = gerrit_obj
self.dry_run = dry_run
self.parallel = parallel
self.thread_pool = thread_pool or ThreadPool()
# We expose various modules and functions as attributes of the input_api
# so that presubmit scripts don't have to import them.
self.ast = ast
self.basename = os.path.basename
self.cpplint = cpplint
self.fnmatch = fnmatch
self.gclient_paths = gclient_paths
    # TODO(yyanagisawa): stop exposing this when python3 becomes the default.
# Since python3's tempfile has TemporaryDirectory, we do not need this.
self.temporary_directory = gclient_utils.temporary_directory
self.glob = glob.glob
self.json = json
self.logging = logging.getLogger('PRESUBMIT')
self.os_listdir = os.listdir
self.os_path = os.path
self.os_stat = os.stat
self.os_walk = os.walk
self.re = re
self.subprocess = subprocess
self.sys = sys
self.tempfile = tempfile
self.time = time
self.unittest = unittest
if sys.version_info.major == 2:
self.urllib2 = urllib2
self.urllib_request = urllib_request
self.urllib_error = urllib_error
self.is_windows = sys.platform == 'win32'
# Set python_executable to 'vpython' in order to allow scripts in other
# repos (e.g. src.git) to automatically pick up that repo's .vpython file,
# instead of inheriting the one in depot_tools.
self.python_executable = 'vpython'
# Offer a python 3 executable for use during the migration off of python 2.
self.python3_executable = 'vpython3'
self.environ = os.environ
# InputApi.platform is the platform you're currently running on.
self.platform = sys.platform
self.cpu_count = multiprocessing.cpu_count()
# The local path of the currently-being-processed presubmit script.
self._current_presubmit_path = os.path.dirname(presubmit_path)
# We carry the canned checks so presubmit scripts can easily use them.
self.canned_checks = presubmit_canned_checks
# Temporary files we must manually remove at the end of a run.
self._named_temporary_files = []
self.owners_client = None
if self.gerrit:
self.owners_client = owners_client.GetCodeOwnersClient(
root=change.RepositoryRoot(),
upstream=change.UpstreamBranch(),
host=self.gerrit.host,
project=self.gerrit.project,
branch=self.gerrit.branch)
self.owners_db = owners_db.Database(
change.RepositoryRoot(), fopen=open, os_path=self.os_path)
self.owners_finder = owners_finder.OwnersFinder
self.verbose = verbose
self.Command = CommandData
# Replace <hash_map> and <hash_set> as headers that need to be included
# with 'base/containers/hash_tables.h' instead.
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
self.cpplint._re_pattern_templates = [
(a, b, 'base/containers/hash_tables.h')
if header in ('<hash_map>', '<hash_set>') else (a, b, header)
for (a, b, header) in cpplint._re_pattern_templates
]
def SetTimeout(self, timeout):
self.thread_pool.timeout = timeout
def PresubmitLocalPath(self):
"""Returns the local path of the presubmit script currently being run.
This is useful if you don't want to hard-code absolute paths in the
    presubmit script. For example, it can be used to find another file
relative to the PRESUBMIT.py script, so the whole tree can be branched and
the presubmit script still works, without editing its content.
"""
return self._current_presubmit_path
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Same as input_api.change.AffectedFiles() except only lists files
(and optionally directories) in the same directory as the current presubmit
script, or subdirectories thereof. Note that files are listed using the OS
path separator, so backslashes are used as separators on Windows.
"""
dir_with_slash = normpath('%s/' % self.PresubmitLocalPath())
if len(dir_with_slash) == 1:
dir_with_slash = ''
return list(filter(
lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
self.change.AffectedFiles(include_deletes, file_filter)))
def LocalPaths(self):
"""Returns local paths of input_api.AffectedFiles()."""
paths = [af.LocalPath() for af in self.AffectedFiles()]
logging.debug('LocalPaths: %s', paths)
return paths
def AbsoluteLocalPaths(self):
"""Returns absolute local paths of input_api.AffectedFiles()."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Same as input_api.change.AffectedTestableFiles() except only lists files
in the same directory as the current presubmit script, or subdirectories
thereof.
"""
if include_deletes is not None:
warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def FilterSourceFile(self,
affected_file,
files_to_check=None,
files_to_skip=None,
allow_list=None,
block_list=None):
"""Filters out files that aren't considered 'source file'.
    If files_to_check or files_to_skip is None, InputApi.DEFAULT_FILES_TO_CHECK
    and InputApi.DEFAULT_FILES_TO_SKIP are used respectively.
    The lists will be compiled as regular expressions and
    AffectedFile.LocalPath() needs to pass both lists.
Note: Copy-paste this function to suit your needs or use a lambda function.
"""
if files_to_check is None:
files_to_check = self.DEFAULT_FILES_TO_CHECK
if files_to_skip is None:
files_to_skip = self.DEFAULT_FILES_TO_SKIP
def Find(affected_file, items):
local_path = affected_file.LocalPath()
for item in items:
if self.re.match(item, local_path):
return True
return False
return (Find(affected_file, files_to_check) and
not Find(affected_file, files_to_skip))
def AffectedSourceFiles(self, source_file):
"""Filter the list of AffectedTestableFiles by the function source_file.
If source_file is None, InputApi.FilterSourceFile() is used.
"""
if not source_file:
source_file = self.FilterSourceFile
return list(filter(source_file, self.AffectedTestableFiles()))
def RightHandSideLines(self, source_file_filter=None):
"""An iterator over all text lines in 'new' version of changed files.
Only lists lines from new or modified text files in the change that are
contained by the directory of the currently executing presubmit script.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
    Note: Line endings (CR and/or LF) are stripped off.
"""
files = self.AffectedSourceFiles(source_file_filter)
return _RightHandSideLinesImpl(files)
def ReadFile(self, file_item, mode='r'):
"""Reads an arbitrary file.
Deny reading anything outside the repository.
"""
if isinstance(file_item, AffectedFile):
file_item = file_item.AbsoluteLocalPath()
if not file_item.startswith(self.change.RepositoryRoot()):
raise IOError('Access outside the repository root is denied.')
return gclient_utils.FileRead(file_item, mode)
def CreateTemporaryFile(self, **kwargs):
"""Returns a named temporary file that must be removed with a call to
RemoveTemporaryFiles().
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
except for |delete|, which is always set to False.
Presubmit checks that need to create a temporary file and pass it for
reading should use this function instead of NamedTemporaryFile(), as
Windows fails to open a file that is already open for writing.
with input_api.CreateTemporaryFile() as f:
f.write('xyz')
f.close()
input_api.subprocess.check_output(['script-that', '--reads-from',
f.name])
Note that callers of CreateTemporaryFile() should not worry about removing
any temporary file; this is done transparently by the presubmit handling
code.
"""
if 'delete' in kwargs:
# Prevent users from passing |delete|; we take care of file deletion
# ourselves and this prevents unintuitive error messages when we pass
# delete=False and 'delete' is also in kwargs.
raise TypeError('CreateTemporaryFile() does not take a "delete" '
'argument, file deletion is handled automatically by '
'the same presubmit_support code that creates InputApi '
'objects.')
temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
self._named_temporary_files.append(temp_file.name)
return temp_file
@property
def tbr(self):
"""Returns if a change is TBR'ed."""
return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
def RunTests(self, tests_mix, parallel=True):
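    """Runs the given test commands and returns their result messages.
    Already-built OutputApi.PresubmitResult instances in tests_mix are passed
    through unchanged; the rest are queued on the thread pool and, when
    --parallel is in use, only run once every PRESUBMIT file has been processed.
    """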
tests = []
msgs = []
for t in tests_mix:
if isinstance(t, OutputApi.PresubmitResult) and t:
msgs.append(t)
else:
assert issubclass(t.message, _PresubmitResult)
tests.append(t)
if self.verbose:
t.info = _PresubmitNotifyResult
if not t.kwargs.get('cwd'):
t.kwargs['cwd'] = self.PresubmitLocalPath()
self.thread_pool.AddTests(tests, parallel)
# When self.parallel is True (i.e. --parallel is passed as an option)
# RunTests doesn't actually run tests. It adds them to a ThreadPool that
# will run all tests once all PRESUBMIT files are processed.
# Otherwise, it will run them and return the results.
if not self.parallel:
msgs.extend(self.thread_pool.RunAsync())
return msgs
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
"""DiffCache implementation for git; gets all file diffs at once."""
def __init__(self, upstream):
super(_GitDiffCache, self).__init__(upstream=upstream)
self._diffs_by_file = None
def GetDiff(self, path, local_root):
if not self._diffs_by_file:
      # Compute a single diff for all files and parse the output; with git
      # this is much faster than computing one diff for each file.
diffs = {}
# Don't specify any filenames below, because there are command line length
# limits on some platforms and GenerateDiff would fail.
unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
branch=self._upstream)
      # This regex matches the path twice, separated by a space. Note that
      # the filename itself may contain spaces.
file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
current_diff = []
keep_line_endings = True
for x in unified_diff.splitlines(keep_line_endings):
match = file_marker.match(x)
if match:
# Marks the start of a new per-file section.
diffs[match.group('filename')] = current_diff = [x]
elif x.startswith('diff --git'):
raise PresubmitFailure('Unexpected diff line: %s' % x)
else:
current_diff.append(x)
self._diffs_by_file = dict(
(normpath(path), ''.join(diff)) for path, diff in diffs.items())
if path not in self._diffs_by_file:
raise PresubmitFailure(
'Unified diff did not contain entry for file %s' % path)
return self._diffs_by_file[path]
def GetOldContents(self, path, local_root):
return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
"""Representation of a file in a change."""
DIFF_CACHE = _DiffCache
# Method could be a function
# pylint: disable=no-self-use
def __init__(self, path, action, repository_root, diff_cache):
self._path = path
self._action = action
self._local_root = repository_root
self._is_directory = None
self._cached_changed_contents = None
self._cached_new_contents = None
self._diff_cache = diff_cache
logging.debug('%s(%s)', self.__class__.__name__, self._path)
def LocalPath(self):
"""Returns the path of this file on the local disk relative to client root.
This should be used for error messages but not for accessing files,
because presubmit checks are run with CWD=PresubmitLocalPath() (which is
often != client root).
"""
return normpath(self._path)
def AbsoluteLocalPath(self):
"""Returns the absolute path of this file on the local disk.
"""
return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
def Action(self):
"""Returns the action on this opened file, e.g. A, M, D, etc."""
return self._action
def IsTestableFile(self):
"""Returns True if the file is a text file and not a binary file.
    Deleted files are not text files."""
raise NotImplementedError() # Implement when needed
def IsTextFile(self):
"""An alias to IsTestableFile for backwards compatibility."""
return self.IsTestableFile()
def OldContents(self):
"""Returns an iterator over the lines in the old version of file.
The old version is the file before any modifications in the user's
workspace, i.e. the 'left hand side'.
Contents will be empty if the file is a directory or does not exist.
    Note: Line endings (CR and/or LF) are stripped off.
"""
return self._diff_cache.GetOldContents(self.LocalPath(),
self._local_root).splitlines()
def NewContents(self):
"""Returns an iterator over the lines in the new version of file.
The new version is the file in the user's workspace, i.e. the 'right hand
side'.
Contents will be empty if the file is a directory or does not exist.
    Note: Line endings (CR and/or LF) are stripped off.
"""
if self._cached_new_contents is None:
self._cached_new_contents = []
try:
self._cached_new_contents = gclient_utils.FileRead(
self.AbsoluteLocalPath(), 'rU').splitlines()
except IOError:
pass # File not found? That's fine; maybe it was deleted.
except UnicodeDecodeError as e:
# log the filename since we're probably trying to read a binary
# file, and shouldn't be.
print('Error reading %s: %s' % (self.AbsoluteLocalPath(), e))
raise
return self._cached_new_contents[:]
def ChangedContents(self, keeplinebreaks=False):
"""Returns a list of tuples (line number, line text) of all new lines.
This relies on the scm diff output describing each changed code section
with a line of the form
^@@ <old line num>,<old size> <new line num>,<new size> @@$
"""
# Don't return cached results when line breaks are requested.
if not keeplinebreaks and self._cached_changed_contents is not None:
return self._cached_changed_contents[:]
result = []
line_num = 0
# The keeplinebreaks parameter to splitlines must be True or else the
# CheckForWindowsLineEndings presubmit will be a NOP.
for line in self.GenerateScmDiff().splitlines(keeplinebreaks):
m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
if m:
line_num = int(m.groups(1)[0])
continue
if line.startswith('+') and not line.startswith('++'):
result.append((line_num, line[1:]))
if not line.startswith('-'):
line_num += 1
# Don't cache results with line breaks.
if keeplinebreaks:
      return result
self._cached_changed_contents = result
return self._cached_changed_contents[:]
def __str__(self):
return self.LocalPath()
def GenerateScmDiff(self):
return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
class GitAffectedFile(AffectedFile):
"""Representation of a file in a change out of a git checkout."""
# Method 'NNN' is abstract in class 'NNN' but is not overridden
# pylint: disable=abstract-method
DIFF_CACHE = _GitDiffCache
def __init__(self, *args, **kwargs):
AffectedFile.__init__(self, *args, **kwargs)
self._server_path = None
self._is_testable_file = None
def IsTestableFile(self):
if self._is_testable_file is None:
if self.Action() == 'D':
# A deleted file is not testable.
self._is_testable_file = False
else:
self._is_testable_file = os.path.isfile(self.AbsoluteLocalPath())
return self._is_testable_file
class Change(object):
"""Describe a change.
Used directly by the presubmit scripts to query the current change being
tested.
Instance members:
tags: Dictionary of KEY=VALUE pairs found in the change description.
self.KEY: equivalent to tags['KEY']
"""
_AFFECTED_FILES = AffectedFile
# Matches key/value (or 'tag') lines in changelist descriptions.
TAG_LINE_RE = re.compile(
'^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
scm = ''
def __init__(
self, name, description, local_root, files, issue, patchset, author,
upstream=None):
if files is None:
files = []
self._name = name
# Convert root into an absolute path.
self._local_root = os.path.abspath(local_root)
self._upstream = upstream
self.issue = issue
self.patchset = patchset
self.author_email = author
self._full_description = ''
self.tags = {}
self._description_without_tags = ''
self.SetDescriptionText(description)
assert all(
(isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files
diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
self._affected_files = [
self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
for action, path in files
]
def UpstreamBranch(self):
"""Returns the upstream branch for the change."""
return self._upstream
def Name(self):
"""Returns the change name."""
return self._name
def DescriptionText(self):
"""Returns the user-entered changelist description, minus tags.
Any line in the user-provided description starting with e.g. 'FOO='
(whitespace permitted before and around) is considered a tag line. Such
lines are stripped out of the description this function returns.
"""
return self._description_without_tags
def FullDescriptionText(self):
"""Returns the complete changelist description including tags."""
return self._full_description
def SetDescriptionText(self, description):
"""Sets the full description text (including tags) to |description|.
Also updates the list of tags."""
self._full_description = description
# From the description text, build up a dictionary of key/value pairs
# plus the description minus all key/value or 'tag' lines.
description_without_tags = []
self.tags = {}
for line in self._full_description.splitlines():
m = self.TAG_LINE_RE.match(line)
if m:
self.tags[m.group('key')] = m.group('value')
else:
description_without_tags.append(line)
# Change back to text and remove whitespace at end.
self._description_without_tags = (
'\n'.join(description_without_tags).rstrip())
def AddDescriptionFooter(self, key, value):
"""Adds the given footer to the change description.
Args:
key: A string with the key for the git footer. It must conform to
the git footers format (i.e. 'List-Of-Tokens') and will be case
normalized so that each token is title-cased.
value: A string with the value for the git footer.
"""
description = git_footers.add_footer(
self.FullDescriptionText(), git_footers.normalize_name(key), value)
self.SetDescriptionText(description)
def RepositoryRoot(self):
"""Returns the repository (checkout) root directory for this change,
as an absolute path.
"""
return self._local_root
def __getattr__(self, attr):
"""Return tags directly as attributes on the object."""
if not re.match(r'^[A-Z_]*$', attr):
raise AttributeError(self, attr)
return self.tags.get(attr)
def GitFootersFromDescription(self):
"""Return the git footers present in the description.
Returns:
footers: A dict of {footer: [values]} containing a multimap of the footers
in the change description.
"""
return git_footers.parse_footers(self.FullDescriptionText())
def BugsFromDescription(self):
"""Returns all bugs referenced in the commit description."""
tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
footers = []
parsed = self.GitFootersFromDescription()
unsplit_footers = parsed.get('Bug', []) + parsed.get('Fixed', [])
for unsplit_footer in unsplit_footers:
footers += [b.strip() for b in unsplit_footer.split(',')]
return sorted(set(tags + footers))
def ReviewersFromDescription(self):
"""Returns all reviewers listed in the commit description."""
# We don't support a 'R:' git-footer for reviewers; that is in metadata.
tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
return sorted(set(tags))
def TBRsFromDescription(self):
"""Returns all TBR reviewers listed in the commit description."""
tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
# TODO(crbug.com/839208): Remove support for 'Tbr:' when TBRs are
# programmatically determined by self-CR+1s.
footers = self.GitFootersFromDescription().get('Tbr', [])
return sorted(set(tags + footers))
# TODO(crbug.com/753425): Delete these once we're sure they're unused.
@property
def BUG(self):
return ','.join(self.BugsFromDescription())
@property
def R(self):
return ','.join(self.ReviewersFromDescription())
@property
def TBR(self):
return ','.join(self.TBRsFromDescription())
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
raise NotImplementedError()
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Returns a list of AffectedFile instances for all files in the change.
Args:
include_deletes: If false, deleted files will be filtered out.
file_filter: An additional filter to apply.
Returns:
[AffectedFile(path, action), AffectedFile(path, action)]
"""
affected = list(filter(file_filter, self._affected_files))
if include_deletes:
return affected
return list(filter(lambda x: x.Action() != 'D', affected))
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Return a list of the existing text files in a change."""
if include_deletes is not None:
      warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def LocalPaths(self):
"""Convenience function."""
return [af.LocalPath() for af in self.AffectedFiles()]
def AbsoluteLocalPaths(self):
"""Convenience function."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def RightHandSideLines(self):
"""An iterator over all text lines in 'new' version of changed files.
Lists lines from new or modified text files in the change.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
"""
return _RightHandSideLinesImpl(
x for x in self.AffectedFiles(include_deletes=False)
if x.IsTestableFile())
def OriginalOwnersFiles(self):
"""A map from path names of affected OWNERS files to their old content."""
def owners_file_filter(f):
return 'OWNERS' in os.path.split(f.LocalPath())[1]
files = self.AffectedFiles(file_filter=owners_file_filter)
return dict([(f.LocalPath(), f.OldContents()) for f in files])
class GitChange(Change):
_AFFECTED_FILES = GitAffectedFile
scm = 'git'
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
root = root or self.RepositoryRoot()
return subprocess.check_output(
['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
cwd=root).decode('utf-8', 'ignore').splitlines()
def ListRelevantPresubmitFiles(files, root):
"""Finds all presubmit files that apply to a given set of source files.
If inherit-review-settings-ok is present right under root, looks for
PRESUBMIT.py in directories enclosing root.
Args:
files: An iterable container containing file paths.
root: Path where to stop searching.
Return:
List of absolute paths of the existing PRESUBMIT.py scripts.
"""
files = [normpath(os.path.join(root, f)) for f in files]
# List all the individual directories containing files.
directories = set([os.path.dirname(f) for f in files])
# Ignore root if inherit-review-settings-ok is present.
if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
root = None
# Collect all unique directories that may contain PRESUBMIT.py.
candidates = set()
for directory in directories:
while True:
if directory in candidates:
break
candidates.add(directory)
if directory == root:
break
parent_dir = os.path.dirname(directory)
if parent_dir == directory:
# We hit the system root directory.
break
directory = parent_dir
# Look for PRESUBMIT.py in all candidate directories.
results = []
for directory in sorted(list(candidates)):
try:
for f in os.listdir(directory):
p = os.path.join(directory, f)
if os.path.isfile(p) and re.match(
r'PRESUBMIT.*\.py$', f) and not f.startswith('PRESUBMIT_test'):
results.append(p)
except OSError:
pass
logging.debug('Presubmit files: %s', ','.join(results))
return results
class GetTryMastersExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, project, change):
"""Executes GetPreferredTryMasters() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
project: Project name to pass to presubmit script for bot selection.
Return:
A map of try masters to map of builders to set of tests.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'GetPreferredTryMasters'
if function_name not in context:
return {}
get_preferred_try_masters = context[function_name]
if not len(inspect.getargspec(get_preferred_try_masters)[0]) == 2:
raise PresubmitFailure(
'Expected function "GetPreferredTryMasters" to take two arguments.')
return get_preferred_try_masters(project, change)
class GetPostUploadExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, gerrit_obj, change):
"""Executes PostUploadHook() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
gerrit_obj: The GerritAccessor object.
change: The Change object.
Return:
A list of results objects.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'PostUploadHook'
if function_name not in context:
return {}
post_upload_hook = context[function_name]
if not len(inspect.getargspec(post_upload_hook)[0]) == 3:
raise PresubmitFailure(
'Expected function "PostUploadHook" to take three arguments.')
return post_upload_hook(gerrit_obj, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.items(),
masters2.items()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.items():
new_builders.setdefault(builder, set([])).update(tests)
return result
def DoGetTryMasters(change,
changed_files,
repository_root,
default_presubmit,
project,
verbose,
output_stream):
"""Get the list of try masters from the presubmit scripts.
Args:
changed_files: List of modified files.
repository_root: The repository root.
default_presubmit: A default presubmit script to execute in any case.
project: Optional name of a project used in selecting trybots.
verbose: Prints debug info.
output_stream: A stream to write debug output to.
Return:
Map of try masters to map of builders to set of tests.
"""
presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
if not presubmit_files and verbose:
output_stream.write('Warning, no PRESUBMIT.py found.\n')
results = {}
executer = GetTryMastersExecuter()
if default_presubmit:
if verbose:
output_stream.write('Running default presubmit script.\n')
fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
results = _MergeMasters(results, executer.ExecPresubmitScript(
default_presubmit, fake_path, project, change))
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output_stream.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results = _MergeMasters(results, executer.ExecPresubmitScript(
presubmit_script, filename, project, change))
# Make sets to lists again for later JSON serialization.
for builders in results.values():
for builder in builders:
builders[builder] = list(builders[builder])
if results and verbose:
output_stream.write('%s\n' % str(results))
return results
def DoPostUploadExecuter(change,
gerrit_obj,
verbose):
"""Execute the post upload hook.
Args:
change: The Change object.
gerrit_obj: The GerritAccessor object.
verbose: Prints debug info.
"""
presubmit_files = ListRelevantPresubmitFiles(
change.LocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
executer = GetPostUploadExecuter()
# The root presubmit file should be executed after the ones in subdirectories.
# i.e. the specific post upload hooks should run before the general ones.
# Thus, reverse the order provided by ListRelevantPresubmitFiles.
presubmit_files.reverse()
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results.extend(executer.ExecPresubmitScript(
presubmit_script, filename, gerrit_obj, change))
if not results:
return 0
sys.stdout.write('\n')
sys.stdout.write('** Post Upload Hook Messages **\n')
exit_code = 0
for result in results:
if result.fatal:
exit_code = 1
result.handle()
sys.stdout.write('\n')
return exit_code
class PresubmitExecuter(object):
def __init__(self, change, committing, verbose, gerrit_obj, dry_run=None,
thread_pool=None, parallel=False, use_python3=False):
"""
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
use_python3: if true, will use python3 instead of python2 by default
if USE_PYTHON3 is not specified.
"""
self.change = change
self.committing = committing
self.gerrit = gerrit_obj
self.verbose = verbose
self.dry_run = dry_run
self.more_cc = []
self.thread_pool = thread_pool
self.parallel = parallel
self.use_python3 = use_python3
def ExecPresubmitScript(self, script_text, presubmit_path):
"""Executes a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: The path to the presubmit file (this will be reported via
input_api.PresubmitLocalPath()).
Return:
A list of result objects, empty if no problems.
"""
# Change to the presubmit file's directory to support local imports.
main_path = os.getcwd()
presubmit_dir = os.path.dirname(presubmit_path)
os.chdir(presubmit_dir)
# Load the presubmit script into context.
input_api = InputApi(self.change, presubmit_path, self.committing,
self.verbose, gerrit_obj=self.gerrit,
dry_run=self.dry_run, thread_pool=self.thread_pool,
parallel=self.parallel)
output_api = OutputApi(self.committing)
context = {}
# Try to figure out whether these presubmit checks should be run under
# python2 or python3. We need to do this without actually trying to
# compile the text, since the text might compile in one but not the
# other.
m = re.search('^USE_PYTHON3 = (True|False)$', script_text,
flags=re.MULTILINE)
if m:
use_python3 = m.group(1) == 'True'
else:
use_python3 = self.use_python3
if (((sys.version_info.major == 2) and use_python3) or
((sys.version_info.major == 3) and not use_python3)):
return []
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))
context['__args'] = (input_api, output_api)
# Get path of presubmit directory relative to repository root.
# Always use forward slashes, so that path is same in *nix and Windows
root = input_api.change.RepositoryRoot()
rel_path = os.path.relpath(presubmit_dir, root)
rel_path = rel_path.replace(os.path.sep, '/')
# Get the URL of git remote origin and use it to identify host and project
host = project = ''
if self.gerrit:
host = self.gerrit.host or ''
project = self.gerrit.project or ''
# Prefix for test names
prefix = 'presubmit:%s/%s:%s/' % (host, project, rel_path)
# Perform all the desired presubmit checks.
results = []
try:
version = [
int(x) for x in context.get('PRESUBMIT_VERSION', '0.0.0').split('.')
]
with rdb_wrapper.client(prefix) as sink:
if version >= [2, 0, 0]:
for function_name in context:
if not function_name.startswith('Check'):
continue
if function_name.endswith('Commit') and not self.committing:
continue
if function_name.endswith('Upload') and self.committing:
continue
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
else: # Old format
if self.committing:
function_name = 'CheckChangeOnCommit'
else:
function_name = 'CheckChangeOnUpload'
if function_name in context:
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
finally:
for f in input_api._named_temporary_files:
os.remove(f)
# Return the process to the original working directory.
os.chdir(main_path)
return results
def _run_check_function(self, function_name, context, sink=None):
"""Evaluates and returns the result of a given presubmit function.
If sink is given, the result of the presubmit function will be reported
to the ResultSink.
Args:
function_name: the name of the presubmit function to evaluate
context: a context dictionary in which the function will be evaluated
sink: an instance of ResultSink. None, by default.
Returns:
the result of the presubmit function call.
"""
start_time = time_time()
try:
result = eval(function_name + '(*__args)', context)
self._check_result_type(result)
except Exception:
if sink:
elapsed_time = time_time() - start_time
sink.report(function_name, rdb_wrapper.STATUS_FAIL, elapsed_time)
# TODO(crbug.com/953884): replace reraise with native py3:
# raise .. from e
e_type, e_value, e_tb = sys.exc_info()
print('Evaluation of %s failed: %s' % (function_name, e_value))
six.reraise(e_type, e_value, e_tb)
elapsed_time = time_time() - start_time
if elapsed_time > 10.0:
sys.stdout.write(
'%s took %.1fs to run.\n' % (function_name, elapsed_time))
if sink:
status = rdb_wrapper.STATUS_PASS
if any(r.fatal for r in result):
status = rdb_wrapper.STATUS_FAIL
sink.report(function_name, status, elapsed_time)
return result
def _check_result_type(self, result):
"""Helper function which ensures result is a list, and all elements are
instances of OutputApi.PresubmitResult"""
if not isinstance(result, (tuple, list)):
raise PresubmitFailure('Presubmit functions must return a tuple or list')
if not all(isinstance(res, OutputApi.PresubmitResult) for res in result):
raise PresubmitFailure(
'All presubmit results must be of types derived from '
'output_api.PresubmitResult')
def DoPresubmitChecks(change,
committing,
verbose,
default_presubmit,
may_prompt,
gerrit_obj,
dry_run=None,
parallel=False,
json_output=None,
use_python3=False):
"""Runs all presubmit checks that apply to the files in the change.
This finds all PRESUBMIT.py files in directories enclosing the files in the
change (up to the repository root) and calls the relevant entrypoint function
depending on whether the change is being committed or uploaded.
Prints errors, warnings and notifications. Prompts the user for warnings
when needed.
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
verbose: Prints debug info.
default_presubmit: A default presubmit script to execute in any case.
may_prompt: Enable (y/n) questions on warning or error. If False,
any questions are answered with yes by default.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests specified by input_api.RunTests in all
PRESUBMIT files will be run in parallel.
use_python3: if true, default to using Python3 for presubmit checks
rather than Python2.
Return:
1 if presubmit checks failed or 0 otherwise.
"""
old_environ = os.environ
try:
# Make sure python subprocesses won't generate .pyc files.
os.environ = os.environ.copy()
os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
python_version = 'Python %s' % sys.version_info.major
if committing:
sys.stdout.write('Running %s presubmit commit checks ...\n' %
python_version)
else:
sys.stdout.write('Running %s presubmit upload checks ...\n' %
python_version)
start_time = time_time()
presubmit_files = ListRelevantPresubmitFiles(
change.AbsoluteLocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
thread_pool = ThreadPool()
executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
dry_run, thread_pool, parallel, use_python3)
if default_presubmit:
if verbose:
sys.stdout.write('Running default presubmit script.\n')
fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
results += executer.ExecPresubmitScript(default_presubmit, fake_path)
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results += executer.ExecPresubmitScript(presubmit_script, filename)
results += thread_pool.RunAsync()
messages = {}
should_prompt = False
presubmits_failed = False
for result in results:
if result.fatal:
presubmits_failed = True
messages.setdefault('ERRORS', []).append(result)
elif result.should_prompt:
should_prompt = True
messages.setdefault('Warnings', []).append(result)
else:
messages.setdefault('Messages', []).append(result)
sys.stdout.write('\n')
for name, items in messages.items():
sys.stdout.write('** Presubmit %s **\n' % name)
for item in items:
item.handle()
sys.stdout.write('\n')
total_time = time_time() - start_time
if total_time > 1.0:
sys.stdout.write(
'Presubmit checks took %.1fs to calculate.\n\n' % total_time)
if not should_prompt and not presubmits_failed:
sys.stdout.write('%s presubmit checks passed.\n' % python_version)
elif should_prompt:
sys.stdout.write('There were %s presubmit warnings. ' % python_version)
if may_prompt:
presubmits_failed = not prompt_should_continue(
'Are you sure you wish to continue? (y/N): ')
else:
sys.stdout.write('\n')
if json_output:
# Write the presubmit results to json output
presubmit_results = {
'errors': [
error.json_format()
for error in messages.get('ERRORS', [])
],
'notifications': [
notification.json_format()
for notification in messages.get('Messages', [])
],
'warnings': [
warning.json_format()
for warning in messages.get('Warnings', [])
],
'more_cc': executer.more_cc,
}
gclient_utils.FileWrite(
json_output, json.dumps(presubmit_results, sort_keys=True))
global _ASKED_FOR_FEEDBACK
# Ask for feedback one time out of 5.
if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
sys.stdout.write(
'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
'to figure out which PRESUBMIT.py was run, then run git blame\n'
'on the file to figure out who to ask for help.\n')
_ASKED_FOR_FEEDBACK = True
return 1 if presubmits_failed else 0
finally:
os.environ = old_environ
def _scan_sub_dirs(mask, recursive):
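  """Returns paths matching |mask|, walking subdirectories when |recursive| is
  set; .svn and .git entries are skipped."""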
if not recursive:
return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
results = []
for root, dirs, files in os.walk('.'):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for name in files:
if fnmatch.fnmatch(name, mask):
results.append(os.path.join(root, name))
return results
def _parse_files(args, recursive):
logging.debug('Searching for %s', args)
files = []
for arg in args:
files.extend([('M', f) for f in _scan_sub_dirs(arg, recursive)])
return files
def _parse_change(parser, options):
"""Process change options.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GitChange if the change root is a git repository, or a Change otherwise.
"""
if options.files and options.all_files:
parser.error('<files> cannot be specified when --all-files is set.')
change_scm = scm.determine_scm(options.root)
if change_scm != 'git' and not options.files:
parser.error('<files> is not optional for unversioned directories.')
if options.files:
change_files = _parse_files(options.files, options.recursive)
elif options.all_files:
change_files = [('M', f) for f in scm.GIT.GetAllFiles(options.root)]
else:
change_files = scm.GIT.CaptureStatus(
options.root, options.upstream or None)
logging.info('Found %d file(s).', len(change_files))
change_class = GitChange if change_scm == 'git' else Change
return change_class(
options.name,
options.description,
options.root,
change_files,
options.issue,
options.patchset,
options.author,
upstream=options.upstream)
def _parse_gerrit_options(parser, options):
"""Process gerrit options.
SIDE EFFECTS: Modifies options.author and options.description from Gerrit if
options.gerrit_fetch is set.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GerritAccessor object if options.gerrit_url is set, or None otherwise.
"""
gerrit_obj = None
if options.gerrit_url:
gerrit_obj = GerritAccessor(
url=options.gerrit_url,
project=options.gerrit_project,
branch=options.gerrit_branch)
if not options.gerrit_fetch:
return gerrit_obj
if not options.gerrit_url or not options.issue or not options.patchset:
parser.error(
'--gerrit_fetch requires --gerrit_url, --issue and --patchset.')
options.author = gerrit_obj.GetChangeOwner(options.issue)
options.description = gerrit_obj.GetChangeDescription(
options.issue, options.patchset)
logging.info('Got author: "%s"', options.author)
logging.info('Got description: """\n%s\n"""', options.description)
return gerrit_obj
@contextlib.contextmanager
def canned_check_filter(method_names):
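  """Context manager that temporarily replaces the named canned checks with
  no-op stubs returning no results; the originals are restored on exit."""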
filtered = {}
try:
for method_name in method_names:
if not hasattr(presubmit_canned_checks, method_name):
logging.warning('Skipping unknown "canned" check %s' % method_name)
continue
filtered[method_name] = getattr(presubmit_canned_checks, method_name)
setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
yield
finally:
for name, method in filtered.items():
setattr(presubmit_canned_checks, name, method)
def main(argv=None):
parser = argparse.ArgumentParser(usage='%(prog)s [options] <files...>')
hooks = parser.add_mutually_exclusive_group()
hooks.add_argument('-c', '--commit', action='store_true',
help='Use commit instead of upload checks.')
hooks.add_argument('-u', '--upload', action='store_false', dest='commit',
help='Use upload instead of commit checks.')
hooks.add_argument('--post_upload', action='store_true',
help='Run post-upload commit hooks.')
parser.add_argument('-r', '--recursive', action='store_true',
help='Act recursively.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Use 2 times for more debug info.')
parser.add_argument('--name', default='no name')
parser.add_argument('--author')
desc = parser.add_mutually_exclusive_group()
desc.add_argument('--description', default='', help='The change description.')
desc.add_argument('--description_file',
help='File to read change description from.')
parser.add_argument('--issue', type=int, default=0)
parser.add_argument('--patchset', type=int, default=0)
parser.add_argument('--root', default=os.getcwd(),
help='Search for PRESUBMIT.py up to this directory. '
'If inherit-review-settings-ok is present in this '
'directory, parent directories up to the root file '
'system directories will also be searched.')
parser.add_argument('--upstream',
help='Git only: the base ref or upstream branch against '
'which the diff should be computed.')
parser.add_argument('--default_presubmit')
parser.add_argument('--may_prompt', action='store_true', default=False)
parser.add_argument('--skip_canned', action='append', default=[],
help='A list of checks to skip which appear in '
'presubmit_canned_checks. Can be provided multiple times '
'to skip multiple canned checks.')
parser.add_argument('--dry_run', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_url', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_project', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_branch', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_fetch', action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--parallel', action='store_true',
help='Run all tests specified by input_api.RunTests in '
'all PRESUBMIT files in parallel.')
parser.add_argument('--json_output',
help='Write presubmit errors to json output.')
parser.add_argument('--all_files', action='store_true',
help='Mark all files under source control as modified.')
parser.add_argument('files', nargs='*',
help='List of files to be marked as modified when '
'executing presubmit or post-upload hooks. fnmatch '
'wildcards can also be used.')
parser.add_argument('--use-python3', action='store_true',
help='Use python3 for presubmit checks by default')
options = parser.parse_args(argv)
log_level = logging.ERROR
if options.verbose >= 2:
log_level = logging.DEBUG
elif options.verbose:
log_level = logging.INFO
log_format = ('[%(levelname).1s%(asctime)s %(process)d %(thread)d '
'%(filename)s] %(message)s')
logging.basicConfig(format=log_format, level=log_level)
if options.description_file:
options.description = gclient_utils.FileRead(options.description_file)
gerrit_obj = _parse_gerrit_options(parser, options)
change = _parse_change(parser, options)
try:
if options.post_upload:
return DoPostUploadExecuter(
change,
gerrit_obj,
options.verbose)
with canned_check_filter(options.skip_canned):
return DoPresubmitChecks(
change,
options.commit,
options.verbose,
options.default_presubmit,
options.may_prompt,
gerrit_obj,
options.dry_run,
options.parallel,
options.json_output,
options.use_python3)
except PresubmitFailure as e:
print(e, file=sys.stderr)
print('Maybe your depot_tools is out of date?', file=sys.stderr)
return 2
if __name__ == '__main__':
fix_encoding.fix_encoding()
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(2)
|
the-stack_106_29108 | import random
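# Shuffles the clinical training data and splits it into train/test files,
# keeping the header line at the top of both outputs.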
f = open("data/cli/clinical_train.txt","r")
lines = f.readlines()
tab = [lines[0]]
lines = lines[1:]
random.shuffle(lines)
test = tab + lines[:160]
train = tab + lines[160:]
f_train = open("data/cli/train.txt","w")
f_test = open("data/cli/test.txt","w")
f_train.writelines(train)
f_test.writelines(test)
f_train.close()
f_test.close()
print(test) |
the-stack_106_29113 | import altair as alt
import pandas as pd
class AltAirLogPlot(object):
df = None
DTCSH = 70
DTCMA = 55.8
DTCW = 200
#PHIS = (DTC - DTCMA) / (DTCW - DTCMA) / KCP
def __init__(self, df):
self.df = df
@classmethod
def plot_log_df(cls, df):
chart = alt.Chart(df).mark_line(interpolate='basis', orient='vertical').encode(
x=alt.X('GR:Q', scale=alt.Scale(
domain=(0, 100),
clamp=True
)),
y='DEPT:Q',
order='DEPT',
fill='GR',
tooltip=['DEPT', 'GR'],
).interactive(bind_x=False)
return chart
@classmethod
def plot_multi_logs(cls, df, log_names=None):
class_instance = cls(df)
df = class_instance._handle_log_names(df, log_names)
df.sort_values(by=['DEPT'], inplace=True, ascending=True)
df_unscaled = df.copy()
df = class_instance._scale_data_for_plotting(df)
df = class_instance._melt_df(df)
chart = alt.Chart(df).mark_line().encode(
x=alt.X('value'),
y=alt.Y('DEPT', sort='descending'),
tooltip=['DEPT', 'value'],
order='DEPT',
column='variable',
color='variable'
).properties(
width=50,
height=600
).interactive(bind_x=False)
return chart
@classmethod
def plot_quad_combo_tracks(cls, df):
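        # Builds an interval-selector track plus GR/SP, resistivity and
        # porosity tracks that all share the brushed depth range.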
class_instance = cls(df)
brush = alt.selection(type='interval', encodings=['y'])
selector_track = class_instance.plot_GR_SP_selection_chart(brush)
GR_SP_track = class_instance.plot_GR_SP(brush)
porosity_track = class_instance.plot_porosity(brush)
resistivity_track = class_instance.plot_resistivity_track(brush)
return selector_track | GR_SP_track | resistivity_track | porosity_track
def plot_GR_SP_selection_chart(self, brush, GR_str='GR', SP_str='SP') -> alt.Chart:
df = self._handle_log_names(self.df, log_names=[GR_str, SP_str])
df = self._melt_df(df)
chart = alt.Chart(df).mark_line().encode(
x=alt.X('value', axis=alt.Axis(title='Selector')),
y=alt.Y('DEPT', sort='descending'),
# x=alt.X('value:Q', axis=alt.Axis(title='Selector')),
# y=alt.Y('DEPT:O', sort='ascending'),
tooltip=['DEPT', 'value'],
order='DEPT',
color='variable'
).properties(
width=100,
height=600
).add_selection(brush)
return chart
def plot_GR_SP(self, brush, GR_str='GR', SP_str='SP')->alt.Chart:
df = self._handle_log_names(self.df, log_names=[GR_str, SP_str])
df = self._melt_df(df)
color_scale = alt.Scale(
domain=['SP', 'GR', 'RDEP', 'RMED', 'RSHA', 'RHOB', 'NPHI', 'DT', 'DTC'],
range=['#B71C1C',
'#4A148C',
'#1A237E',
'#01579B',
'#004D40',
'#33691E',
'#F57F17',
'#E65100',
'#3E2723']
)
chart = alt.Chart(df).mark_line().encode(
x=alt.X('value', axis=alt.Axis(title='SP GR')),
y=alt.Y('DEPT', sort='descending', scale={'domain': brush.ref(), 'zero': True}),
# x=alt.X('value:Q', axis=alt.Axis(title='SP GR')),
# y=alt.Y('DEPT:O', sort='ascending', scale={'domain': brush.ref(), 'zero': True}),
color=alt.Color('variable:N', legend=None, scale=color_scale),
tooltip=['DEPT', 'value'],
order='DEPT',
opacity=alt.OpacityValue(0.8)
).properties(
width=100,
height=600
)
return chart
def plot_resistivity_track(self, brush, deep_res_str='RDEP', med_res_str='RMED', shallow_res_str='RSHA')->alt.Chart:
df = self._handle_log_names(self.df, log_names=[deep_res_str, med_res_str, shallow_res_str])
df = self._melt_df(df)
chart = alt.Chart(df).mark_line().encode(
x=alt.X('value', axis=alt.Axis(title='Resistivity'), scale={'type': 'log'}),
y=alt.Y('DEPT', sort='descending', scale={'domain': brush.ref(), 'zero': True}, axis=None),
# x=alt.X('value:Q', axis=alt.Axis(title='Resistivity'), scale={'type': 'log'}),
# y=alt.Y('DEPT:O', sort='ascending', scale={'domain': brush.ref(), 'zero': True}, axis=None),
tooltip=['DEPT', 'value'],
order='DEPT',
color='variable',
opacity=alt.OpacityValue(0.8)
).properties(
width=100,
height=600
)
return chart
def plot_porosity(self, brush, density_str='RHOB', neutron_str='NPHI', sonic_str='DTC', lithology_dens=2.65)->alt.Chart:
df = self._handle_log_names(self.df, log_names=[density_str, neutron_str, sonic_str])
df['DPHI'] = (df[density_str] - lithology_dens)/(1-lithology_dens)
df['PHIS'] = (df[sonic_str] - self.DTCMA) / (self.DTCW - self.DTCMA)
df = self._handle_log_names(df, log_names=['NPHI', 'DPHI', 'PHIS'])
df = self._melt_df(df)
chart = alt.Chart(df).mark_line().encode(
x=alt.X('value', axis=alt.Axis(title='Porosity')),
y=alt.Y('DEPT', sort='descending', axis=None, scale={'domain': brush.ref(), 'zero': True}),
# x=alt.X('value:Q', axis=alt.Axis(title='Porosity')),
# y=alt.Y('DEPT:O', sort='ascending', axis=None, scale={'domain': brush.ref(), 'zero': True}),
tooltip=['DEPT', 'value'],
order='DEPT',
color='variable',
opacity=alt.OpacityValue(0.8)
).properties(
width=100,
height=600
)
return chart
@staticmethod
def _scale_data_for_plotting(df):
for col in df:
if col == 'DEPT':
continue
series = df[col]
series = series / series.max()
df[col] = series
return df
@staticmethod
def _melt_df(df):
return df.melt(id_vars=['DEPT'])
@staticmethod
def _handle_log_names(df, log_names=None):
if log_names is not None:
dept = df['DEPT']
df = df[log_names]
if 'DEPT' not in df:
df['DEPT'] = dept
return df
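# Example usage (illustrative sketch; assumes `df` is a pandas DataFrame of log
# curves that includes a 'DEPT' depth column and the quad-combo curve names):
#   chart = AltAirLogPlot.plot_quad_combo_tracks(df)
#   chart.save('quad_combo.html')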
|
the-stack_106_29115 | import asyncio, aiohttp, json
def setup(bot):
pass
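# Thin aiohttp helpers: POST/HEAD/GET wrappers returning json, text or raw
# bytes; downloads larger than ~8 MB are aborted and return None.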
async def async_post_json(url, data = None, headers = None):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.post(url, data=data) as response:
return await response.json()
async def async_post_text(url, data = None, headers = None):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.post(url, data=data) as response:
res = await response.read()
return res.decode("utf-8", "replace")
async def async_post_bytes(url, data = None, headers = None):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.post(url, data=data) as response:
return await response.read()
async def async_head_json(url, headers = None):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.head(url) as response:
return await response.json()
async def async_dl(url, headers = None):
# print("Attempting to download {}".format(url))
total_size = 0
data = b""
async with aiohttp.ClientSession(headers=headers) as session:
async with session.get(url) as response:
assert response.status == 200
while True:
chunk = await response.content.read(4*1024) # 4k
data += chunk
total_size += len(chunk)
if not chunk:
break
if total_size > 8000000:
# Too big...
# print("{}\n - Aborted - file too large.".format(url))
return None
return data
async def async_text(url, headers = None):
data = await async_dl(url, headers)
if data != None:
return data.decode("utf-8", "replace")
else:
return data
async def async_json(url, headers = None):
data = await async_dl(url, headers)
if data != None:
return json.loads(data.decode("utf-8", "replace"))
else:
return data |
the-stack_106_29119 | # coding=utf-8
import logging
try:
import rospy
from rosgraph_msgs.msg import Log
except ImportError:
logging.warning("ROS not available: no ROS logging")
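# Maps Python logging level names to rosgraph_msgs/Log level constants
# (DEBUG=1, INFO=2, WARN=4, ERROR=8, FATAL=16).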
roslevel = {'DEBUG':1, 'INFO':2, 'WARNING':4, 'ERROR':8, 'CRITICAL':16}
class RXConsoleHandler(logging.Handler):
def __init__(self, topic = "/rosout"):
logging.Handler.__init__(self)
rospy.sleep(0.5) # wait a bit to make sure our ROS node is up
self.pub = rospy.Publisher(topic, Log)
self.log = Log()
self.level = logging.DEBUG
self.log.level = 2
self.log.name = "pyrobots logger"
self.log.msg = "Welcome in pyRobots"
self.log.file = ""
self.log.function = ""
self.log.line = 0
self.log.header.stamp = rospy.rostime.Time.now()
self.pub.publish(self.log)
def emit(self, record):
if record.levelname in roslevel:
level = roslevel[record.levelname]
else:
level = 2 # Default to 'INFO'. Should be improved to default to closest 'symbolic' level
log = Log(level = level,
name = record.name,
msg = record.msg,
file = record.filename,
function = record.funcName,
line = record.lineno)
log.header.stamp = rospy.rostime.Time.now()
self.pub.publish(log)
if __name__ == "__main__":
logger = logging.getLogger("test")
logger.addHandler(RXConsoleHandler())
logger.setLevel(logging.DEBUG)
logger.debug("Debug message")
logger.info("Info message")
logger.warning("Warning message")
logger.error("Error message")
logger.critical("Fatal message")
|
the-stack_106_29120 | # Copyright 2017-2020 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import sys
import traceback
import threading
import unittest
from flaky import flaky
import psutil
from typed_python.compiler.type_wrappers.compilable_builtin import CompilableBuiltin
from typed_python import (
Function, OneOf, TupleOf, ListOf, Tuple, NamedTuple, Class, NotCompiled, Dict,
_types, Compiled, Member, Final, isCompiled, ConstDict,
makeNamedTuple, UInt32, Int32, Type, identityHash, typeKnownToCompiler, checkOneOfType,
refcount, checkType, map
)
from typed_python.compiler.runtime import Runtime, Entrypoint, RuntimeEventVisitor
from typed_python.python_ast import (
convertFunctionToAlgebraicPyAst,
evaluateFunctionDefWithLocalsInCells,
)
def result_or_exception(f, *p):
try:
return f(*p)
except Exception as e:
return type(e)
def result_or_exception_str(f, *p):
try:
return f(*p)
except Exception as e:
return str(type(e)) + " " + str(e)
# ad hoc transformation of specific error strings occurring during tests, for compatibility between python versions
def emulate_older_errors(s):
return s.replace('TypeError: can only concatenate str (not "int") to str', 'TypeError: must be str, not int')
def result_or_exception_tb(f, *p):
try:
return f(*p)
except BaseException as e:
return str(type(e)) + "\n" + emulate_older_errors(traceback.format_exc())
def repeat_test(f, *a):
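    # Call f(*a) 10,000 times, swallowing exceptions; this is the inner loop of
    # the timing and memory comparisons in the try/context-manager perf tests.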
for i in range(10000):
try:
f(*a)
except Exception:
pass
class GetCompiledTypes(RuntimeEventVisitor):
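    # RuntimeEventVisitor subclasses are used as context managers around
    # compiled calls; after the `with` block, self.types maps each newly
    # compiled function name to the input/output/variable types it was given.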
def __init__(self):
self.types = {}
def onNewFunction(
self,
identifier,
functionConverter,
nativeFunction,
funcName,
funcCode,
funcGlobals,
closureVars,
inputTypes,
outputType,
yieldType,
variableTypes,
conversionType
):
self.types[funcName] = makeNamedTuple(
inputTypes=inputTypes,
outputType=outputType,
varTypes=variableTypes
)
# used in tests that need module-level objects, which are handled
# internally in a different way than objects created at function
# scope.
aModuleLevelDict = Dict(str, int)({'hi': 1})
aModuleLevelConstDict = ConstDict(str, int)({'hi': 1})
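# Entrypoint-compiled version of repeat_test, used below to time the compiled
# code paths against the interpreted repeat_test runs.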
repeat_test_compiled = Entrypoint(repeat_test)
class TestCompilationStructures(unittest.TestCase):
def test_dispatch_order_independent(self):
class AClass(Class):
pass
someValues = [
TupleOf(int)(),
(1, 2, 3),
[1, 2, 3],
AClass()
]
for testCases in [someValues, reversed(someValues)]:
# ensure that we are not order-dependent trying to dispatch
# a class to a tuple or vice-versa
@Entrypoint
def typeToString(x):
return str(type(x))
for x in testCases:
self.assertEqual(typeToString(x), str(type(x)))
def checkFunctionOfIntegers(self, f):
f_fast = Compiled(f)
for i in range(100):
self.assertEqual(f_fast(i), f(i))
def test_simple_loop(self):
def f(x: int) -> int:
y = 0
while x > 0:
x = x - 1
y = y + x
return y
self.checkFunctionOfIntegers(f)
def test_returning(self):
def f(x: int) -> int:
return x
self.checkFunctionOfIntegers(f)
def test_basic_arithmetic(self):
def f(x: int) -> int:
y = x+1
return y
self.checkFunctionOfIntegers(f)
def test_boolean_and(self):
@Compiled
def f(x: int, y: int, z: int) -> bool:
return x and y and z
self.assertEqual(f(0, 0, 0), False)
self.assertEqual(f(0, 0, 1), False)
self.assertEqual(f(0, 1, 0), False)
self.assertEqual(f(0, 1, 1), False)
self.assertEqual(f(1, 0, 0), False)
self.assertEqual(f(1, 0, 1), False)
self.assertEqual(f(1, 1, 0), False)
self.assertEqual(f(1, 1, 1), True)
def test_boolean_or(self):
@Compiled
def f(x: int, y: int, z: int) -> bool:
return x or y or z
self.assertEqual(f(0, 0, 0), False)
self.assertEqual(f(0, 0, 1), True)
self.assertEqual(f(0, 1, 0), True)
self.assertEqual(f(0, 1, 1), True)
self.assertEqual(f(1, 0, 0), True)
self.assertEqual(f(1, 0, 1), True)
self.assertEqual(f(1, 1, 0), True)
self.assertEqual(f(1, 1, 1), True)
def test_boolean_operators(self):
@Compiled
def f(x: int, y: int, z: float) -> bool:
return x and y and z
self.assertEqual(f(0, 1, 1.5), False)
self.assertEqual(f(1, 1, 1.5), True)
def test_boolean_operators_with_side_effects(self):
# a function that appends 'effect' onto a list of effects
# and then returns result, so that we can track when we
# are actually executing a particular expression
def effectAndResult(effectList, effect, result):
effectList.append(effect)
return result
@Compiled
def f_and(x: int, y: int, z: str) -> ListOf(str):
result = ListOf(str)()
(effectAndResult(result, "x", x)
and effectAndResult(result, "y", y)
and effectAndResult(result, "z", z))
return result
self.assertEqual(f_and(0, 1, "s"), ["x"])
self.assertEqual(f_and(1, 0, "s"), ["x", "y"])
self.assertEqual(f_and(1, 1, "s"), ["x", "y", "z"])
@Compiled
def f_or(x: int, y: int, z: str) -> ListOf(str):
result = ListOf(str)()
(effectAndResult(result, "x", x)
and effectAndResult(result, "y", y)
and effectAndResult(result, "z", z))
return result
self.assertEqual(f_or(0, 0, ""), ["x"])
self.assertEqual(f_or(1, 0, ""), ["x", "y"])
self.assertEqual(f_or(1, 1, ""), ["x", "y", "z"])
def test_object_to_int_conversion(self):
@Function
def toObject(o: object):
return o
@Compiled
def f(x: int) -> int:
return int(toObject(x))
self.assertEqual(f(10), 10)
def test_variable_type_changes_make_sense(self):
@Compiled
def f(x: int) -> float:
y = x
y = 1.2
return y
self.assertEqual(f(10), 1.2)
def test_call_other_typed_function(self):
def g(x: int) -> int:
return x+1
def f(x: int) -> int:
return g(x+2)
self.checkFunctionOfIntegers(f)
def test_call_untyped_function(self):
@Compiled
def f(x: object):
return x
x = []
self.assertIs(f(x), x)
def test_call_other_untyped_function(self):
def g(x):
return x
@Compiled
def f(x: object):
return g(x)
x = []
self.assertIs(f(x), x)
def test_integers_in_closures(self):
y = 2
def f(x: int) -> int:
return x+y
self.checkFunctionOfIntegers(f)
def test_loop_variable_changing_type(self):
@Compiled
def f(top: float) -> OneOf(int, float):
y = 0
x = 0
while x < top:
x += 1.0
y += x
return y
self.assertEqual(f(3.5), 10.0)
def test_unassigned_variables(self):
@Compiled
def f(switch: int, t: TupleOf(int)) -> TupleOf(int):
if switch:
x = t
return x
self.assertEqual(f(1, (1, 2, 3)), (1, 2, 3))
with self.assertRaisesRegex(Exception, "local variable 'x' referenced before assignment"):
self.assertEqual(f(0, (1, 2, 3)), (1, 2, 3))
def test_return_from_function_without_return_value_specified(self):
@Compiled
def f(t: TupleOf(int)):
return t
self.assertEqual(f((1, 2, 3)), (1, 2, 3))
def test_return_from_function_with_bad_convert_throws(self):
@Compiled
def f(t: TupleOf(int)) -> None:
return t
with self.assertRaisesRegex(Exception, "Can't convert"):
f((1, 2, 3))
@flaky(max_runs=3, min_passes=1)
def test_perf_of_mutually_recursive_untyped_functions(self):
def q(x):
return x-1
def z(x):
return q(x)+1
def f(x):
return z(g(x - 1)) + z(g(x - 2)) + z(x)
def g(x):
if x > 0:
return z(f(x-1)) * z(2) + f(x-2)
return 1
g_typed = Entrypoint(g)
self.assertEqual(g(10), g_typed(10))
for input in [18, 18.0]:
t0 = time.time()
g(input)
untyped_duration = time.time() - t0
g_typed(input)
t0 = time.time()
g_typed(input)
typed_duration = time.time() - t0
# I get around 50x for ints and 12 for floats
speedup = untyped_duration / typed_duration
self.assertGreater(speedup, 20 if isinstance(input, int) else 4)
print("for ", input, " speedup is ", speedup)
def test_call_typed_function(self):
@Function
def f(x):
return x
@Compiled
def g(x: int):
return f(x+1)
self.assertEqual(g(10), 11)
def test_adding_with_nones_throws(self):
@Compiled
def g():
return None + None
with self.assertRaisesRegex(Exception, "Can't apply op Add.. to expressions of type None"):
g()
def test_exception_before_return_propagated(self):
@Compiled
def g():
None+None
return None
with self.assertRaisesRegex(Exception, "Can't apply op Add.. to expressions of type None"):
g()
def test_call_function_with_none(self):
@Compiled
def g(x: None):
return None
self.assertEqual(g(None), None)
def test_call_other_function_with_none(self):
def f(x):
return x
@Compiled
def g(x: int):
return f(None)
self.assertEqual(g(1), None)
def test_interleaving_nones(self):
def f(x, y, z):
x+z
return y
@Compiled
def works(x: int):
return f(x, None, x)
@Compiled
def throws(x: int):
return f(None, None, x)
self.assertEqual(works(1), None)
with self.assertRaisesRegex(Exception, "Can't apply op Add.. to expressions of type None"):
throws(1)
def test_return_none(self):
def f(x):
return x
@Compiled
def g():
return f(None)
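        # resultTypeFor() reports the return type the compiler infers for this
        # call signature without actually executing g.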
self.assertEqual(g.resultTypeFor().typeRepresentation, type(None))
self.assertEqual(g(), None)
def test_assign_with_none(self):
def f(x):
return x
@Compiled
def g(x: int):
y = f(None)
z = y
return z
self.assertEqual(g(1), None)
def test_nonexistent_variable(self):
@Compiled
def f():
return this_variable_name_is_undefined # noqa: F821
with self.assertRaisesRegex(Exception, "name 'this_variable_name_is_undefined' is not defined"):
f()
@flaky(max_runs=3, min_passes=1)
def test_iterating(self):
@Compiled
def sumDirectly(x: int):
y = 0.0
i = 0
while i < x:
y += i
i += 1
return y
@Compiled
def sumUsingRange(x: int):
y = 0.0
for i in range(x):
y += i
return y
for i in range(10):
self.assertEqual(sumDirectly(i), sumUsingRange(i))
t0 = time.time()
sumDirectly(1000000)
t1 = time.time()
sumUsingRange(1000000)
t2 = time.time()
print("Range is %.2f slower than nonrange." % ((t2-t1)/(t1-t0))) # I get 1.00
self.assertLess((t1-t0), (t2 - t1) * 1.2)
def test_read_invalid_variables(self):
@Compiled
def readNonexistentVariable(readIt: bool):
if readIt:
return y # noqa
else:
return 0
with self.assertRaisesRegex(Exception, "name 'y' is not defined"):
readNonexistentVariable(True)
self.assertEqual(readNonexistentVariable(False), 0)
def test_append_float_to_int_rules_same(self):
def f():
x = ListOf(int)()
x.append(1.0)
return x
self.assertEqual(f(), Compiled(f)())
def test_multiple_assignments(self):
@Entrypoint
def f(iterable):
x, y, z = iterable
return x + y + z
self.assertEqual(f(TupleOf(int)((1, 2, 3))), 6)
with self.assertRaisesRegex(Exception, "not enough"):
f(TupleOf(int)((1, 2)))
with self.assertRaisesRegex(Exception, "too many"):
f(TupleOf(int)((1, 2, 3, 4)))
self.assertEqual(f(Tuple(int, int, int)((1, 2, 3))), 6)
self.assertEqual(f(NamedTuple(x=int, y=int, z=int)(x=1, y=2, z=3)), 6)
with self.assertRaisesRegex(Exception, "not enough"):
f(Tuple(int, int)((1, 2)))
with self.assertRaisesRegex(Exception, "too many"):
f(Tuple(int, int, int, int)((1, 2, 3, 4)))
def test_print_oneof(self):
@Compiled
def f(x: OneOf(float, str), y: OneOf(float, str)):
print("You can print either a float or a string", x, y)
f("hi", "hi")
f(1.0, "hi")
def test_type_oneof(self):
@Compiled
def f(x: OneOf(float, int)):
return str(type(x))
self.assertEqual(f(1), str(int))
self.assertEqual(f(1.0), str(float))
def test_can_raise_exceptions(self):
@Compiled
def aFunctionThatRaises(x: object):
raise AttributeError(f"you gave me {x}")
with self.assertRaisesRegex(AttributeError, "you gave me hihi"):
aFunctionThatRaises("hihi")
try:
aFunctionThatRaises("hihi")
except Exception:
trace = traceback.format_exc()
# the traceback should know where we are
self.assertIn('conversion_test', trace)
self.assertIn('aFunctionThatRaises', trace)
def test_stacktraces_show_up(self):
def f2(x):
return f3(x)
def f3(x):
return f4(x)
def f4(x):
raise Exception(f"X is {x}")
@Entrypoint
def f1(x):
return f2(x)
try:
f1("hihi")
except Exception:
trace = traceback.format_exc()
self.assertIn("f1", trace)
self.assertIn("f2", trace)
self.assertIn("f3", trace)
self.assertIn("f4", trace)
@flaky(max_runs=3, min_passes=1)
def test_perf_of_inlined_functions_doesnt_degrade(self):
def f1(x):
return f2(x)
def f2(x):
return f3(x)
def f3(x):
return f4(x)
def f4(x: int):
return x
@Entrypoint
def callsF1(times: int):
res = 0.0
for i in range(times):
res += f1(i)
return res
@Entrypoint
def callsF4(times: int):
res = 0.0
for i in range(times):
res += f4(i)
return res
@Entrypoint
def getit(f):
return f
# prime the compilation
callsF4(1)
callsF1(1)
t0 = time.time()
callsF1(10000000)
t1 = time.time()
callsF4(10000000)
t2 = time.time()
callsDeeply = t1 - t0
callsShallowly = t2 - t1
ratio = callsDeeply / callsShallowly
# inlining should work across invocations, regardless of order
self.assertLessEqual(.8, ratio)
self.assertLessEqual(ratio, 1.2)
print(f"Deeper call tree code was {ratio} times slow.")
def test_exception_handling_preserves_refcount(self):
@Entrypoint
def f(x, shouldThrow):
# this will increment the 'x' refcount
y = x # noqa
if shouldThrow:
raise Exception("exception")
aList = ListOf(int)()
self.assertEqual(_types.refcount(aList), 1)
f(aList, False)
self.assertEqual(_types.refcount(aList), 1)
with self.assertRaises(Exception):
f(aList, True)
self.assertEqual(_types.refcount(aList), 1)
def test_assert(self):
@Entrypoint
def assertNoMessage(x):
assert x
@Entrypoint
def assertWithMessage(x, y):
assert x, y
with self.assertRaises(AssertionError):
assertNoMessage(0)
with self.assertRaisesRegex(AssertionError, "boo"):
assertWithMessage(0, "boo")
assertNoMessage(1)
assertWithMessage(1, "message")
def test_assert_false(self):
@Entrypoint
def check(x):
assert False, x
return x
self.assertEqual(check.resultTypeFor(int), None)
with self.assertRaises(AssertionError):
check(10)
def test_conditional_eval_or(self):
@Compiled
def f1(x: float, y: int):
return x or 1 / y
@Compiled
def f2(x: str, y: int):
return x or 1 / y
@Compiled
def f3(x: int, y: float, z: str):
return x or y or z
with self.assertRaises(ZeroDivisionError):
f1(0.0, 0)
self.assertEqual(f1(0.0, 2), 0.5)
self.assertEqual(f1(1.23, 0), 1.23)
self.assertEqual(f1(1.23, 2), 1.23)
with self.assertRaises(ZeroDivisionError):
f2("", 0)
self.assertEqual(f2("", 2), 0.5)
self.assertEqual(f2("y", 0), "y")
self.assertEqual(f2("y", 2), "y")
self.assertEqual(f3(0, 0.0, ""), "")
self.assertEqual(f3(0, 0.0, "one"), "one")
self.assertEqual(f3(0, 1.5, ""), 1.5)
self.assertEqual(f3(0, 1.5, "one"), 1.5)
self.assertEqual(f3(3, 0.0, ""), 3)
self.assertEqual(f3(3, 0.0, "one"), 3)
self.assertEqual(f3(3, 1.5, ""), 3)
self.assertEqual(f3(3, 1.5, "one"), 3)
def test_conditional_eval_and(self):
@Compiled
def f1(x: float, y: int):
return x and 1 / y
self.assertEqual(f1(0.0, 0), 0.0)
self.assertEqual(f1(0.0, 2), 0.0)
with self.assertRaises(ZeroDivisionError):
f1(2.5, 0)
self.assertEqual(f1(2.5, 2), 0.5)
@Compiled
def f2(x: str, y: int):
return x and 1 / y
self.assertEqual(f2("", 0), "")
self.assertEqual(f2("", 2), "")
with self.assertRaises(ZeroDivisionError):
f2("y", 0)
self.assertEqual(f2("y", 2), 0.5)
@Compiled
def f(x: int, y: str, z: float):
return x and y and z
self.assertEqual(f(0, "", 0.0), 0)
self.assertEqual(f(0, "", 1.5), 0)
self.assertEqual(f(0, "one", 0.0), 0)
self.assertEqual(f(0, "one", 1.5), 0)
self.assertEqual(f(3, "", 0.0), "")
self.assertEqual(f(3, "", 1.5), "")
self.assertEqual(f(3, "one", 0.0), 0.0)
self.assertEqual(f(3, "one", 1.5), 1.5)
def test_conversion_of_deeply_nested_functions(self):
def g_0():
return 0
def f_0():
return 0
def g_1():
return g_0() + f_0()
def f_1():
return f_0() + g_0()
def g_2():
return g_1() + f_1()
def f_2():
return f_1() + g_1()
def g_3():
return g_2() + f_2()
def f_3():
return f_2() + g_2()
def g_4():
return g_3() + f_3()
def f_4():
return f_3() + g_3()
def g_5():
return g_4() + f_4()
def f_5():
return f_4() + g_4()
def g_6():
return g_5() + f_5()
def f_6():
return f_5() + g_5()
@Entrypoint
def compute():
return g_6() + f_6()
oldTimesCalculated = dict(Runtime.singleton().converter._times_calculated)
compute()
for identity, timesCalculated in Runtime.singleton().converter._times_calculated.items():
if identity not in oldTimesCalculated:
self.assertLessEqual(timesCalculated, 6, identity)
def test_converting_break_in_while(self):
def testBreak(x):
res = 0
while True:
x = x - 1
res = res + x
if x < 0:
break
return res + 1
self.assertEqual(testBreak(10), Entrypoint(testBreak)(10))
def test_converting_break_in_while_with_try_outside_of_loop(self):
def testBreak():
res = 0
try:
while True:
res += 1
break
res += 10
finally:
res += 100
return res
self.assertEqual(testBreak(), Entrypoint(testBreak)())
def test_converting_break_in_while_with_try_inside_of_loop(self):
def testBreak():
res = 0
while True:
try:
res += 1
break
finally:
res += 10
res += 100
return res
self.assertEqual(testBreak(), Entrypoint(testBreak)())
def test_converting_break_through_nested_try_finally(self):
def testBreak():
res = 0
try:
while True:
try:
try:
res += 1
break
finally:
res += 10
finally:
res += 100
finally:
res += 1000
res += 10000
return res
self.assertEqual(testBreak(), Entrypoint(testBreak)())
def test_converting_continue_through_multiple_nested_try_finally(self):
def testBreak():
res = 0
try:
while True:
if res > 0:
break
try:
try:
res += 1
continue
finally:
res += 10
finally:
res += 100
# never gets here
assert False
finally:
res += 1000
res += 10000
return res
self.assertEqual(testBreak(), Entrypoint(testBreak)())
def test_converting_continue_in_while(self):
def testContinue(x):
res = 0
while x > 0:
x = x - 1
res = res + x
if x % 2 == 0:
continue
res = res + 10
return res
self.assertEqual(testContinue(10), Entrypoint(testContinue)(10))
def test_converting_break_in_foreach(self):
def testBreak(x):
res = 0
for i in x:
res += i
if i > len(x) / 2:
break
return res
for thing in [ListOf(int)(range(10)), Tuple(int, int, int, int)((1, 2, 3, 4))]:
self.assertEqual(testBreak(thing), Entrypoint(testBreak)(thing))
def test_converting_continue_in_foreach(self):
def testContinue(x):
res = 0
for i in x:
if i > len(x) / 2:
continue
res += i
return res
for thing in [ListOf(int)(range(10)), Tuple(int, int, int, int)((1, 2, 3, 4))]:
self.assertEqual(testContinue(thing), Entrypoint(testContinue)(thing))
def test_call_function_with_wrong_number_of_arguments(self):
def f(x, y):
return x + y
@Compiled
def callIt(x: int):
return f(x)
with self.assertRaisesRegex(TypeError, "annot find a valid overload"):
callIt(1)
def test_call_function_with_default_arguments(self):
def f(x, y=1):
return x + y
@Entrypoint
def callIt(x):
return f(x)
self.assertEqual(callIt(10), f(10))
def test_call_function_with_named_args_ordering(self):
def f(x, y):
return x
@Entrypoint
def callWithArgsReversed(x, y):
return f(y=y, x=x)
self.assertEqual(callWithArgsReversed(2, 3), 2)
def test_call_function_with_named_args(self):
def f(x=1, y=10):
return x + y
def callWithX(x: int):
return f(x=x)
def callWithY(y: int):
return f(y=y)
def callWithXY(x: int, y: int):
return f(y=y, x=x)
callWithXCompiled = Compiled(callWithX)
callWithYCompiled = Compiled(callWithY)
callWithXYCompiled = Compiled(callWithXY)
self.assertEqual(callWithX(2), callWithXCompiled(2))
self.assertEqual(callWithY(2), callWithYCompiled(2))
self.assertEqual(callWithXY(2, 3), callWithXYCompiled(2, 3))
def test_call_function_with_star_args(self):
def f(*args):
return args
@Entrypoint
def callIt(x, y, z):
return f(x, y, z)
self.assertEqual(callIt(1, 2.5, "hi"), Tuple(int, float, str)((1, 2.5, "hi")))
def test_call_function_with_kwargs(self):
def f(**kwargs):
return kwargs
@Entrypoint
def callIt(x, y, z):
return f(x=x, y=y, z=z)
self.assertEqual(callIt(1, 2.5, "hi"), dict(x=1, y=2.5, z="hi"))
def test_call_function_with_excess_named_arg(self):
def f(x=1, y=2):
return x + y
@Entrypoint
def callIt(x, y, z):
return f(x=x, y=y, z=z)
with self.assertRaisesRegex(TypeError, "annot find a valid over"):
callIt(1, 2, 3)
def test_star_arg_call_function(self):
def f(x, y):
return x + y
@Entrypoint
def callIt(a):
return f(*a)
self.assertEqual(callIt(Tuple(int, int)((1, 2))), 3)
def test_star_kwarg_type(self):
def f(**kwargs):
return type(kwargs)
@Entrypoint
def callIt():
return f(x=10, y=20)
self.assertEqual(callIt(), dict)
def test_star_kwarg_as_dict(self):
def f(**kwargs):
return kwargs
@Entrypoint
def callIt():
return f(x=10, y=20)
self.assertEqual(callIt(), dict(x=10, y=20))
def test_star_kwarg_call_function(self):
def f(x, y):
return x + y
def g(**kwargs):
return f(**kwargs)
@Entrypoint
def callIt(x, y):
return g(y=y, x=x)
self.assertEqual(callIt(1, 2), 3)
@flaky(max_runs=3, min_passes=1)
def test_perf_of_star_kwarg_intermediate_is_fast(self):
def f(x, y):
return x + y
def g(**kwargs):
return f(**kwargs)
def sumUsingG(a: int):
res = 0.0
for i in range(a):
res += g(x=2, y=i)
return res
def sumUsingF(a: int):
res = 0.0
for i in range(a):
res += f(x=2, y=i)
return res
sumUsingGCompiled = Compiled(sumUsingG)
sumUsingFCompiled = Compiled(sumUsingF)
self.assertEqual(sumUsingG(100), sumUsingGCompiled(100))
t0 = time.time()
sumUsingGCompiled(1000000)
elapsedG = time.time() - t0
t0 = time.time()
sumUsingFCompiled(1000000)
elapsedF = time.time() - t0
t0 = time.time()
sumUsingG(1000000)
elapsedGPy = time.time() - t0
print("Compiled is ", elapsedGPy / elapsedG, " times faster")
# check that the extra call to 'g' doesn't introduce any overhead
self.assertTrue(.7 <= elapsedF / elapsedG <= 1.3, elapsedF / elapsedG)
@flaky(max_runs=3, min_passes=1)
def test_perf_of_star_arg_intermediate_is_fast(self):
def f(x, y):
return x + y
def g(*args):
return f(*args)
def sumUsingG(a: int):
res = 0.0
for i in range(a):
res += g(i, 2)
return res
def sumUsingF(a: int):
res = 0.0
for i in range(a):
res += f(i, 2)
return res
sumUsingGCompiled = Compiled(sumUsingG)
sumUsingFCompiled = Compiled(sumUsingF)
self.assertEqual(sumUsingG(100), sumUsingGCompiled(100))
t0 = time.time()
sumUsingGCompiled(1000000)
elapsedG = time.time() - t0
t0 = time.time()
sumUsingFCompiled(1000000)
elapsedF = time.time() - t0
t0 = time.time()
sumUsingG(1000000)
elapsedGPy = time.time() - t0
print("Compiled is ", elapsedGPy / elapsedG, " times faster")
# check that the extra call to 'g' doesn't introduce any overhead
self.assertTrue(.65 <= elapsedF / elapsedG <= 1.35, elapsedF / elapsedG)
def test_star_args_of_masquerade(self):
def f(*args):
return args[1]
@Entrypoint
def callF():
return f(1, "a b c".split())
self.assertEqual(callF.resultTypeFor().interpreterTypeRepresentation, list)
def test_star_args_type(self):
def f(*args):
return type(args)
@Entrypoint
def callF():
return f(1, 2, 3)
self.assertEqual(callF(), tuple)
def test_typed_functions_with_star_args(self):
@Function
def f(x: int):
return 1
@f.overload
def f(x: int, *args):
return 1 + len(args)
@Entrypoint
def callF1(x):
return f(x)
@Entrypoint
def callF2(x, y):
return f(x, y)
self.assertEqual(callF1(0), 1)
self.assertEqual(callF2(0, 1), 2)
def test_typed_functions_with_kwargs(self):
@Function
def f(x, **kwargs):
return x + len(kwargs)
@Entrypoint
def callF1(x):
return f(x)
@Entrypoint
def callF2(x, y):
return f(x, y)
@Entrypoint
def callF3(x, y):
return f(x, y=y)
@Entrypoint
def callF4(x, y):
return f(x, x=y)
self.assertEqual(callF1(10), 10)
with self.assertRaisesRegex(TypeError, "cannot find a valid overload"):
callF2(0, 1)
self.assertEqual(callF3(10, 2), 11)
with self.assertRaisesRegex(TypeError, "cannot find a valid overload"):
callF4(0, 1)
def test_typed_functions_with_typed_kwargs(self):
@Function
def f(**kwargs: int):
return "int"
@f.overload
def f(**kwargs: str):
return "str"
self.assertEqual(f(), "int")
self.assertEqual(f(x=1), "int")
self.assertEqual(f(x=1.5), "int")
self.assertEqual(f(x="1"), "str")
@Entrypoint
def callF(**kwargs):
return f(**kwargs)
self.assertEqual(callF(), "int")
self.assertEqual(callF(x=1), "int")
self.assertEqual(callF(x=1.5), "int")
self.assertEqual(callF(x="1"), "str")
def test_typed_functions_dispatch_based_on_names(self):
@Function
def f(x):
return "x"
@f.overload
def f(y):
return "y"
@Entrypoint
def callFUnnamed(x):
return f(x)
@Entrypoint
def callFWithY(x):
return f(y=x)
@Entrypoint
def callFWithX(x):
return f(x=x)
self.assertEqual(callFUnnamed(10), "x")
self.assertEqual(callFWithX(10), "x")
self.assertEqual(callFWithY(10), "y")
def test_typed_functions_with_oneof(self):
@Function
def f(x: OneOf(int, float)):
return x + 1.0
@Entrypoint
def callF(x):
return f(x)
@Entrypoint
def callF2(x: OneOf(int, float, str)):
return f(x)
self.assertEqual(callF(1.5), 2.5)
self.assertEqual(callF(1), 2.0)
self.assertEqual(callF2(1.5), 2.5)
self.assertEqual(callF2(1), 2.0)
with self.assertRaisesRegex(TypeError, r"Failed to find an overload"):
callF2("h")
def test_can_call_function_with_typed_function_as_argument(self):
@Function
def add(x: int, y: int):
return x + y
def g(x, y):
return x + y
@Function
def callIt(x: int, f: add):
return f(x, 1)
self.assertEqual(callIt(1, add), 2)
with self.assertRaisesRegex(TypeError, "annot find a valid overload"):
callIt(1, g)
def test_check_type_of_method_conversion(self):
@Entrypoint
def g(x: OneOf(None, TupleOf(int))):
return type(x)
self.assertEqual(g((1, 2, 3)), TupleOf(int))
self.assertEqual(g(None), type(None))
def test_check_is_on_unlike_things(self):
@Entrypoint
def g(x, y):
return x is y
self.assertFalse(g([], []))
self.assertTrue(g(None, None))
self.assertFalse(g(ListOf(int)(), TupleOf(int)()))
def test_if_condition_throws(self):
def throws():
raise Exception("Boo")
@Entrypoint
def shouldThrow():
x = ListOf(int)()
y = Dict(int, int)()
if x in y:
return True
else:
return False
with self.assertRaisesRegex(Exception, "Couldn't initialize type int"):
shouldThrow()
def test_if_with_return_types(self):
@Entrypoint
def popCheck(d, x):
if x in d:
d.pop(x)
popCheck(Dict(int, int)(), 1)
def test_assign_to_arguments_with_typechange(self):
@Entrypoint
def f(x, y: object):
x = x + y
f(1, 1)
def test_unassigned_variable_access(self):
@Compiled
def reduce2(aList: ListOf(int)):
for i in aList:
r += i # noqa
return r # noqa
with self.assertRaisesRegex(Exception, "ame 'r' is not defined"):
reduce2([1, 2, 3])
def test_iterate_closures(self):
x = ListOf(int)((1, 2, 3))
@Entrypoint
def f():
res = ListOf(int)()
for value in x:
res.append(value)
return res
self.assertEqual(f(), [1, 2, 3])
def test_function_not_returning_returns_none(self):
@Entrypoint
def f(l, i, y):
l[i] = y
self.assertEqual(f.resultTypeFor(ListOf(int), int, int).typeRepresentation, type(None))
def test_method_not_returning_returns_none(self):
class NoPythonObjectTypes(RuntimeEventVisitor):
def onNewFunction(
self,
identifier,
functionConverter,
nativeFunction,
funcName,
funcCode,
funcGlobals,
closureVars,
inputTypes,
outputType,
yieldType,
variableTypes,
conversionType
):
assert issubclass(outputType.typeRepresentation, Type)
class C(Class, Final):
def f(self, l, i, y: int):
l[i] = y
def f(self, l, i, y: float): # noqa
l[i] = y
@Entrypoint
def f(l: ListOf(int), i, y: OneOf(int, float)):
return C().f(l, i, y)
with NoPythonObjectTypes():
f(ListOf(int)([1, 2, 3]), 0, 2)
def test_try_simple(self):
def f0(x: int) -> str:
ret = "try "
try:
ret += str(1/x) + " "
except TypeError:
ret += "catch "
finally:
ret += "finally"
return ret
def f1(x: int) -> str:
ret = "try "
try:
ret += str(1/x) + " "
except Exception: # noqa: E722
raise NotImplementedError("custom")
ret += "catch "
finally:
ret += "finally"
return ret
def f2(x: int) -> str:
ret = "try "
try:
ret += str(1/x) + " "
except Exception:
ret += "catch "
finally:
ret += "finally"
return ret
def f3(x: int) -> str:
ret = "try "
try:
ret += str(1/x) + " "
except Exception:
ret += "catch "
return ret
def f4(x: int) -> str:
ret = "try "
try:
ret += str(1/x) + " "
except Exception:
ret += "catch "
else:
ret += "else "
return ret
def f5(x: int) -> str:
ret = "try "
try:
ret += str(1/x) + " "
if x == 1:
ret += x
except ZeroDivisionError as ex:
ret += "catch1 " + str(type(ex))
except TypeError as ex:
ret += "catch2 " + str(type(ex))
except Exception:
ret += "catchdefault "
finally:
ret += "finally"
return ret
def f6(x: int) -> str:
ret = "try "
try:
ret += str(1/x) + " "
if x == 1:
ret += x
except ArithmeticError:
ret += "catch1 "
# TODO: The compiled code will have type(ex) = ArithmeticError instead of ZeroDivisionError.
# TODO: Also, there are variations between interpreted and compiled code in the string representations of errors.
except TypeError as ex:
ret += "catch2 " + str(type(ex))
except Exception:
ret += "catchdefault "
finally:
ret += "finally"
return ret
def f7(x: int) -> str:
ret = "try "
try:
ret += str(1/x) + " "
except ZeroDivisionError as ex:
ret += "catch " + " " + str(ex) + " "
finally:
ret += "finally " + str(ex) # noqa: F821
# Ensure that this is detected as error "variable 'ex' referenced before assignment" in compiled case
return ret
def f7a(x: int) -> str:
ex = "might be overwritten"
ret = "try "
try:
ret += str(1/x) + " "
except ZeroDivisionError as ex:
ret += "catch " + " " + str(ex) + " "
finally:
ret += "finally " + str(ex)
# Ensure that this is detected as error "variable 'ex' referenced before assignment" in compiled case
return ret
# TODO: support finally in situation where control flow exits try block
def f8(x: int) -> str:
ret = "start "
for i in [0, 1]:
ret += str(i) + " "
if i > x:
try:
ret += "try"
if x < 10:
break
finally:
ret += "finally"
return ret
def f9(x: int) -> int:
try:
t = 0
for i in range(10):
t += i
if i > x * 10:
return t
finally:
t = 123
return t+1
def f10(x: int) -> int:
try:
t = 456
return t
finally:
t = 123
return t
def f11(x: int) -> int:
try:
if x == 0:
return int(1/0)
elif x == 1:
raise SyntaxError("aaa")
except Exception as e:
raise NotImplementedError("bbb") from e
return 0
def f12(x: int) -> int:
try:
if x == 0:
return int(1/0)
elif x == 1:
raise SyntaxError("aaa")
except Exception:
raise NotImplementedError("bbb") from None
return 0
def f13(x: int) -> int:
try:
return 111
finally:
return 222
def f14(x: int) -> str:
ret = "try "
try:
ret += str(1/x) + " "
if x == 1:
ret += x
except SyntaxError:
ret += "catch1 "
except (TypeError, ArithmeticError):
ret += "catch2 "
except Exception:
ret += "catchdefault "
finally:
ret += "finally"
return ret
def f15(x: int) -> str:
ret = "begin "
try:
ret += "return "
ret += str(1/x) + " "
return ret
except Exception:
ret += "except "
finally:
ret += "finally "
return "But return this instead: " + ret
def f16(x: int) -> str:
ret = "begin "
try:
ret += "return "
ret += str(1/x) + " "
return ret
except Exception:
ret += "except "
return "Exception " + ret
finally:
ret += "finally "
def f17(x: int) -> str:
ret = "begin "
try:
ret += "return "
ret += str(1/(x-1)) + " " + str(1/x) + " "
return ret
except Exception:
ret += "except "
ret += str(1/x) + " "
return "Exception " + ret
finally:
ret += "finally "
return ret
def f18(x: int) -> int:
try:
return x
finally:
x += 1
return x
def f19(x: int) -> str:
try:
ret = "try "
except Exception:
ret = "exception "
else:
ret += "else "
return ret
finally:
if x == 0:
return "override"
def f20(x: int) -> str:
try:
ret = "try "
if x < 10:
return ret
except Exception:
ret = "exception "
else:
ret += "else "
return ret
finally:
if x == 0:
return "override"
def f21(x: int) -> str:
ret = "start "
for i in [0, 1]:
ret += str(i) + " "
if i > x:
try:
ret += "try"
break
finally:
return "override"
return ret
def f22(x: int) -> str:
ret = "start "
for i in [0, 1]:
ret += str(i) + " "
if i > x:
try:
ret += "try"
if x < 10:
return "try"
finally:
if x < 10:
break
return ret
def f23(x: int) -> str:
ret = "start "
for i in [0, 1, 2]:
ret += str(i) + " "
try:
ret += "try "
if i == 0:
continue
ret += "looping "
finally:
ret += "finally "
if x == 1:
return "override "
return ret
def f24(x: int) -> str:
ret = "start "
for i in [0, 1]:
ret += str(i) + " "
if i > x:
try:
ret += "try"
return "try"
finally:
break
return ret
def f25(x: int) -> str:
ret = "start "
for i in [0, 1]:
ret += str(i) + " "
if i > x:
try:
ret += "try"
ret += str(1/x)
finally:
break
return ret
# Assertion failure: not self.block.is_terminated
def t1(a: int) -> int:
try:
return 1
finally:
return 2
# compiles
def t2(a: int) -> int:
try:
if a == 1:
return 1
finally:
return 2
# Assertion failure: not self.block.is_terminated
def t3(a: int) -> int:
try:
return 1
finally:
pass
return 3
# failure: [f14] Tuples of exceptions not supported yet.
# failures: [f15, f16, f17, f19, f20, f21, f22, f23, f24]
for f in [f0, f1, f2, f3, f4, f5, f6, f7, f7a, f8, f9, f10, f11, f12, f13, f18, f25]:
c_f = Compiled(f)
for v in [0, 1]:
r1 = result_or_exception_tb(f, v)
r2 = result_or_exception_tb(c_f, v)
self.assertEqual(r1, r2, (str(f), v))
@flaky(max_runs=5, min_passes=1)
def test_try_general(self):
def g1(a: int, b: int, c: int, d: int) -> str:
ret = "start "
try:
ret += "try " + str(a) + " "
if a == 1:
ret += str(1/0)
elif a == 2:
ret += a
elif a == 3:
raise NotImplementedError("in body")
elif a == 4:
return ret
except ArithmeticError:
ret += "catch1 " + str(b) + " "
if b == 1:
ret += str(1/0)
elif b == 2:
ret += b
elif b == 3:
raise NotImplementedError("in handler")
elif b == 4:
return ret
elif b == 5:
raise
# TODO: The compiled code will have type(ex) = ArithmeticError instead of ZeroDivisionError.
# TODO: Also, there are variations between interpreted and compiled code in the string representations of errors.
except TypeError:
ret += "catch2 " + str(b) + " "
if b == 1:
ret += str(1/0)
elif b == 2:
ret += b
elif b == 3:
raise NotImplementedError("in handler")
elif b == 4:
return ret
elif b == 5:
raise
except Exception:
ret += "catchdefault " + str(b) + " "
if b == 1:
ret += str(1/0)
elif b == 2:
ret += b
elif b == 3:
raise NotImplementedError("in handler")
elif b == 4:
return ret
elif b == 5:
raise
else:
ret += "else " + str(c) + " "
if c == 1:
ret += str(1/0)
elif c == 2:
ret += b
elif c == 3:
raise NotImplementedError("in else")
elif c == 4:
return ret
finally:
ret += "finally " + str(d) + " "
if d == 1:
ret += str(1/0)
elif d == 2:
ret += b
elif d == 3:
raise NotImplementedError("in finally")
elif d == 4:
return ret
ret += "end "
return ret
def g2(a: int, b: int, c: int, d: int) -> str:
ret = "start "
for i in [1, 2, 3]:
try:
ret += "try" + str(i) + ':' + str(a) + " "
if a == 1:
ret += str(1/0)
elif a == 2:
ret += a
elif a == 3:
break
elif a == 4:
continue
elif a == 5:
ret += "return within try "
return ret
ret += "a "
except ZeroDivisionError:
ret += "except "
if b == 1:
ret += str(1/0)
elif b == 2:
ret += b
elif b == 3:
break
elif b == 4:
continue
elif b == 5:
ret += "return within except "
return ret
ret += "b "
else:
ret += "else "
if c == 1:
ret += str(1/0)
elif c == 2:
ret += c
elif c == 3:
ret += "return within except "
return ret
ret += "c "
finally:
ret += "finally "
if d == 1:
ret += str(1/0)
elif d == 2:
ret += d
elif d == 3:
ret += "return within finally "
return ret
ret += "d "
ret += "end"
return ret
def g3(x: int):
try:
if x == 1:
raise SyntaxError()
finally:
if x == 1:
raise NotImplementedError()
def g4(x: int):
try:
if x == 1:
raise SyntaxError()
except Exception:
pass
finally:
pass
def g4a(x: int):
try:
if x == 1:
raise SyntaxError()
except Exception as ex:
ex
finally:
pass
def g5(x: int) -> int:
t = x
for i in range(x+100):
t += i
return t
def g11(x: int) -> int:
try:
if x == 0:
return int(1/0)
elif x == 1:
raise SyntaxError("aaa")
except Exception as e:
raise NotImplementedError("bbb") from e
return 0
perf_test_cases = [
(g1, (3, 3, 0, 3), 2.0),
(g4, (1,), 2.0),
(g4, (0,), 2.0),
(g4a, (1,), 2.0),
(g4a, (0,), 2.0),
(g1, (0, 0, 0, 0), 2.0),
(g1, (0, 0, 0, 4), 2.0),
(g1, (4, 0, 0, 0), 2.0),
(g1, (3, 0, 0, 0), 2.0),
(g1, (3, 3, 0, 3), 2.0),
(g1, (3, 4, 0, 0), 2.0)
]
for f, a, limit in perf_test_cases:
m0 = psutil.Process().memory_info().rss / 1024
t0 = time.time()
repeat_test(f, *a)
t1 = time.time()
m1 = psutil.Process().memory_info().rss / 1024
# burn in the compiler
repeat_test_compiled(f, *a)
m2 = psutil.Process().memory_info().rss / 1024
t2 = time.time()
repeat_test_compiled(f, *a)
t3 = time.time()
m3 = psutil.Process().memory_info().rss / 1024
ratio = (t3 - t2) / (t1 - t0)
print(f"{f.__name__}{a}: compiled/interpreted is {ratio:.2%}.")
# performance is poor, so don't fail yet
# self.assertLessEqual(ratio, limit, (f.__name__, a))
print(f"Increase was {m3 - m2} vs {m1 - m0}")
# this is failing nondeterministically, and it's not clear why, but it's also
# not clear to me that it's really because of a memory issue.
# osx memory usage rises, but not others
# if sys.platform != "darwin":
# self.assertLessEqual(m3 - m2, m1 - m0 + 512, (f.__name__, a))
for f in [g1]:
c_f = Compiled(f)
for a in [0, 1, 2, 3, 4]:
for b in [0, 1, 2, 3, 4, 5]:
for c in [0, 1, 2, 3, 4]:
for d in [0, 1, 2, 3, 4]:
r1 = result_or_exception_tb(f, a, b, c, d)
r2 = result_or_exception_tb(c_f, a, b, c, d)
self.assertEqual(r1, r2, (str(f), a, b, c, d))
for f in [g2]:
c_f = Compiled(f)
for a in [0, 1, 2, 3, 4, 5]:
for b in [0, 1, 2, 3, 4, 5]:
for c in [0, 1, 2, 3]:
for d in [0, 1, 2, 3]:
r1 = result_or_exception_tb(f, a, b, c, d)
r2 = result_or_exception_tb(c_f, a, b, c, d)
self.assertEqual(r1, r2, (str(f), a, b, c, d))
def test_try_nested(self):
def n1(x: int, y: int) -> str:
try:
ret = "try1 "
if x == 1:
ret += str(1/0)
elif x == 2:
ret += x
elif x == 3:
raise NotImplementedError("try1")
elif x == 4:
return ret
ret += str(x) + " "
try:
ret += "try2 "
if y == 1:
ret += str(1/0)
elif y == 2:
ret += y
elif y == 3:
raise NotImplementedError("try2")
elif y == 4:
return ret
ret += str(y) + " "
except ArithmeticError:
ret += "catch1 "
finally:
ret += "finally1 "
except TypeError as ex:
ret += "catch2 " + str(type(ex))
finally:
ret += "finally2"
return ret
def n2(x: int, y: int) -> str:
ret = "start "
i = 0
while i < 3:
i += 1
ret += "(" + str(i) + ": "
try:
ret + "try1 "
if x == 1:
ret += str(1/0)
elif x == 2:
ret += x
elif x == 3:
raise NotImplementedError("try1")
elif x == 4:
break
elif x == 5:
continue
elif x == 6:
return ret
ret += str(x) + " "
try:
ret += "try2 "
if y == 1:
ret += str(1/0)
elif y == 2:
ret += y
elif y == 3:
raise NotImplementedError("try2")
elif y == 4:
break
elif y == 5:
continue
elif y == 6:
return ret
ret += str(y) + " "
except ArithmeticError:
ret += "catch1 "
finally:
ret += "finally1 "
except TypeError as ex:
ret += "catch2 " + str(type(ex))
finally:
ret += "finally2 "
ret += ") "
ret += "done "
return ret
for f in [n1]:
c_f = Compiled(f)
for a in [0, 1, 2, 3, 4]:
for b in [0, 1, 2, 3, 4]:
r1 = result_or_exception_tb(f, a, b)
r2 = result_or_exception_tb(c_f, a, b)
self.assertEqual(r1, r2, (str(f), a, b))
for f in [n2]:
c_f = Compiled(f)
for a in [0, 1, 2, 3, 4, 5, 6]:
for b in [0, 1, 2, 3, 4, 5, 6]:
r1 = result_or_exception_tb(f, a, b)
r2 = result_or_exception_tb(c_f, a, b)
self.assertEqual(r1, r2, (str(f), a, b))
def test_compile_chained_context_managers(self):
class CM(Class, Final):
lst = Member(ListOf(int))
def __init__(self, l):
self.lst = l
def __enter__(self):
self.lst.append(1)
def __exit__(self, a, b, c):
self.lst.pop()
def chainTwoOfThem():
aList = ListOf(int)()
with CM(aList), CM(aList):
assert len(aList) == 2
assert len(aList) == 0
chainTwoOfThem()
Entrypoint(chainTwoOfThem)()
def test_try_reraise(self):
# Test reraise directly in exception handler
def reraise1(a: int, b: int) -> str:
ret = "start "
try:
if a == 1:
ret += str(1/0)
elif a == 2:
ret += a
except Exception:
ret += "caught "
if b == 1:
raise
ret += "end"
return ret
# Test reraise in function called by exception handler
def reraise2(a: int, b: int) -> str:
ret = "start "
try:
if a == 1:
ret += str(1/0)
elif a == 2:
ret += a
except Exception:
ret += reraise(b)
ret += "end"
return ret
# Test if reraise is possible if 'try' is interpreted but 'raise' is compiled.
# Might be unlikely, but ensures we are following the language rules.
def reraise0(a: int, b: int) -> str:
ret = "start "
try:
if a == 1:
ret += str(1/0)
elif a == 2:
ret += a
except Exception:
ret += Compiled(reraise)(b)
ret += "end"
return ret
def reraise(b: int) -> str:
if b == 1:
raise
return "caught "
# Test raise outside of handler
# TODO: traceback is different in this case. Usually 'raise' does not get a traceback line, but in this case it does.
c_reraise = Compiled(reraise)
for b in [0, 1]:
r1 = result_or_exception_str(reraise, b)
r2 = result_or_exception_str(c_reraise, b)
self.assertEqual(r1, r2, b)
# Test raise inside handler
c_reraise1 = Compiled(reraise1)
c_reraise2 = Compiled(reraise2)
for a in [0, 1, 2]:
for b in [0, 1]:
# functional results should be the same for all 3 functions, compiled or interpreted
r0 = result_or_exception(reraise0, a, b)
r1 = result_or_exception(reraise1, a, b)
r2 = result_or_exception(c_reraise1, a, b)
r3 = result_or_exception(reraise2, a, b)
r4 = result_or_exception(c_reraise2, a, b)
self.assertEqual(r0, r1, (a, b))
self.assertEqual(r0, r2, (a, b))
self.assertEqual(r0, r3, (a, b))
self.assertEqual(r0, r4, (a, b))
# tracebacks should be the same for each function, compiled or interpreted
r1 = result_or_exception_tb(reraise1, a, b)
r2 = result_or_exception_tb(c_reraise1, a, b)
r3 = result_or_exception_tb(reraise2, a, b)
r4 = result_or_exception_tb(c_reraise2, a, b)
self.assertEqual(r1, r2, (a, b))
self.assertEqual(r3, r4, (a, b))
def test_context_manager_refcounts(self):
        class ContextManager(Class, Final):
def __enter__(self):
pass
def __exit__(self, a, b, c):
pass
@Entrypoint
def f(x):
            with ContextManager():
return x
a = ListOf(int)()
assert _types.refcount(a) == 1
f(a)
assert _types.refcount(a) == 1
def test_try_finally_refcounts(self):
@Entrypoint
def f(x):
try:
return x
finally:
pass
a = ListOf(int)()
assert _types.refcount(a) == 1
f(a)
assert _types.refcount(a) == 1
def test_context_manager_functionality(self):
class ConMan1():
def __init__(self, a, b, c, t):
self.a = a
self.b = b
self.c = c
self.t = t # trace
def __enter__(self):
self.t.append("__enter__")
if self.a == 1:
self.t.append("raise in __enter__")
raise SyntaxError()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
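                # Returning True from __exit__ suppresses the in-flight exception
                # (context-manager protocol); here that is controlled by c == 1.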
self.t.append(f"__exit__ {str(exc_type)} {exc_val}")
if self.b == 1:
self.t.append("raise in __exit__")
raise NotImplementedError()
self.t.append(f"__exit__ returns {self.c == 1}")
return self.c == 1
class ConMan2(Class, Final):
a = Member(int)
b = Member(int)
c = Member(int)
t = Member(ListOf(str))
def __init__(self, a: int, b: int, c: int, t: ListOf(str)):
self.a = a
self.b = b
self.c = c
self.t = t
def __enter__(self):
self.t.append("__enter__")
if self.a == 1:
self.t.append("raise in __enter__")
raise SyntaxError()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.t.append(f"__exit__ {str(exc_type)} {exc_val}")
if self.b == 1:
self.t.append("raise in __exit__")
raise NotImplementedError()
self.t.append(f"__exit__ returns {self.c == 1}")
return self.c == 1
def with_cm_simple1(a, b, c, d, t) -> int:
t.append("start")
with ConMan1(a, b, c, t):
t.append("body")
if d == 1:
t.append("raise")
raise ZeroDivisionError()
elif d == 2:
t.append("return1")
return 1
t.append("return2")
return 2
def with_cm_simple2(a: int, b: int, c: int, d: int, t: ListOf(str)) -> int:
t.append("start")
with ConMan2(a, b, c, t):
t.append("body")
if d == 1:
t.append("raise")
raise ZeroDivisionError()
elif d == 2:
t.append("return1")
return 1
t.append("return2")
return 2
def with_cm_simple_mixed(a: int, b: int, c: int, d: int, t: ListOf(str)) -> int:
t.append("start")
with ConMan1(a, b, c, t):
t.append("body")
if d == 1:
t.append("raise")
raise ZeroDivisionError()
elif d == 2:
t.append("return1")
return 1
t.append("return2")
return 2
def with_cm_nested1(a, b, c, d, e, f, g, h, t) -> int:
t.append("start")
with ConMan1(a, b, c, t) as x:
t.append(f"outerbody {x.a} {x.b} {x.c}")
with ConMan1(e, f, g, t) as y:
t.append(f"innerbody {y.a} {y.b} {y.c}")
if h == 1:
t.append("innerraise")
raise FileNotFoundError()
elif h == 2:
t.append("innerreturn3")
return 3
if d == 1:
t.append("outerraise")
raise ZeroDivisionError()
elif d == 2:
t.append("outerreturn1")
return 1
t.append("return2")
return 2
def with_cm_nested2(a: int, b: int, c: int, d: int, e: int, f: int, g: int, h: int, t: ListOf(str)) -> int:
t.append("start")
with ConMan2(a, b, c, t) as x:
t.append(f"outerbody {x.a} {x.b} {x.c}")
with ConMan2(e, f, g, t) as y:
t.append(f"innerbody {y.a} {y.b} {y.c}")
if h == 1:
t.append("innerraise")
raise FileNotFoundError()
elif h == 2:
t.append("innerreturn3")
return 3
if d == 1:
t.append("outerraise")
raise ZeroDivisionError()
elif d == 2:
t.append("outerreturn1")
return 1
t.append("return2")
return 2
def with_no_enter() -> str:
not_a_cm = "not a context manager"
with not_a_cm:
pass
return "done"
class EnterWrongSignature(Class, Final):
def __enter__(self, x):
return self
def __exit__(self, x, y, z):
return True
def with_enter_wrong_sig() -> str:
with EnterWrongSignature():
pass
return "done"
# Note difference in error string, depending on definition of __exit__, even though it is an '__enter__' error.
# >>> class EnterWrongSignature1():
# ... def __enter__(self, x):
# ... return self
# ... def __exit__(self):
# ... return True
# ...
# >>> with EnterWrongSignature1():
# ... pass
# ...
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: __enter__() missing 1 required positional argument: 'x'
# >>> class EnterWrongSignature2():
# ... def __enter__(self, x):
# ... return self
# ...
# >>> with EnterWrongSignature2():
# ... pass
# ...
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# AttributeError: __exit__
class ExitWrongSignature(Class, Final):
def __enter__(self):
return self
def __exit__(self, x: int):
return self
def with_exit_wrong_sig(a: int) -> str:
with ExitWrongSignature():
if a == 1:
raise SyntaxError()
return "done"
class EnterNoExit(Class, Final):
def __enter__(self):
return self
def with_no_exit(a: int) -> str:
with EnterNoExit():
if a == 1:
raise SyntaxError()
elif a == 2:
return "return inside with"
return "done"
def with_cm_loop1(a, b, c, d, t) -> int:
t.append("start")
for i in range(3):
t.append(f"{i}:")
with ConMan1(a, b, c, t):
t.append("body")
if d == 1 and i == 1:
t.append("raise")
raise ZeroDivisionError()
elif d == 2 and i == 1:
t.append("break")
break
elif d == 3 and i == 1:
t.append("continue")
continue
elif d == 4 and i == 1:
t.append("return1")
return 1
t.append("end of body")
t.append("return2")
return 2
def with_cm_loop2(a: int, b: int, c: int, d: int, t: ListOf(str)) -> int:
t.append("start")
for i in range(3):
t.append(f"{i}:")
with ConMan2(a, b, c, t):
t.append("body")
if d == 1 and i == 1:
t.append("raise")
raise ZeroDivisionError()
elif d == 2 and i == 1:
t.append("break")
break
elif d == 3 and i == 1:
t.append("continue")
continue
elif d == 4 and i == 1:
t.append("return1")
return 1
t.append("end of body")
t.append("return2")
return 2
c_with_enter_wrong_sig = Compiled(with_enter_wrong_sig)
r1 = result_or_exception(with_enter_wrong_sig)
r2 = result_or_exception(c_with_enter_wrong_sig)
# both are TypeError, but string description is different
self.assertEqual(r1, r2)
c_with_exit_wrong_sig = Compiled(with_exit_wrong_sig)
r1 = result_or_exception(with_exit_wrong_sig)
r2 = result_or_exception(c_with_exit_wrong_sig)
# both are TypeError, but string description is different
self.assertEqual(r1, r2)
c_with_no_enter = Compiled(with_no_enter)
r1 = result_or_exception(with_no_enter)
r2 = result_or_exception(c_with_no_enter)
self.assertEqual(r1, r2)
c_with_no_exit = Compiled(with_no_exit)
for a in [0, 1, 2]:
r1 = result_or_exception(with_no_exit, a)
r2 = result_or_exception(c_with_no_exit, a)
self.assertEqual(r1, r2, a)
for a in [0, 1]:
for b in [0, 1]:
for c in [0, 1]:
for d in [0, 1, 2]:
t1 = []
r1 = result_or_exception(with_cm_simple1, a, b, c, d, t1)
t2 = ListOf(str)([])
r2 = result_or_exception(Compiled(with_cm_simple2), a, b, c, d, t2)
self.assertEqual(r1, r2, (a, b, c, d))
self.assertEqual(t1, t2, (a, b, c, d))
t3 = ListOf(str)([])
r3 = result_or_exception(Compiled(with_cm_simple_mixed), a, b, c, d, t3)
self.assertEqual(r1, r3, (a, b, c, d))
self.assertEqual(t1, t3, (a, b, c, d))
for a in [0, 1]:
for b in [0, 1]:
for c in [0, 1]:
for d in [0, 1, 2, 3, 4]:
t1 = []
r1 = result_or_exception(with_cm_loop1, a, b, c, d, t1)
t2 = ListOf(str)([])
r2 = result_or_exception(Compiled(with_cm_loop2), a, b, c, d, t2)
if r1 != r2 or t1 != t2:
print(r1)
print(r2)
print(t1)
print(t2)
self.assertEqual(r1, r2, (a, b, c, d))
self.assertEqual(t1, t2, (a, b, c, d))
for a in [0, 1]:
for b in [0, 1]:
for c in [0, 1]:
for d in [0, 1, 2]:
for e in [0, 1]:
for f in [0, 1]:
for g in [0, 1]:
for h in [0, 1, 2]:
t1 = []
r1 = result_or_exception(with_cm_nested1, a, b, c, d, e, f, g, h, t1)
t2 = ListOf(str)([])
r2 = result_or_exception(Compiled(with_cm_nested2), a, b, c, d, e, f, g, h, t2)
self.assertEqual(r1, r2, (a, b, c, d, e, f, g, h))
self.assertEqual(t1, t2, (a, b, c, d, e, f, g, h))
@flaky(max_runs=3, min_passes=1)
def test_context_manager_perf(self):
class ConMan1():
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def __enter__(self):
if self.a == 1:
raise SyntaxError()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.b == 1:
raise NotImplementedError()
return self.c == 1
class ConMan2(Class, Final):
a = Member(int)
b = Member(int)
c = Member(int)
def __init__(self, a: int, b: int, c: int):
self.a = a
self.b = b
self.c = c
def __enter__(self):
if self.a == 1:
raise SyntaxError()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.b == 1:
raise NotImplementedError()
return self.c == 1
def with_cm_simple1(a, b, c, d) -> int:
with ConMan1(a, b, c):
if d == 1:
raise ZeroDivisionError()
elif d == 2:
return 1
return 2
def with_cm_simple2(a: int, b: int, c: int, d: int, t: ListOf(str)) -> int:
with ConMan2(a, b, c):
if d == 1:
raise ZeroDivisionError()
elif d == 2:
return 1
return 2
perf_test_cases = [
(with_cm_simple1, with_cm_simple2, (0, 0, 0, 0), 1.0),
(with_cm_simple1, with_cm_simple2, (0, 0, 0, 1), 1.0),
(with_cm_simple1, with_cm_simple2, (0, 0, 1, 0), 1.0),
(with_cm_simple1, with_cm_simple2, (0, 0, 1, 1), 1.0),
(with_cm_simple1, with_cm_simple2, (0, 1, 0, 0), 1.0),
(with_cm_simple1, with_cm_simple2, (1, 0, 0, 0), 1.0),
(with_cm_simple1, with_cm_simple1, (0, 0, 0, 0), 1.0),
(with_cm_simple1, with_cm_simple1, (0, 0, 0, 1), 1.0),
(with_cm_simple1, with_cm_simple1, (0, 0, 1, 0), 1.0),
(with_cm_simple1, with_cm_simple1, (0, 0, 1, 1), 1.0),
(with_cm_simple1, with_cm_simple1, (0, 1, 0, 0), 1.0),
(with_cm_simple1, with_cm_simple1, (1, 0, 0, 0), 1.0),
(with_cm_simple2, with_cm_simple2, (0, 0, 0, 0), 1.0),
(with_cm_simple2, with_cm_simple2, (0, 0, 0, 1), 1.0),
(with_cm_simple2, with_cm_simple2, (0, 0, 1, 0), 1.0),
(with_cm_simple2, with_cm_simple2, (0, 0, 1, 1), 1.0),
(with_cm_simple2, with_cm_simple2, (0, 1, 0, 0), 1.0),
(with_cm_simple2, with_cm_simple2, (1, 0, 0, 0), 1.0),
]
for f1, f2, a, limit in perf_test_cases:
m0 = psutil.Process().memory_info().rss / 1024
t0 = time.time()
repeat_test(f1, *a)
t1 = time.time()
m1 = psutil.Process().memory_info().rss / 1024
# burn in the compiler
repeat_test_compiled(f2, *a)
m2 = psutil.Process().memory_info().rss / 1024
t2 = time.time()
repeat_test_compiled(f2, *a)
t3 = time.time()
m3 = psutil.Process().memory_info().rss / 1024
ratio = (t3 - t2) / (t1 - t0)
print(f"{f1.__name__}{a}: compiled/interpreted is {ratio:.2%}.")
# performance is poor, so don't compare yet
# self.assertLessEqual(ratio, limit, (f1.__name__, a))
self.assertLessEqual(m3 - m2, m1 - m0 + 1024, (f1.__name__, a))
def test_context_manager_assignment(self):
class ConMan(Class, Final):
a = Member(int)
b = Member(int)
c = Member(int)
t = Member(ListOf(str))
def __init__(self, a: int, b: int, c: int, t: ListOf(str)):
self.a = a
self.b = b
self.c = c
self.t = t
def __enter__(self):
self.t.append("__enter__")
if self.a == 1:
self.t.append("raise in __enter__")
raise SyntaxError()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.t.append(f"__exit__ {str(exc_type)}")
if self.b == 1:
self.t.append("raise in __exit__")
raise NotImplementedError()
self.t.append(f"__exit__ returns {self.c == 1}")
return self.c == 1
def with_cm_assign(a: int, b: int, c: int, d: int, t: ListOf(str)) -> int:
t.append("start")
with ConMan(a, b, c, t) as x:
t.append(f"body {x.a} {x.b} {x.c}")
if d == 1:
t.append("raise")
raise ZeroDivisionError()
elif d == 2:
t.append("return1")
return 1
t.append("return2")
return 2
for a in [0, 1]:
for b in [0, 1]:
for c in [0, 1]:
for d in [0, 1, 2]:
t1 = ListOf(str)([])
t2 = ListOf(str)([])
with_cm_assign_c = Compiled(with_cm_assign)
r1 = result_or_exception_str(with_cm_assign, a, b, c, d, t1)
r2 = result_or_exception_str(with_cm_assign_c, a, b, c, d, t2)
if r1 != r2 or t1 != t2:
print(r1)
print(r2)
print(t1)
print(t2)
self.assertEqual(r1, r2, (a, b, c, d))
self.assertEqual(t1, t2, (a, b, c, d))
def test_catch_definite_exception(self):
@Entrypoint
def g():
raise Exception("boo")
@Entrypoint
def f(x):
try:
g()
except Exception:
pass
self.assertEqual(f(1), None)
def test_catch_definite_exception_propagate_but_catch(self):
@Entrypoint
def g():
raise Exception("boo")
@Entrypoint
def f(x):
try:
try:
g()
except Exception:
raise Exception("Boo again!")
except Exception:
pass
self.assertEqual(f(1), None)
def test_catch_definite_exception_propagate(self):
@Entrypoint
def g():
raise Exception("boo")
@Entrypoint
def f(x):
try:
g()
except Exception:
raise Exception("Boo again!")
with self.assertRaisesRegex(Exception, "Boo again"):
f(1)
def test_catch_possible_exception(self):
@Entrypoint
def g():
raise Exception("boo")
@Entrypoint
def f(x):
try:
if x < 0:
return 0
g()
except Exception:
pass
self.assertEqual(f(1), None)
self.assertEqual(f(-1), 0)
def test_many_mutually_interesting_functions(self):
def f0(x):
pass
def f1(x):
f0(x)
def f2(x):
f1(x)
def f3(x):
f2(x)
# f4 takes many passes to get a type assignment
# because it has to see each child get processed
def f4(x):
f0(x)
f1(x)
f2(x)
f3(x)
# f5 will see f4 as existing, and needs to be
# recalculated when f4 gets its type completed
def f5(x):
f4(x)
# f6 depends on both functions simultaneously
def f6(x):
if x > 0:
f5(x)
if x > 0:
f4(x)
Entrypoint(f6)(10)
def test_not_compiled_called_from_compiled(self):
@NotCompiled
def f():
assert not isCompiled()
return "OK"
@Entrypoint
def g():
assert isCompiled()
return f()
self.assertEqual(g(), "OK")
def test_not_compiled_lambdas(self):
@Entrypoint
def callIt(f):
return f(1)
self.assertEqual(callIt(NotCompiled(lambda x: x + 1, int)), 2)
def test_same_code_with_different_globals(self):
def call(x):
return f(x) # noqa
ast = convertFunctionToAlgebraicPyAst(call)
f1 = evaluateFunctionDefWithLocalsInCells(ast, {'f': str}, {})
f2 = evaluateFunctionDefWithLocalsInCells(ast, {'f': int}, {})
@Entrypoint
def callFunc(f, x):
return f(x)
self.assertEqual(callFunc(f1, 10.5), "10.5")
self.assertEqual(callFunc(f2, 10.5), 10)
def test_reconstructed_code_has_same_identity_hash(self):
def call(x):
return x
ast = convertFunctionToAlgebraicPyAst(call)
assert ast.filename == call.__code__.co_filename
newCall = evaluateFunctionDefWithLocalsInCells(ast, {'f': str}, {})
assert newCall.__code__.co_filename == call.__code__.co_filename
assert identityHash(call.__code__) == identityHash(newCall.__code__)
def test_code_with_nested_listcomp(self):
def call(x):
return [[(i, 0, 0) for i in y] for y in x]
ast = convertFunctionToAlgebraicPyAst(call)
evaluateFunctionDefWithLocalsInCells(ast, {'f': str}, {})
def test_code_with_nested_setcomp(self):
def call(x):
return {[(i, 0, 0) for i in y] for y in x}
ast = convertFunctionToAlgebraicPyAst(call)
evaluateFunctionDefWithLocalsInCells(ast, {'f': str}, {})
def test_code_with_nested_dictcomp(self):
def call(x):
return {0: [(i, 0, 0) for i in y] for y in x}
ast = convertFunctionToAlgebraicPyAst(call)
evaluateFunctionDefWithLocalsInCells(ast, {'f': str}, {})
def test_closure_grabs_global_typed_object(self):
def countIt(x):
res = 0
for a in x:
res += aModuleLevelConstDict.get(a, 0)
return res
@Entrypoint
def callIt(f, x):
return f(x)
arg = ListOf(str)(["hi", "bye"] * 100000)
t0 = time.time()
v = GetCompiledTypes()
with v:
self.assertEqual(
callIt(countIt, arg),
100000
)
# our code should know the type of the const dict!
self.assertEqual(v.types['countIt'].varTypes['res'], int)
t0 = time.time()
callIt(countIt, arg)
print("took ", time.time() - t0)
self.assertLess(time.time() - t0, .1)
def test_closure_can_grab_and_modify_global_typed_object(self):
aModuleLevelDict['modify_count'] = 0
def countIt(x):
res = 0
for a in x:
res += aModuleLevelDict.get(a, 0)
aModuleLevelDict["modify_count"] += 1
return res
@Entrypoint
def callIt(f, x):
return f(x)
arg = ListOf(str)(["hi", "bye"] * 100000)
t0 = time.time()
v = GetCompiledTypes()
with v:
self.assertEqual(
callIt(countIt, arg),
100000
)
# our code should know the type of the const dict!
self.assertEqual(v.types['countIt'].varTypes['res'], int)
self.assertEqual(aModuleLevelDict['modify_count'], 200000)
t0 = time.time()
callIt(countIt, arg)
print("took ", time.time() - t0)
self.assertLess(time.time() - t0, .1)
def test_can_compile_after_compilation_failure(self):
class ThrowsCompilerExceptions(CompilableBuiltin):
def __eq__(self, other):
return isinstance(other, ThrowsCompilerExceptions)
def __hash__(self):
return hash("ThrowsCompilerExceptions")
def convert_call(self, context, instance, args, kwargs):
raise Exception("This always throws")
def h():
return 2
@Entrypoint
def f():
return h() + ThrowsCompilerExceptions()()
with self.assertRaisesRegex(Exception, "This always throws"):
f()
@Entrypoint
def g():
return h() + 1
self.assertEqual(g(), 3)
def test_converting_where_type_alternates(self):
def add(x, y):
return x if y is None else y if x is None else x + y
populated1 = ListOf(bool)([False, True, True, False])
populated2 = ListOf(bool)([False, True, False, True])
vals1 = ListOf(float)([0.0, 1.0, 2.0, 3.0])
@Entrypoint
def addUp(p1, p2, v1, v2):
out = ListOf(float)()
outP = ListOf(bool)()
for i in range(len(p1)):
if p1[i] and p2[i]:
res = add(v1[i], v2[i])
elif p1[i]:
res = add(v1[i], None)
elif p2[i]:
res = add(None, v2[i])
else:
res = None
if res is not None:
out.append(res)
outP.append(True)
else:
out.append(0.0)
outP.append(False)
return makeNamedTuple(v=out, p=outP)
v, p = addUp(populated1, populated2, vals1, vals1)
assert v == [0.0, 2.0, 2.0, 3.0]
assert p == [False, True, True, True]
def test_convert_not_on_ints_and_floats(self):
def check():
y = ListOf(int)()
y.append(not 10)
y.append(not 10.5)
y.append(not 0.0)
y.append(not 0.5)
y.append(not Int32(10))
y.append(not UInt32(10.5))
return y
self.assertEqual(
check(), Entrypoint(check)()
)
def test_compiler_can_see_type_members_of_instances(self):
@Entrypoint
def eltTypeOf(x):
return x.ElementType
assert eltTypeOf(ListOf(int)) == int
assert eltTypeOf(ListOf(int)()) == int
def test_function_entrypoint_multithreaded(self):
def makeAFunction(x):
T = OneOf(None, x)
def aFunction() -> T:
return x
return aFunction
assert type(Function(makeAFunction('a'))) is type(Function(makeAFunction('a'))) # noqa
assert type(Function(makeAFunction('b'))) is not type(Function(makeAFunction('a'))) # noqa
for c in range(10000):
if c % 100 == 0:
print("PASS ", c)
overloads = []
def wrapFunction(f):
overloads.append(Function(f).overloads)
f = makeAFunction(str(c))
threads = [threading.Thread(target=wrapFunction, args=(f,)) for _ in range(2)]
for t in threads:
t.start()
for t in threads:
t.join(.1)
assert len(overloads) == 2
def test_double_assignment(self):
def doubleAssign():
x = y = ListOf(int)() # noqa
return x
assert len(doubleAssign()) == 0
assert len(Entrypoint(doubleAssign)()) == 0
def test_double_nested_assignment(self):
def doubleAssign():
x = (y, z) = (1, 2)
assert x == (1, 2)
assert y == 1
assert z == 2
doubleAssign()
Entrypoint(doubleAssign)()
def test_double_nested_assignment_with_failure(self):
def doubleAssign():
try:
x = (y, z) = (1, 2, 3)
except ValueError:
pass
# the x assignment should have succeeded
assert x == (1, 2, 3)
doubleAssign()
Entrypoint(doubleAssign)()
def test_slice_objects(self):
@Entrypoint
def createSlice(start, stop, step):
return slice(start, stop, step)
assert isinstance(createSlice(1, 2, 3), slice)
def test_slice_objects_are_fast(self):
def count(start, stop, step):
res = 0.0
for i in range(start, stop, step):
res += slice(i).stop
return res
Entrypoint(count)(0, 1000000, 1)
t0 = time.time()
val1 = count(0, 1000000, 1)
t1 = time.time()
val2 = Entrypoint(count)(0, 1000000, 1)
t2 = time.time()
assert val1 == val2
speedup = (t1 - t0) / (t2 - t1)
assert speedup > 5
print("speedup is ", speedup)
def test_type_and_repr_of_slice_objects(self):
@Entrypoint
def typeOf():
return type(slice(1, 2, 3))
assert typeOf() is slice
@Entrypoint
def strOf():
return str(slice(1, 2, 3))
assert strOf() == str(slice(1, 2, 3))
@Entrypoint
def reprOf():
return repr(slice(1, 2, 3))
assert reprOf() == repr(slice(1, 2, 3))
def test_class_interaction_with_slice_is_fast(self):
class C(Class, Final):
def __getitem__(self, x) -> int:
return x.stop
def count(c, start, stop, step):
res = 0.0
for i in range(start, stop, step):
res += c[0:i]
return res
Entrypoint(count)(C(), 0, 1000000, 1)
t0 = time.time()
val1 = count(C(), 0, 1000000, 1)
t1 = time.time()
val2 = Entrypoint(count)(C(), 0, 1000000, 1)
t2 = time.time()
assert val1 == val2
speedup = (t1 - t0) / (t2 - t1)
assert speedup > 5
print("speedup is ", speedup)
def test_class_interaction_with_slice_pairs(self):
class C(Class, Final):
def __getitem__(self, x) -> int:
return x[0].stop + x[1].stop
def count(c, start, stop, step):
res = 0.0
for i in range(start, stop, step):
res += c[:i, :i]
return res
Entrypoint(count)(C(), 0, 1000000, 1)
t0 = time.time()
val1 = count(C(), 0, 1000000, 1)
t1 = time.time()
val2 = Entrypoint(count)(C(), 0, 1000000, 1)
t2 = time.time()
assert val1 == val2
speedup = (t1 - t0) / (t2 - t1)
assert speedup > 5
print("speedup is ", speedup)
def test_chained_comparisons(self):
def f1(x, y, z):
return x < y < z
def f2(x, y, z):
return x > y > z
def f3(x, y, z):
return x <= y < z
def f4(x, y, z):
return 1 <= x <= y <= 2 <= z
def f5(x, y, z):
return x < y > z
def f6(x, y, z):
return x > y < z
for f in [f1, f2, f3, f4, f5, f6]:
for x in range(3):
for y in range(3):
for z in range(3):
r1 = f(x, y, z)
r2 = Entrypoint(f)(x, y, z)
self.assertEqual(r1, r2)
# Now verify that each expression in the chained comparison is evaluated at most once
# and verify that normal short-circuit evaluation occurs.
def f7(w, x, y, z):
accumulator = []
def side_effect(x, accumulator):
accumulator.append(x)
return x
if side_effect(w, accumulator) < side_effect(x, accumulator) < side_effect(y, accumulator) < side_effect(z, accumulator):
return (True, accumulator)
else:
return (False, accumulator)
for w in range(4):
for x in range(4):
for y in range(4):
for z in range(4):
r1 = f7(w, x, y, z)
r2 = Entrypoint(f7)(w, x, y, z)
self.assertEqual(r1, r2)
def test_variable_restriction_is_correct(self):
@Entrypoint
def toTypedDict(x: dict):
x = Dict(int, int)(x)
return x
assert toTypedDict({1: 2}) == {1: 2}
def test_function_return_conversion_level_is_ImplicitContainers(self):
@Function
def toList(x) -> ListOf(int):
return x
@Entrypoint
def toListC(x) -> ListOf(int):
return x
assert toList([1, 2]) == toListC([1, 2]) == ListOf(int)([1, 2])
def test_iterate_with_multiple_variable_targets(self):
@Entrypoint
def iterate(iterable):
res = 0
for x, y in iterable:
res += y
return res
assert iterate(ListOf(ListOf(int))([[1, 2], [3, 4]])) == 6
with self.assertRaisesRegex(Exception, "not enough values to unpack"):
iterate(ListOf(ListOf(int))([[1, 2], [3]]))
def test_iterate_constant_expression_multiple(self):
@Entrypoint
def iterate():
res = 0
for x, y in ((1, 2), (3, 4)):
res += y
return res
assert iterate() == 6
def test_iterate_oneof(self):
@Entrypoint
def iterate(x: OneOf(ListOf(int), ListOf(float))):
res = 0
for val in x:
res += val
return res
assert iterate(ListOf(int)([1, 2, 3])) == 6
assert iterate.resultTypeFor(ListOf(int)).typeRepresentation == OneOf(float, int)
def test_iterate_oneof_segregates_variables(self):
@Entrypoint
def iterate(x: OneOf(ListOf(int), ListOf(str))):
for val in x:
# depending on the branch we're in, we should know that 'val'
# is either an int or a string
return typeKnownToCompiler(val)
return None
assert iterate(ListOf(int)([1, 2])) is int
assert iterate(ListOf(str)(["2"])) is str
def test_iterate_oneof_variable_types_join(self):
@Entrypoint
def iterate(x: OneOf(ListOf(int), ListOf(str))):
res = None
for val in x:
# depending on the branch we're in, we should know that 'val'
# is either an int or a string
res = val
return typeKnownToCompiler(res)
assert iterate(ListOf(int)([1, 2])) is OneOf(None, int, str)
assert iterate(ListOf(str)(["2"])) is OneOf(None, int, str)
def test_check_isinstance_on_oneof(self):
@Entrypoint
def doIt(var: OneOf(int, float)):
if isinstance(var, int):
return typeKnownToCompiler(var)
else:
return typeKnownToCompiler(var)
assert doIt(1.0) is float
assert doIt(1) is int
def test_check_one_of_type(self):
@Entrypoint
def doIt(var: OneOf(int, float)):
checkOneOfType(var)
print(var)
return typeKnownToCompiler(var)
assert doIt(1.0) is float
assert doIt(1) is int
def test_check_subtype(self):
class Base(Class):
pass
class Child1(Base):
pass
class Child2(Base):
pass
class Child3(Base):
pass
@Entrypoint
def doIt(var: Base):
checkType(var, Child1, Child2)
return typeKnownToCompiler(var)
assert doIt(Base()) is Base
assert doIt(Child1()) is Child1
assert doIt(Child2()) is Child2
assert doIt(Child3()) is Base
@flaky(max_runs=3, min_passes=1)
def test_check_one_of_type_perf_difference(self):
@Entrypoint
def accumulate(var: OneOf(int, float), times: int):
res = var
for t in range(times - 1):
res += var
return res
@Entrypoint
def accumulateWithCheck(var: OneOf(int, float), times: int):
# instruct the compiler to check what kind of variable this is
checkOneOfType(var)
res = var
for t in range(times - 1):
res += var
return res
accumulate(1, 100)
accumulateWithCheck(1, 100)
t0 = time.time()
accumulate(1, 1000000)
t1 = time.time()
accumulateWithCheck(1, 1000000)
t2 = time.time()
checkTime = t2 - t1
normalTime = t1 - t0
speedup = normalTime / checkTime
print("integer speedup is", speedup)
# it should be really big because the compiler can replace
# the sum with n*(n-1)/2, so it's basically constant time.
assert speedup > 100
accumulate(1.0, 100)
accumulateWithCheck(1.0, 100)
t0 = time.time()
accumulate(1.0, 1000000)
t1 = time.time()
accumulateWithCheck(1.0, 1000000)
t2 = time.time()
checkTime = t2 - t1
normalTime = t1 - t0
speedup = normalTime / checkTime
# I see roughly a 10x speedup locally, and about 5x on the GitHub test boxes
print("float speedup is", speedup)
assert speedup > 2.0
def test_compile_annotated_assignment(self):
def f():
x: int = 20
x: int
return x
assert f() == Entrypoint(f)()
def test_with_exception(self):
class SimpleCM1():
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class SimpleCM2(Class, Final):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return True
class SimpleCM3(Class, Final):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def testCM(cm):
try:
with cm:
raise ZeroDivisionError()
except Exception:
return 1
return 0
r1 = testCM(SimpleCM1()) # ok
r2 = testCM(SimpleCM2()) # ok
r3 = testCM(SimpleCM3()) # segfault
self.assertEqual(r1, 1)
self.assertEqual(r2, 0)
self.assertEqual(r3, 1)
def test_context_manager_corruption(self):
class CM():
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return True
def f():
with CM():
raise NotImplementedError()
def repeat_a(n):
for i in range(n):
try:
f()
except Exception as e: # noqa: F841
pass
return 1/0
def repeat_b(n):
for i in range(n):
try:
Compiled(f)()
except Exception as e: # noqa: F841
pass
return 1/0
with self.assertRaises(ZeroDivisionError):
repeat_a(1000)
# At one point, this raised RecursionError instead of ZeroDivisionError
with self.assertRaises(ZeroDivisionError):
repeat_b(1000)
def test_context_manager_multiple_on_one_line1(self):
class ConMan1():
def __enter__(self):
return 1
def __exit__(self, exc_type, exc_val, exc_tb):
return True
class ConMan2():
def __enter__(self):
return 2
def __exit__(self, exc_type, exc_val, exc_tb):
raise NotImplementedError('ConMan2')
def f():
with ConMan1() as x, ConMan2() as y:
result = x + y
return result
c_f = Entrypoint(f)
c_f()
r1 = result_or_exception(f)
r2 = result_or_exception(c_f)
# Former problem: c_f raises RuntimeError 'No active exception to reraise'
self.assertEqual(r1, r2)
def test_context_manager_multiple_on_one_line2(self):
class ConMan():
def __init__(self, a, b, c, t):
self.a = a
self.b = b
self.c = c
self.t = t # trace
def __enter__(self):
self.t.append("__enter__")
if self.a == 1:
self.t.append("raise in __enter__")
raise SyntaxError()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.t.append(f"__exit__ {str(exc_type)} {exc_val}")
if self.b == 1:
self.t.append("raise in __exit__")
raise NotImplementedError()
self.t.append(f"__exit__ returns {self.c == 1}")
return self.c == 1
def with_cm_multiple(a, b, c, d, e, f, g, t) -> int:
t.append("start")
with ConMan(a, b, c, t) as x, ConMan(e, f, g, t) as y:
t.append(f"outerbody {x.a} {x.b} {x.c}")
t.append(f"innerbody {y.a} {y.b} {y.c}")
if d == 1:
t.append("outerraise")
raise ZeroDivisionError()
elif d == 2:
t.append("outerreturn1")
return 1
t.append("return2")
return 2
for a in [0, 1]:
for b in [0, 1]:
for c in [0, 1]:
for d in [0, 1, 2]:
for e in [0, 1]:
for f in [0, 1]:
for g in [0, 1]:
t1 = []
r1 = result_or_exception(with_cm_multiple, a, b, c, d, e, f, g, t1)
t2 = []
r2 = result_or_exception(Entrypoint(with_cm_multiple), a, b, c, d, e, f, g, t2)
if r1 != r2 or t1 != t2:
print(f"mismatch {a}{b}{c}.{d}.{e}{f}{g} {r1} {r2}")
print(t1)
print(t2)
self.assertEqual(r1, r2, (a, b, c, d, e, f, g))
self.assertEqual(t1, t2, (a, b, c, d, e, f, g))
def test_import_module(self):
@Entrypoint
def importSomething():
import sys
return sys
assert importSomething() is sys
def test_class_as_context_manager(self):
class SimpleCM1():
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class SimpleCM2(Class, Final):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return True
class SimpleCM3(Class, Final):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def testCM(cm):
try:
with cm:
raise ZeroDivisionError()
except Exception:
return 1
return 0
assert testCM(SimpleCM1()) == 1
assert testCM(SimpleCM2()) == 0
assert testCM(SimpleCM3()) == 1
def test_access_oneof_variable(self):
@Entrypoint
def f(x) -> object:
return "aString"
@Entrypoint
def loop1():
val = "aString"
val = f(0)
for i in range(20):
val = f(i)
print(val)
@Entrypoint
def loop2():
val = f(0)
val = "aString"
for i in range(20):
val = f(i)
print(val)
loop1()
loop2()
def test_notcompiled_lambda_closure_refcounts(self):
x = ListOf(int)()
@NotCompiled
def f() -> int:
x
return 0
@Entrypoint
def returnIt(x):
return x
f = returnIt(f)
closure = f.getClosure()
assert refcount(closure) == 2
for _ in range(100):
f()
assert refcount(x) == 2
f = None
assert refcount(closure) == 1
assert refcount(x) == 2
closure = None
assert refcount(x) == 1
def test_map_large_named_tuples(self):
def getNamedTupleOfLists(n):
nameToList = {"a" + str(i): ListOf(str)([str(i)]) for i in range(n)}
return makeNamedTuple(**nameToList)
@Entrypoint
def slice(tupOfLists):
return map(lambda l: l[0], tupOfLists)
nt = getNamedTupleOfLists(100)
slice(nt)
|
the-stack_106_29121 | """
RenderPipeline
Copyright (c) 2014-2016 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import direct.gui.DirectGuiGlobals as DGG
from direct.gui.DirectSlider import DirectSlider
from rpcore.rpobject import RPObject
class Slider(RPObject):
""" This is a simple wrapper around DirectSlider, providing a simpler
interface """
def __init__(self, x=0, y=0, parent=None, size=100, min_value=0,
max_value=100, value=50, page_size=1, callback=None,
extra_args=None):
""" Inits the slider """
RPObject.__init__(self)
if extra_args is None:
extra_args = []
# Scale has to be 2.0, otherwise there will be an error.
self._node = DirectSlider(
pos=(size * 0.5 + x, 1, -y), parent=parent, range=(min_value, max_value),
value=value, pageSize=page_size, scale=2.0, command=callback,
extraArgs=extra_args, frameColor=(0.0, 0.0, 0.0, 1),
frameSize=(-size * 0.25, size * 0.25, -5, 5), relief=DGG.FLAT,
thumb_frameColor=(0.35, 0.53, 0.2, 1.0), thumb_relief=DGG.FLAT,
thumb_frameSize=(-2.5, 2.5, -5.0, 5.0),)
@property
def value(self):
""" Returns the currently assigned value """
return self._node["value"]
@value.setter
def value(self, value):
""" Sets the value of the slider """
self._node["value"] = value
@property
def node(self):
""" Returns a handle to the internally used node """
return self._node
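# Illustrative usage sketch (added; not part of the original module, and the names
# below are assumptions). Inside RenderPipeline UI code with a Panda3D parent node:
#
#   slider = Slider(x=20, y=50, parent=parent_node, size=140,
#                   min_value=0, max_value=100, value=25,
#                   callback=on_slider_changed)
#   current = slider.value   # read the current position
#   slider.value = 60        # move the thumb programmatically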
|
the-stack_106_29123 | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.11 Python SDK
Pure Storage FlashBlade REST 1.11 Python SDK. Compatible with REST API versions 1.0 - 1.11. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.11
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ErrorResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error': 'list[PureError]'
}
attribute_map = {
'error': 'error'
}
def __init__(self, error=None): # noqa: E501
"""ErrorResponse - a model defined in Swagger""" # noqa: E501
self._error = None
self.discriminator = None
if error is not None:
self.error = error
@property
def error(self):
"""Gets the error of this ErrorResponse. # noqa: E501
a list of error objects # noqa: E501
:return: The error of this ErrorResponse. # noqa: E501
:rtype: list[PureError]
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this ErrorResponse.
a list of error objects # noqa: E501
:param error: The error of this ErrorResponse. # noqa: E501
:type: list[PureError]
"""
self._error = error
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ErrorResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
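# Illustrative usage sketch (added; not part of the generated client). `pure_errors`
# stands in for a list of PureError instances returned by the API:
#
#   response = ErrorResponse(error=pure_errors)
#   response.to_dict()   # -> {'error': [ ...each error as a dict... ]}
#   print(response)      # pretty-printed via to_str()/__repr__()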
|
the-stack_106_29124 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import xml.etree.ElementTree as ET
from pants.util.contextutil import open_zip
from pants_test.backend.jvm.tasks.jvm_compile.base_compile_integration_test import BaseCompileIT
SHAPELESS_CLSFILE = 'org/pantsbuild/testproject/unicode/shapeless/ShapelessExample.class'
SHAPELESS_TARGET = 'testprojects/src/scala/org/pantsbuild/testproject/unicode/shapeless'
class ZincCompileIntegrationTest(BaseCompileIT):
def test_scala_compile_jar(self):
jar_suffix = 'z.jar'
with self.do_test_compile(SHAPELESS_TARGET,
expected_files=[jar_suffix]) as found:
with open_zip(self.get_only(found, jar_suffix), 'r') as jar:
self.assertTrue(jar.getinfo(SHAPELESS_CLSFILE),
'Expected a jar containing the expected class.')
def test_scala_empty_compile(self):
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/emptyscala',
expected_files=[]) as found:
# no classes generated by this target
pass
def test_scala_shared_sources(self):
clsname = 'SharedSources.class'
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/sharedsources::',
expected_files=[clsname]) as found:
classes = found[clsname]
self.assertEqual(2, len(classes))
for cls in classes:
self.assertTrue(cls.endswith(
'org/pantsbuild/testproject/sharedsources/SharedSources.class'))
def test_scala_failure(self):
"""With no initial analysis, a failed compilation shouldn't leave anything behind."""
analysis_file = 'testprojects.src.scala.' \
'org.pantsbuild.testproject.compilation_failure.compilation_failure.analysis'
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/compilation_failure',
expected_files=[analysis_file],
expect_failure=True) as found:
self.assertEqual(0, len(found[analysis_file]))
def test_scala_with_java_sources_compile(self):
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/javasources',
expected_files=['ScalaWithJavaSources.class',
'JavaSource.class']) as found:
self.assertTrue(
self.get_only(found, 'ScalaWithJavaSources.class').endswith(
'org/pantsbuild/testproject/javasources/ScalaWithJavaSources.class'))
self.assertTrue(
self.get_only(found, 'JavaSource.class').endswith(
'org/pantsbuild/testproject/javasources/JavaSource.class'))
def test_scalac_plugin_compile(self):
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/scalac/plugin',
expected_files=['HelloScalac.class', 'scalac-plugin.xml']) as found:
self.assertTrue(
self.get_only(found, 'HelloScalac.class').endswith(
'org/pantsbuild/testproject/scalac/plugin/HelloScalac.class'))
tree = ET.parse(self.get_only(found, 'scalac-plugin.xml'))
root = tree.getroot()
self.assertEqual('plugin', root.tag)
self.assertEqual('hello_scalac', root.find('name').text)
self.assertEqual('org.pantsbuild.testproject.scalac.plugin.HelloScalac',
root.find('classname').text)
def test_scalac_debug_symbol(self):
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/scalac/plugin',
expected_files=['HelloScalac.class', 'scalac-plugin.xml'],
extra_args=['--compile-zinc-debug-symbols']) as found:
pass
def test_zinc_unsupported_option(self):
with self.temporary_workdir() as workdir:
with self.temporary_cachedir() as cachedir:
# compile with an unsupported flag
pants_run = self.run_test_compile(
workdir,
cachedir,
'testprojects/src/scala/org/pantsbuild/testproject/emptyscala',
extra_args=[
'--compile-zinc-args=-recompile-all-fraction',
'--compile-zinc-args=0.5',
])
self.assert_success(pants_run)
# Confirm that we were warned.
self.assertIn('is not supported, and is subject to change/removal', pants_run.stdout_data)
def test_analysis_portability(self):
target = 'testprojects/src/scala/org/pantsbuild/testproject/javasources'
analysis_file_name = 'testprojects.src.scala.org.pantsbuild.testproject.javasources.javasources.analysis.portable'
# do_new_project_compile_and_return_analysis executes pants with different build root/work directory each time.
def do_new_project_compilation_and_return_analysis():
with self.do_test_compile(target, iterations=1, expected_files=[analysis_file_name],
workdir_outside_of_buildroot=True) as found:
files = found[analysis_file_name]
self.assertEqual(1, len(files))
with open(list(files)[0]) as file:
return file.read()
analysis1 = do_new_project_compilation_and_return_analysis()
analysis2 = do_new_project_compilation_and_return_analysis()
def extract_content(analysis):
# TODO(stuhood): Comparing content before stamps only, because there is different line in internal apis section.
# return re.sub(re.compile('lastModified\(\d+\)'), "lastModified()", analysis).split('\n')
return analysis.partition("stamps")[0].split("\n")
self.assertListEqual(extract_content(analysis1), extract_content(analysis2))
def test_zinc_fatal_warning(self):
def test_combination(target, default_fatal_warnings, expect_success):
with self.temporary_workdir() as workdir:
with self.temporary_cachedir() as cachedir:
if default_fatal_warnings:
arg = '--scala-platform-fatal-warnings'
else:
arg = '--no-scala-platform-fatal-warnings'
pants_run = self.run_test_compile(
workdir,
cachedir,
'testprojects/src/scala/org/pantsbuild/testproject/compilation_warnings:{}'.format(target),
extra_args=[arg])
if expect_success:
self.assert_success(pants_run)
else:
self.assert_failure(pants_run)
test_combination('defaultfatal', default_fatal_warnings=True, expect_success=False)
test_combination('defaultfatal', default_fatal_warnings=False, expect_success=True)
test_combination('fatal', default_fatal_warnings=True, expect_success=False)
test_combination('fatal', default_fatal_warnings=False, expect_success=False)
test_combination('nonfatal', default_fatal_warnings=True, expect_success=True)
test_combination('nonfatal', default_fatal_warnings=False, expect_success=True)
|
the-stack_106_29125 | """
The DQN improvement: Prioritized Experience Replay (based on https://arxiv.org/abs/1511.05952)
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
gym: 0.8.0
"""
import numpy as np
import tensorflow as tf
np.random.seed(1)
tf.set_random_seed(1)
class SumTree(object):
"""
This SumTree code is a modified version and the original code is from:
https://github.com/jaara/AI-blog/blob/master/SumTree.py
Stores data together with its priority in the tree.
"""
# data_pointer = 0
def __init__(self, capacity):
self.data_pointer = 0
self.capacity = capacity # for all priority values
self.tree = np.zeros(2 * capacity - 1) # inner nodes need extra space
# [--------------Parent nodes-------------][-------leaves to recode priority-------]
# size: capacity - 1 size: capacity
self.data = np.zeros(capacity, dtype=object) # for all transitions
# [--------------data frame-------------]
# size: capacity
def add(self, p, data):
tree_idx = self.data_pointer + self.capacity - 1
self.data[self.data_pointer] = data # update data_frame
self.update(tree_idx, p) # update tree_frame
self.data_pointer += 1
if self.data_pointer >= self.capacity: # replace when exceed the capacity
self.data_pointer = 0
def update(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
# then propagate the change through tree
while tree_idx != 0: # this method is faster than the recursive loop in the reference code
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += change
def get_leaf(self, v):
r"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for transitions
Array type for storing:
[0,1,2,3,4,5,6]
"""
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
@property
def total_p(self):
return self.tree[0] # the root
class Memory(object): # stored as ( s, a, r, s_ ) in SumTree
"""
This Memory class is modified based on the original code from:
https://github.com/jaara/AI-blog/blob/master/Seaquest-DDQN-PER.py
"""
epsilon = 0.01 # small amount to avoid zero priority
alpha = 0.6 # [0~1] convert the importance of TD error to priority
beta = 0.4 # importance-sampling, from initial value increasing to 1
beta_increment_per_sampling = 0.001
abs_err_upper = 1. # clipped abs error
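# Added note (summary of the math implemented below, per Schaul et al. 2015):
#   priority       p_i  = min(|TD error_i| + epsilon, abs_err_upper) ** alpha
#   sampling prob  P(i) = p_i / sum_k p_k
#   IS weight      w_i  = (N * P(i)) ** (-beta), normalized by the largest weight,
#                         which is what (P(i) / min_j P(j)) ** (-beta) computes in sample()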
def __init__(self, capacity):
self.tree = SumTree(capacity)
def store(self, transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), np.empty((n, self.tree.data[0].size)), np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob / min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
class DQNPrioritizedReplay:
def __init__(
self,
n_actions,
n_features,
learning_rate=0.005,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=500,
memory_size=10000,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
prioritized=True,
sess=None,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon_increment = e_greedy_increment
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
self.prioritized = prioritized  # whether to use prioritized experience replay
self.learn_step_counter = 0
self._build_net()
t_params = tf.get_collection('target_net_params')
e_params = tf.get_collection('eval_net_params')
self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
if self.prioritized:
self.memory = Memory(capacity=memory_size)
else:
self.memory = np.zeros((self.memory_size, n_features*2+2))
if sess is None:
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
else:
self.sess = sess
if output_graph:
tf.summary.FileWriter("logs/", self.sess.graph)
self.cost_his = []
def _build_net(self):
def build_layers(s, c_names, n_l1, w_initializer, b_initializer, trainable):
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names, trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names, trainable=trainable)
l1 = tf.nn.relu(tf.matmul(s, w1) + b1)
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names, trainable=trainable)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names, trainable=trainable)
out = tf.matmul(l1, w2) + b2
return out
# ------------------ build evaluate_net ------------------
self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # input
self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # for calculating loss
if self.prioritized:
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('eval_net'):
c_names, n_l1, w_initializer, b_initializer = \
['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 20, \
tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers
self.q_eval = build_layers(self.s, c_names, n_l1, w_initializer, b_initializer, True)
with tf.variable_scope('loss'):
if self.prioritized:
self.abs_errors = tf.reduce_sum(tf.abs(self.q_target - self.q_eval), axis=1) # for updating Sumtree
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(self.q_target, self.q_eval))
else:
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
with tf.variable_scope('train'):
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
# ------------------ build target_net ------------------
self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # input
with tf.variable_scope('target_net'):
c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
self.q_next = build_layers(self.s_, c_names, n_l1, w_initializer, b_initializer, False)
def store_transition(self, s, a, r, s_):
if self.prioritized: # prioritized replay
transition = np.hstack((s, [a, r], s_))
self.memory.store(transition) # have high priority for newly arrived transition
else: # random replay
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
transition = np.hstack((s, [a, r], s_))
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
def choose_action(self, observation):
observation = observation[np.newaxis, :]
if np.random.uniform() < self.epsilon:
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
action = np.argmax(actions_value)
else:
action = np.random.randint(0, self.n_actions)
return action
def learn(self):
if self.learn_step_counter % self.replace_target_iter == 0:
self.sess.run(self.replace_target_op)
print('\ntarget_params_replaced\n')
if self.prioritized:
tree_idx, batch_memory, ISWeights = self.memory.sample(self.batch_size)
else:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
q_next, q_eval = self.sess.run(
[self.q_next, self.q_eval],
feed_dict={self.s_: batch_memory[:, -self.n_features:],
self.s: batch_memory[:, :self.n_features]})
q_target = q_eval.copy()
batch_index = np.arange(self.batch_size, dtype=np.int32)
eval_act_index = batch_memory[:, self.n_features].astype(int)
reward = batch_memory[:, self.n_features + 1]
q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
if self.prioritized:
_, abs_errors, self.cost = self.sess.run([self._train_op, self.abs_errors, self.loss],
feed_dict={self.s: batch_memory[:, :self.n_features],
self.q_target: q_target,
self.ISWeights: ISWeights})
self.memory.batch_update(tree_idx, abs_errors) # update priority
else:
_, self.cost = self.sess.run([self._train_op, self.loss],
feed_dict={self.s: batch_memory[:, :self.n_features],
self.q_target: q_target})
self.cost_his.append(self.cost)
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
self.learn_step_counter += 1
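if __name__ == "__main__":
    # Illustrative, self-contained sanity check (added example, not part of the
    # original tutorial): fill a small prioritized memory with dummy transitions,
    # draw one batch, and push back fake TD errors. n_features is assumed to be 2.
    n_features = 2
    memory = Memory(capacity=8)
    for step in range(8):  # fill every leaf so no priority is left at zero
        s = np.random.rand(n_features)
        s_ = np.random.rand(n_features)
        memory.store(np.hstack((s, [step % 3, 1.0], s_)))  # (s, a, r, s_)
    tree_idx, batch, is_weights = memory.sample(4)
    print("sampled tree indices:", tree_idx)
    print("batch shape:", batch.shape)  # -> (4, 2 * n_features + 2)
    print("importance-sampling weights:\n", is_weights)
    memory.batch_update(tree_idx, np.abs(np.random.randn(4)))  # fake |TD errors|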
|
the-stack_106_29126 | # -*- coding: utf-8 -*-
import sys, numpy, scipy
import scipy.cluster.hierarchy as hier
import scipy.spatial.distance as dist
import csv
import scipy.stats as stats
import json
import networkx as nx
from networkx.readwrite import json_graph
def makeNestedJson(leaf):
leaf=json.loads(leaf)
#A tree is a directed graph - create one with a dummy root
DG=nx.DiGraph()
DG.add_node('root')
#Construct the tree as a directed graph and annotate the nodes with attributes
#Edges go from parent to child
for e in leaf:
DG.add_node(e['id'],label=e['label'])
#If there's a parent, use it...
if 'parent' in e: DG.add_edge(e['parent'],e['id'])
#else create a dummy parent from the dummy root
else: DG.add_edge('root',e['id'])
#Get the tree as JSON
data = json_graph.tree_data(DG,root='root')
#and dump the data from the dummy root's children down...
return json.dumps(data['children'])
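# Illustrative example (added, not from the original script): given the JSON string
#   '[{"id": 1, "label": "a"}, {"id": 2, "label": "b", "parent": 1}]'
# makeNestedJson attaches node 1 to the dummy root and node 2 under node 1, and
# returns roughly
#   '[{"id": 1, "label": "a", "children": [{"id": 2, "label": "b"}]}]'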
# Seeds the dotted-id string with the root cluster and recursively builds the hierarchy tree via getElem()
def makeHier(data, length, typeRC, parentId, grandParentId):
# the last entry of the flattened matrix is the root cluster id
hierData = str(int(data[len(data)-1])) + "."
#print (hierData)
# data: the whole flattened hierarchy matrix; len(data)-1: index of the root entry; hierData: the dotted id string accumulated so far
getElem (data, len(data)-1, hierData, length, typeRC, parentId, grandParentId)
# Recursively emits every node below the root, one cluster row at a time.
# data: flattened hierarchy matrix; parentNum: index of the current cluster id; hier: dot-separated id path built so far; length: number of leaves in the row/col matrix; typeRC: 'row' or 'col'; parentId/grandParentId: ids used to attach each emitted node to its parent
def getElem(data, parentNum, hier, length, typeRC, parentId, grandParentId):
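# Layout note (derived from runFun below): `data` is the scipy linkage matrix
# flattened with an extra cluster id appended to every row, so each cluster
# occupies 5 consecutive slots: [child_a, child_b, distance, sample_count, cluster_id].
# data[parentNum] is therefore a cluster id, and data[parentNum-4] / data[parentNum-3]
# are the ids of its two children.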
#'parent' : parentId , 'id' : data[parentNum] (current Id)
#print(rowLeafNum)
#print(colLeafNum)
# Proceed only if a full 5-slot cluster row ends at this index (parentNum is -1 when searchNum found no matching cluster row)
if parentNum-4 >= 0 :
#isChecked = 0
# Put current data
if (parentNum != len(data)-1):
#leafData.append(str(int(hierMatrix[-1])) + ".")
hier += str(int(data[parentNum])) + "."
#
if (typeRC == "row"):
global rowLeafNum
rowLeafNum = rowLeafNum + 1
if int(data[parentNum]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum])][0]
global content
content['parent'] = int(grandParentId)
global content
content['id'] = int(data[parentNum])
global leafData
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
global colLeafNum
colLeafNum = colLeafNum + 1
#print(colHeaders)
#print(int(data[parentNum])-1)
if int(data[parentNum]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum])-1]
global colContent
colContent['parent'] = int(grandParentId)
global colContent
colContent['id'] = int(data[parentNum])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print ("gradParentId : " + str(int(grandParentId)))
#print ("parentId : " + str(int(parentId)))
#print ("id : " + str(int(data[parentNum])))
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
#print (hier)
#print(content)
#print(colContent)
#print("leafleafleafleafleafleafleaf")
#print(leafData)
#print(colLeafData)
if data[parentNum-3] >= length and data[parentNum-4] >= length:
#print (parentNum-3 , data[parentNum-3])
#print (parentNum-4 , data[parentNum-4])
getElem(data, searchNum(data, numpy.where(data==data[parentNum-4]), parentNum-4), hier,length,typeRC, int(data[parentNum]-4), int(data[parentNum]))
getElem(data, searchNum(data, numpy.where(data==data[parentNum-3]), parentNum-3), hier,length,typeRC, int(data[parentNum]-3), int(data[parentNum]))
elif data[parentNum-3] < length and data[parentNum-4] > length:
#print (parentNum-4 , data[parentNum-4])
hier += str(int(data[parentNum-3])) + "."
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
if int(data[parentNum-3]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-3])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-3])
global leafData
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-3]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-3])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-3])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print(content)
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
removeNum = len(str(int(data[parentNum-3]))) + 1
hier = hier[:-removeNum]
getElem(data, searchNum(data, numpy.where(data==data[parentNum-4]), parentNum-4), hier, length,typeRC, int(data[parentNum]-4), int(data[parentNum]))
elif data[parentNum-3] > length and data[parentNum-4] < length:
#print (parentNum-3 , data[parentNum-3])
hier += str(int(data[parentNum-4])) + "."
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
if int(data[parentNum-4]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-4])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-4])
global leafData
leafData += str(content) + ", "
global dotLeafData
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-4]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-4])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-4])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print(content)
removeNum = len(str(int(data[parentNum-4]))) + 1
hier = hier[:-removeNum]
getElem(data, searchNum(data, numpy.where(data==data[parentNum-3]), parentNum-3), hier, length,typeRC, int(data[parentNum]-3), int(data[parentNum]))
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
else:
hier += str(int(data[parentNum-4])) + "."
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
if int(data[parentNum-4]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-4])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-4])
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-4]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-4])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-4])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print(content)
#print (parentNum-4 , data[parentNum-4])
#print(hier)
removeNum = len(str(int(data[parentNum-4]))) + 1
hier = hier[:-removeNum]
hier += str(int(data[parentNum-3])) + "."
#print (parentNum-3 , data[parentNum-3])
#print(hier)
#print (parentNum-3 , data[parentNum-3])
#print (parentNum-4 , data[parentNum-4])
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
#print("length : " + str(length))
#print("int(data[parentNum]): " + str(int(data[parentNum])))
if int(data[parentNum-3]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-3])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-3])
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-3]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-3])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-3])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
#print(content)
#print(rowNameArr[int(data[parentNum-3])])
"""if (data[parentNum-4] <= len(linkageMatrix)):
hier += str(int(data[parentNum-4])) + "."
leafData.append(hier)
#print (hier)
isChecked = 1
# print (parentNum-3 , data[parentNum-3])
if (data[parentNum-3] <= len(linkageMatrix)):
if isChecked == 1 :
removeNum = len(str(int(data[parentNum-4]))) + 1
hier = hier[:-removeNum]
hier += str(int(data[parentNum-3])) + "."
leafData.append(hier)
#print (parentNum-4 , data[parentNum-4])
#print (hier)"""
def searchNum (data, index, pId):
if index[0][0]< pId and ((index[0][0] % 5 == 0) or (index[0][0] % 5 == 1) or (index[0][0] % 5 == 4)):
return index[0][0]
else:
return -1
def runFun(clusterType):
#open the file assuming the data above is in a file called 'dataFile'
inFile = open('/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/n50_heatmap_test.data','r')
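# Expected layout of the input file (illustrative sketch, inferred from the parsing
# below): tab-separated, first line = corner label plus condition names, each later
# line = gene name followed by positive numeric values, e.g.
#   gene    cond1   cond2   cond3
#   geneA   12.0    8.5     3.2
#   geneB   7.1     9.9     4.4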
#save the column/row headers (conditions/genes) into an array
colHeaders = inFile.readline().strip().split()[1:]
rowHeaders = []
dataMatrix = []
#print(colHeaders)
#Extract row data
for line in inFile:
#print(line)
data = line.strip().split('\t')
#if len(data) < 5:
# data.insert(0,"nameless")
rowHeaders.append([data[0]])
#print(rowHeaders)
dataMatrix.append([float(x) for x in data[1:]])
#Extract col data
colDataMatrix= []
for i in range(0, len(colHeaders)):
colDataMatrix.append([row[i] for row in dataMatrix])
#print(colDataMatrix[0])
global rowNameArr
rowNameArr = rowHeaders
global colNameArr
colNameArr = colHeaders
#convert native data array into a numpy array
#print(dataMatrix)
dataMatrix = numpy.array(dataMatrix)
colDataMatrix = numpy.array(colDataMatrix)
#log2 transform
dataMatrix = numpy.log2(dataMatrix)
colDataMatrix = numpy.log2(colDataMatrix)
#zscore transform
dataMatrix = stats.zscore(dataMatrix,1,1)
colDataMatrix = stats.zscore(colDataMatrix,1,1)
#print(dataMatrix)
#print(colDataMatrix)
print("s1")
distanceMatrix = dist.pdist(dataMatrix)
colDistanceMatrix = dist.pdist(colDataMatrix)
#print("dataMatrix : " )
#print(distanceMatrix)
print("s2")
distanceSquareMatrix = dist.squareform(distanceMatrix)
colDistanceSquareMatrix = dist.squareform(colDistanceMatrix)
print("s3")
#clusterType = "ward"
linkageMatrix = hier.linkage(distanceSquareMatrix, clusterType)
colLinkageMatrix = hier.linkage(colDistanceSquareMatrix, clusterType)
#print(distanceSquareMatrix)
print("s4")
heatmapOrder = hier.leaves_list(linkageMatrix)
hierMatrix = [[]]
colHierMatrix = [[]]
print("s5")
newNum = len(linkageMatrix)
colNewNum = len(colLinkageMatrix)
for i in range(0, len(linkageMatrix)):
newNum += 1
hierMatrix = numpy.array(numpy.append(hierMatrix, numpy.append(linkageMatrix[i], [newNum])))
for i in range(0, len(colLinkageMatrix)):
colNewNum += 1
colHierMatrix = numpy.array(numpy.append(colHierMatrix, numpy.append(colLinkageMatrix[i], [colNewNum])))
print("s6")
#print ("heatmapOrder : ")
#print ( heatmapOrder)
#print(linkageMatrix)
#print(hierMatrix)
#print(hierMatrix[-1])
#print(colHierMatrix)
content['label'] = "root"
content['parent'] = "root"
content['id'] = int(hierMatrix[-1])
global leafData
leafData += str(content) + ", "
#leafData.append(str(int(hierMatrix[-1])) + ".")
colContent['label'] = "root"
colContent['parent'] = "root"
colContent['id'] = int(colHierMatrix[-1])
global colLeafData
colLeafData += str(colContent) + ", "
dotLeafData.append(str(int(hierMatrix[-1]))+".")
global dotcolLeafData
dotcolLeafData.append(str(int(colHierMatrix[-1]))+".")
#colLeafData.append(str(int(colHierMatrix[-1])) + ".")
makeHier(hierMatrix, len(linkageMatrix), "row", int(hierMatrix[-1]), len(linkageMatrix))
makeHier(colHierMatrix, len(colLinkageMatrix)+1, "col", int(colHierMatrix[-1]),len(colLinkageMatrix))
#print (leafData)
for i in range(len(dotLeafData)):
global dotLeafData
dotLeafData[i] = dotLeafData[i][:-1]
#print(leafData[i])
for i in range(len(dotcolLeafData)):
global dotcolLeafData
dotcolLeafData[i] = dotcolLeafData[i][:-1]
orderedDataMatrix = dataMatrix[heatmapOrder,:]
print("s7")
#print(orderedDataMatrix)
rowHeaders = numpy.array(rowHeaders)
orderedRowHeaders = rowHeaders[heatmapOrder,:]
#print(orderedRowHeaders)
print("s8")
matrixOutput = []
row = 0
for rowData in orderedDataMatrix:
col = 0
rowOutput = []
for colData in rowData:
rowOutput.append([colData, row, col])
col += 1
matrixOutput.append(rowOutput)
row += 1
print("s9")
global leafData
leafData = leafData[:-2]
leafData += "]"
#print (leafData)
global colLeafData
colLeafData = colLeafData[:-2]
global colLeafData
colLeafData += "]"
#print (colLeafData)
#maxData = 'var ' + clusterType + 'maxData = ' + str(numpy.amax(dataMatrix)) + ";\n"
#minData = 'var ' + clusterType + 'minData = ' + str(numpy.amin(dataMatrix)) + ";\n"
maxData = 'var ' + 'maxData = ' + str(numpy.amax(dataMatrix)) + ";\n"
minData = 'var ' + 'minData = ' + str(numpy.amin(dataMatrix)) + ";\n"
data = 'var ' + clusterType + 'data = ' + str(matrixOutput) + ";\n"
cols = 'var ' + clusterType + 'cols = ' + str(colHeaders) + ";\n"
#row = 'var rows = ' + str([x for x in orderedRowHeaders]) + ";\n"
#print ('var maxData = ' + str(numpy.amax(dataMatrix)) + ";")
#print ('var minData = ' + str(numpy.amin(dataMatrix)) + ";")
#print ('var data = ' + str(matrixOutput) + ";")
#print ('var cols = ' + str(colHeaders) + ";")
oneDimensionOrderedRowHeaders = []
for i in range(len(orderedRowHeaders)):
oneDimensionOrderedRowHeaders.append(orderedRowHeaders[i][0])
row = 'var ' + clusterType + 'rows = ' + str(oneDimensionOrderedRowHeaders) + ";\n"
#print ('var rows = ' + str(oneDimensionOrderedRowHeaders) + ";\n")
#print (json.dumps(leafData, sort_keys=False, indent=4))
#print (json.dumps(colLeafData, sort_keys=False, indent=4))
global leafData
leafData = leafData.replace("/", "")
global colLeafData
colLeafData = colLeafData.replace("/", "")
global leafData
leafData = leafData.replace("\'", "\"")
global colLeafData
colLeafData = colLeafData.replace("\'", "\"")
#print(type(leafData))
#print(leafData)
#print (makeNestedJson(leafData))
"""
file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/rowJsonData.js","w")
file.write("var " + clusterType + "RowJson = " + str(makeNestedJson(leafData)) + ";")
file.close()
file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/colJsonData.js","w")
file.write("var " + clusterType + "ColJson = " + str(makeNestedJson(colLeafData)) + ";")
file.close()
file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/rowJsonData.json","w")
file.write(str(makeNestedJson(leafData)))
file.close()
file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/colJsonData.json","w")
file.write(str(makeNestedJson(colLeafData)))
file.close()
"""
#Store heatmap infomation to js
file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/genomixdata.js","a")
file.write(maxData)
file.write(minData)
file.write(data)
file.write(cols)
file.write(row)
file.close()
#print (leafData)
# Store hiararchy data infomation to csv file.
csv_file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/" + clusterType + "tree.csv","w")
cw = csv.writer(csv_file, delimiter=',', quotechar='|')
cw.writerow(("id","value"))
for i in range (len(dotLeafData)):
if int(dotLeafData[i][-1]) <= len(linkageMatrix):
num = ""
for j in range( len(dotLeafData[i])):
k = 1 + j
#print(leafData[i][-k])
if dotLeafData[i][-k] == ".":
break
else :
num += dotLeafData[i][-k]
#print(num)
cw.writerow((" "+str(dotLeafData[i]),""))
else :
#print()
cw.writerow((" "+str(dotLeafData[i]),""))
csv_file.close()
csv_file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/" + clusterType + "coltree.csv","w")
cw = csv.writer(csv_file, delimiter=',', quotechar='|')
cw.writerow(("id","value"))
for i in range (len(dotcolLeafData)):
if int(dotcolLeafData[i][-1]) <= len(colLinkageMatrix):
num = ""
for j in range( len(dotcolLeafData[i])):
k = 1 + j
#print(leafData[i][-k])
if dotcolLeafData[i][-k] == ".":
break
else :
global dotcolLeafData
num += dotcolLeafData[i][-k]
#print(num)
cw.writerow((" "+str(dotcolLeafData[i]),""))
else :
#print()
cw.writerow((" "+str(dotcolLeafData[i]),""))
csv_file.close()
def init():
cluster=["single","complete","average","weighted","centroid","median","ward"]
for i in range(0, 1):  # was: range(0, len(cluster))
#typeRC = ""
global leafData
leafData ="["
global colLeafData
colLeafData = "["
global dotLeafData
dotLeafData = []
global dotcolLeafData
dotcolLeafData = []
global content
content ={}
global colContent
colContent = {}
global rowNameArr
rowNameArr = []
global colNameArr
colNameArr = []
global rowLeafNum
rowLeafNum = 0
global colLeafNum
colLeafNum = 0
runFun(cluster[i])
init()
|
the-stack_106_29127 | #!/usr/bin/env python3
"""
Tasks
Get example output for tests - Ted
Parser for ODGI Bin file format - Ted
Component Segmentation Detection - Josiah and Joerg
Python memory object model - Josiah
Output format
"""
from typing import List, Tuple, Set, Dict
from pathlib import Path as osPath
from nested_dict import nested_dict
from datetime import datetime
from matrixcomponent.matrix import Path, Component, LinkColumn, Bin
from matrixcomponent.PangenomeSchematic import PangenomeSchematic
import os
import logging
import argparse
import matrixcomponent
import matrixcomponent.JSONparser as JSONparser
MAX_COMPONENT_SIZE = 100 # automatic calculation from cells_per_file did not go well
LOGGER = logging.getLogger(__name__)
"""logging.Logger: The logger for this module"""
def populate_component_occupancy(schematic: PangenomeSchematic):
for component in schematic.components:
# are matrix paths in the same order as schematic.path_names?
# side effect instead of return
component.occupants = [any([bin.coverage > 0.1 for bin in bins if bin])
for bins in component.matrix]
print("Populated Occupancy per component per path.")
def populate_component_matrix(paths: List[Path], schematic: PangenomeSchematic):
for component in schematic.components:
# paths paths are in the same order as schematic.path_names
for i, path in enumerate(paths):
relevant = [bin for bin in path.bins if
component.first_bin <= bin.bin_id <= component.last_bin] # very costly loop
padded = []
if relevant:
padded = [[]] * (component.last_bin - component.first_bin + 1)
for bin in relevant:
padded[bin.bin_id - component.first_bin] = \
Bin(bin.coverage, bin.inversion_rate, bin.first_nucleotide, bin.last_nucleotide)
component.matrix.append(padded) # ensure there's always 1 entry for each path
print("Populated Matrix per component per path.")
populate_component_occupancy(schematic)
def segment_matrix(matrix: List[Path], bin_width, cells_per_file, pangenome_length) -> PangenomeSchematic:
from matrixcomponent import JSON_VERSION
print(f"Starting Segmentation process on {len(matrix)} Paths.")
schematic = PangenomeSchematic(JSON_VERSION,
bin_width,
1,
1,
[], [p.name for p in matrix], 1, pangenome_length)
incoming, outgoing, dividers = dividers_with_max_size(matrix, cells_per_file)
start_pos = 0
for valid_start in dividers:
if valid_start != 0:
current = Component(start_pos, valid_start - 1)
# current.active_members = 1
schematic.components.append(current)
start_pos = valid_start
print(f"Created {len(schematic.components)} components")
# populate Component occupancy per Path
populate_component_matrix(matrix, schematic)
# populate all link columns onto schematic
nLinkColumns = 0
for component in schematic.components:
# TODO: order columns based on traversal patterns,
# TODO: insert additional columns for higher copy number
for origin_pos, participants in incoming[component.first_bin].items():
phase_dots = [indiv in participants for indiv in schematic.path_names]
entering = LinkColumn(origin_pos,
component.first_bin,
participants=phase_dots)
component.arrivals.append(entering)
nLinkColumns += 1
for arriving_pos, participants in outgoing[component.last_bin].items():
# phase_dots depends on row ordering of path names, optimized for display
phase_dots = [indiv in participants for indiv in schematic.path_names]
leaving = LinkColumn(component.last_bin,
arriving_pos,
participants=phase_dots)
component.departures.append(leaving)
nLinkColumns += 1
for i in range(len(schematic.components)-1):
component, next_component = schematic.components[i],schematic.components[i+1]
add_adjacent_connector_column(component, next_component, schematic)
print(f"Created {nLinkColumns} LinkColumns")
return schematic
def dividers_with_max_size(matrix: List[Path], cells_per_file: int):
"""Adds in additional dividers to ensure very large components are split into
multiple components with no Links."""
incoming, outgoing, dividers = find_dividers(matrix)
# estimate number of paths, x10 because most paths are empty
dividers_extended = []
prev = 0
for div in sorted(list(dividers)):
gap_size = div - prev
if gap_size > MAX_COMPONENT_SIZE:
for i in range(prev + MAX_COMPONENT_SIZE, div, MAX_COMPONENT_SIZE):
dividers_extended.append(i) # add a series of dividers spaced ^ apart
prev = div
dividers_extended.append(div)
return incoming, outgoing, dividers_extended
def add_adjacent_connector_column(component, next_component, schematic):
"""The last Departure LinkColumn is to the adjacent component
Use logic to decide on which rows need adjacent connectors
Start with the easy subtractive case of occupancy - departures and move to more complex,
multiple copy cases."""
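# Example of the simple subtractive case handled here: a path that occupies both
# this component and the next one and participates in none of this component's
# departure LinkColumns is assumed to run straight into the adjacent component,
# so its row gets True in the connector column.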
adjacents = []
for row in range(len(schematic.path_names)):
connection_exists = False
if component.occupants[row] and next_component.occupants[row]: # occupant present
# n_arrivals = sum([column.participants[row] for column in component.arrivals])
departed = sum([column.participants[row] for column in component.departures])
# connection_exists = n_arrivals + 1 > departed
connection_exists = not departed # didn't depart
adjacents.append(connection_exists)
component.departures.append(LinkColumn( # LinkColumn for adjacents
component.last_bin,
component.last_bin + 1,
participants=adjacents))
def find_dividers(matrix: List[Path]) -> Tuple[Dict[int, Dict[int, set]],
Dict[int, Dict[int, set]], Set[int]]:
max_bin = 1
    leaving = nested_dict(2, set)  # [upstream][downstream] -> set of Path names using that link column
    entering = nested_dict(2, set)  # [downstream][upstream] -> set of Path names using that link column
    dividers = {1}  # all start positions of components, starting at bin 1
copy_arrivals = set() # track self loops just in case component gets cut in half
uniq_links = set()
for i, path in enumerate(matrix):
print(f"Segmenting path {path.name} with number of bins {len(path.bins)} and total progress:", '{0:.1%}'.format(i / len(matrix)))
max_bin = max(max_bin, max(path._bin_set))
for link in path.links: # Links are generated by odgi based
upstream, downstream = link.upstream, link.downstream
# Links to 0 Bin indicate the beginning or end of a path. 0 Bin has no sequence
if 0 in [upstream, downstream]:
continue # ignore links to the telomere.
if upstream == downstream:
copy_arrivals.add(upstream)
continue # we don't want these to become dividers
# Is the gap range anywhere else in this individual?
# What if downstream < upstream?
divider_verified = downstream < upstream
if not divider_verified:
missing_range = list(range(upstream + 1, downstream))
                for bin_id in missing_range:
                    if bin_id in path:
divider_verified = True
uniq_links.add((upstream, downstream))
break # stop as soon as we have confirmation
if divider_verified:
# if (upstream + 1) in leaving.keys() :
# print(f"Found inherited rearrangement {upstream+1}")
leaving[upstream][downstream].add(path.name) # the first position of the new component
entering[downstream][upstream].add(path.name)
dividers.add(upstream + 1)
dividers.add(downstream)
# TODO: insert prevarications about exact position
# Divider should be somewhere in here
# Tolerable range?
# Stack up others using the same LinkColumn
print(f"Largest bin_id was {max_bin}\n"
f"Found {len(dividers)} dividers.")
dividers.add(max_bin + 1) # end of pangenome
print(f"Eliminated {len(copy_arrivals)} self-loops")
n_links = sum([len(p.links) for p in matrix])
print(f"Input has {n_links} listed Links. "
f"Segmentation eliminated {(1-len(uniq_links)/n_links)*100}% of them.")
print(f"Found {len(uniq_links)} unique links")
return entering, leaving, dividers
def discard_useless_links(matrix: List[Path]):
"""https://github.com/vgteam/odgi/issues/48
Links that simply span a gap in the matrix can be discarded"""
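    # Example (assumed bin numbers): a link 5 -> 9 is kept only if the path also
    # contains one of the skipped bins 6, 7 or 8; otherwise the link merely spans
    # an empty stretch of the matrix and is dropped.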
for path in matrix:
keep = []
for link in path.links: # Links are generated by odgi based
missing_range = list(range(link.upstream + 1, link.downstream))
# Is the gap range anywhere else in this individual?
if any([i in path for i in missing_range if i > 0]):
keep.append(link)
path.links = keep # all other Paths get deleted
def setup_logging():
"""Setup the logging, add a log file"""
log_name = osPath(args.json_file).with_suffix('.log')
if args.output_folder:
log_name = osPath(args.output_folder).joinpath('log')
os.makedirs(args.output_folder, exist_ok=True)
t = datetime.now()
timestr = f"{t.year}{t.month:02}{t.day:02}-{t.hour:02}:{t.minute:02}:{t.second:02}"
log_name = str(log_name) + '.' + timestr
handler = logging.FileHandler(log_name)
handler.setLevel(args.log_level)
handler.setFormatter(logging.Formatter(matrixcomponent.LOGGING_FORMAT_STR,
datefmt=matrixcomponent.LOGGING_DATE_FORMAT))
logging.getLogger().addHandler(handler)
# Helper class to allow multi-line help messages for argparse user parameters:
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def write_json_files(json_file, schematic: PangenomeSchematic):
partitions, bin2file_mapping = schematic.split(args.cells_per_file)
folder = osPath(json_file).parent
if args.output_folder:
folder = osPath(args.output_folder)
os.makedirs(folder, exist_ok=True) # make directory for all files
for part in partitions:
p = folder.joinpath(part.filename)
with p.open('w') as fpgh9:
fpgh9.write(part.json_dump())
print("Saved results to", p)
schematic.write_index_file(folder, bin2file_mapping)
def get_arguments():
"""Create the command line interface and return the command line arguments
Returns
-------
Namespace
The command line arguments
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description="")
parser.add_argument('-j', '--json-file',
dest='json_file',
required=True,
help='input JSON file')
parser.add_argument('-o', '--out-folder',
dest='output_folder',
help='output folder')
parser.add_argument('-c', '--cells-per-file',
dest='cells_per_file',
default=5000,
type=int,
help='Tip: Adjust this number to get chunk files output close to 2MB. '
'Number of cells per file (#bins per file = #cells / #paths)')
parser.add_argument('-l', '--log-level',
default='DEBUG',
choices=('DEBUG', 'INFO', 'WARNING', 'ERROR'),
help='level of logging verbosity. DEBUG is most verbose')
args = parser.parse_args()
return args
def main():
global args
args = get_arguments()
setup_logging()
LOGGER.info(f'reading {osPath(args.json_file)}...\n')
paths, pangenome_length, bin_width = JSONparser.parse(args.json_file)
schematic = segment_matrix(paths, bin_width, args.cells_per_file, pangenome_length)
del paths
    write_json_files(args.json_file, schematic)
if __name__ == '__main__':
main()
# -j matrixcomponent/data/run1.B1phi1.i1.seqwish.w100.json -b 100 -o matrixcomponent/data/run1.B1phi1.i1.seqwish.w100/
# -j matrixcomponent/data/chrk_ath_12samples_10kb.w100000.json -b 100000 -o matrixcomponent/data/chrk_ath_12samples_10kb.w100000 --cells-per-file=10000
|
the-stack_106_29128 | import itertools
from functools import partial
import numpy as np
import tensorflow as tf
import deep500 as d5
from .tf_network import TensorflowNetwork
class TensorflowVisitor(d5.OnnxBaseVisitor):
def __init__(self):
self.counter = 0
self.net_input = {}
self.is_training = None
def visit_graph(self, graph: d5.ops.OnnxGraph, network: TensorflowNetwork):
self.net_input.clear()
tf.reset_default_graph()
self.is_training = tf.placeholder(tf.bool)
def visit_net_input(self, input: d5.ops.OnnxValueInfo, network: TensorflowNetwork):
if isinstance(input.type, d5.ops.OnnxTensorType):
tensor_type = input.type
self.net_input[input.name] = (tensor_type.type.to_numpy(), tensor_type.shape.shape)
else:
raise NotImplementedError('Only tensor input supported currently')
def visit_net_output(self, output: d5.ops.OnnxValueInfo, network: TensorflowNetwork):
network.output_names.append(output.name)
def visit_initializer(self, initializer: d5.ops.OnnxTensor, network: TensorflowNetwork):
network.feed_tensor(initializer.name, initializer.data, is_param=True)
if initializer.name in self.net_input:
del self.net_input[initializer.name]
def visit_constant(self, op: d5.ops.Constant, network: TensorflowNetwork):
tensor = tf.convert_to_tensor(op.value.get_value())
network.feed_internal_tensor(op.o_output, tensor)
def visit_initializer_end(self, network: TensorflowNetwork):
for name, (numpy_type, shape) in self.net_input.items():
placeholder = tf.placeholder(dtype=numpy_type, shape=shape, name=name)
network.feed_internal_tensor(name, placeholder)
def visit_add(self, op: d5.ops.Add, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
C = tf.add(A, B)
network.feed_internal_tensor(op.o_C, C)
def visit_dropout(self, op: d5.ops.Dropout, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
ratio = op.ratio.get_value() if op.ratio else 0.5
Y = tf.layers.dropout(X, rate=ratio, training=self.is_training)
network.feed_internal_tensor(op.o_output, Y)
def visit_sub(self, op: d5.ops.Sub, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
C = tf.subtract(A, B)
network.feed_internal_tensor(op.o_C, C)
def visit_mul(self, op: d5.ops.Mul, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
C = tf.multiply(A, B)
network.feed_internal_tensor(op.o_C, C)
def visit_xor(self, op: d5.ops.Xor, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
C = tf.logical_xor(A, B)
network.feed_internal_tensor(op.o_C, C)
def visit_div(self, op: d5.ops.Div, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
C = tf.div(A, B)
network.feed_internal_tensor(op.o_C, C)
def visit_equal(self, op: d5.ops.Equal, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
C = tf.equal(A, B)
network.feed_internal_tensor(op.o_C, C)
def visit_greater(self, op: d5.ops.Greater, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
C = tf.greater(A, B)
network.feed_internal_tensor(op.o_C, C)
def visit_less(self, op: d5.ops.Less, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
C = tf.less(A, B)
network.feed_internal_tensor(op.o_C, C)
def visit_or(self, op: d5.ops.Or, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
C = tf.logical_or(A, B)
network.feed_internal_tensor(op.o_C, C)
def visit_not(self, op: d5.ops.Not, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
Y = tf.logical_not(X)
network.feed_internal_tensor(op.o_Y, Y)
def visit_argmax(self, op: d5.ops.ArgMax, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
Y = tf.argmax(X)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_argmin(self, op: d5.ops.ArgMin, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
Y = tf.argmin(X)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_floor(self, op: d5.ops.Floor, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
Y = tf.floor(X)
network.feed_internal_tensor(op.o_Y, Y)
def visit_cast(self, op: d5.ops.Cast, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_input)
Y = tf.cast(X, op.to.get_value())
network.feed_internal_tensor(op.o_output, Y)
def visit_affine(self, op: d5.ops.Affine, network: TensorflowNetwork):
# y = alpha * x + beta,
X = network.fetch_internal_tensor(op.i_X)
Y = op.alpha.get_value() * X + op.beta.get_value()
network.feed_internal_tensor(op.o_Y, Y)
def visit_ceil(self, op: d5.ops.Ceil, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
Y = tf.ceil(X)
network.feed_internal_tensor(op.o_Y, Y)
def visit_reducel1(self, op: d5.ops.ReduceL1, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
axis = [int(v) for v in axis] if isinstance(axis, list) else axis
if len(axis) == 1:
axis = axis[0]
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.norm(X, ord=1, axis=axis, keepdims=keepdims)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_reducel2(self, op: d5.ops.ReduceL2, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
axis = [int(v) for v in axis] if isinstance(axis, list) else axis
if len(axis) == 1:
axis = axis[0]
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.norm(X, ord=2, axis=axis, keepdims=keepdims)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_reducesumsquare(self, op: d5.ops.ReduceSumSquare, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.reduce_sum(tf.square(X), axis=axis, keepdims=keepdims)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_reducelogsum(self, op: d5.ops.ReduceLogSum, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.log(tf.reduce_sum(X, axis=axis, keepdims=keepdims))
network.feed_internal_tensor(op.o_reduced, Y)
def visit_reducesum(self, op: d5.ops.ReduceSum, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.reduce_sum(X, axis=axis, keepdims=keepdims)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_reducemean(self, op: d5.ops.ReduceMean, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.reduce_mean(X, axis=axis, keepdims=keepdims)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_reducemax(self, op: d5.ops.ReduceMax, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.reduce_max(X, axis=axis, keepdims=keepdims)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_reducemin(self, op: d5.ops.ReduceMin, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.reduce_min(X, axis=axis, keepdims=keepdims)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_reduceprod(self, op: d5.ops.ReduceProd, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.reduce_prod(X, axis=axis, keepdims=keepdims)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_reducelogsumexp(self, op: d5.ops.ReduceLogSumExp, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_data)
axis = op.axes.get_value() if op.axes is not None else list(range(len(X.get_shape().as_list())))
keepdims = True if op.keepdims is None else op.keepdims.get_value() == 1
Y = tf.reduce_logsumexp(X, axis=axis, keepdims=keepdims)
network.feed_internal_tensor(op.o_reduced, Y)
def visit_relu(self, op: d5.ops.Relu, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
Y = tf.nn.relu(X)
network.feed_internal_tensor(op.o_Y, Y)
def visit_max(self, op: d5.ops.Max, network: TensorflowNetwork):
X = network.fetch_internal_tensors(op.input)
Y = tf.reduce_max(tf.stack(X), axis=0)
network.feed_internal_tensor(op.o_max, Y)
def visit_min(self, op: d5.ops.Min, network: TensorflowNetwork):
X = network.fetch_internal_tensors(op.input)
Y = tf.reduce_min(tf.stack(X), axis=0)
network.feed_internal_tensor(op.o_min, Y)
def visit_mean(self, op: d5.ops.Mean, network: TensorflowNetwork):
X = network.fetch_internal_tensors(op.input)
Y = tf.reduce_mean(tf.stack(X), axis=0)
network.feed_internal_tensor(op.o_mean, Y)
def visit_prelu(self, op: d5.ops.PRelu, network: TensorflowNetwork):
X, slope = network.fetch_internal_tensors([op.i_X, op.i_slope])
slope = self.expand_to_broadcast(slope, 1, len(X.get_shape()))
pos = tf.nn.relu(X)
neg = slope * (X - abs(X)) * 0.5
Y = pos + neg
network.feed_internal_tensor(op.o_Y, Y)
def visit_leakyrelu(self, op: d5.ops.LeakyRelu, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
alpha = op.alpha.get_value() if op.alpha else 0.01
Y = tf.nn.relu(X) - alpha * tf.nn.relu(-X)
network.feed_internal_tensor(op.o_Y, Y)
def visit_slice(self, op: d5.ops.Slice, network: TensorflowNetwork):
# Adapted from:
# https://github.com/onnx/onnx-tensorflow/blob/master/onnx_tf/backends/backend_v1.py#L700
X = network.fetch_internal_tensor(op.i_data)
X_shape = X.get_shape().as_list()
X_begin = [0] * len(X_shape)
starts = op.starts.get_value()
ends = op.ends.get_value()
slice_len = len(starts)
axes = op.axes.get_value() if op.axes else list(range(slice_len))
for i in range(slice_len):
ends[i] = X_shape[axes[i]] + ends[i] if ends[i] < 0 else ends[i]
if X_shape[axes[i]] is not None:
ends[i] = np.min([X_shape[axes[i]], ends[i]])
starts[i] = np.min([X_shape[axes[i]], starts[i]])
X_begin[axes[i]] = starts[i]
X_shape[axes[i]] = ends[i] - starts[i]
Y = tf.slice(X, tf.constant(X_begin), tf.constant(X_shape))
network.feed_internal_tensor(op.o_output, Y)
def visit_clip(self, op: d5.ops.Clip, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_input)
max = op.max.get_value() if op.max else tf.reduce_max(X)
min = op.min.get_value() if op.min else tf.reduce_min(X)
Y = tf.clip_by_value(X, min, max)
network.feed_internal_tensor(op.o_output, Y)
def visit_sum(self, op: d5.ops.Sum, network: TensorflowNetwork):
X = network.fetch_internal_tensors(op.input)
Y = tf.reduce_sum(X, axis=0)
network.feed_internal_tensor(op.o_sum, Y)
def visit_abs(self, op: d5.ops.Abs, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
Y = tf.abs(X)
network.feed_internal_tensor(op.o_Y, Y)
def visit_neg(self, op: d5.ops.Neg, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
Y = tf.negative(X)
network.feed_internal_tensor(op.o_Y, Y)
def visit_pow(self, op: d5.ops.Pow, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_X, op.i_Y])
C = tf.pow(A, B)
network.feed_internal_tensor(op.o_Z, C)
def visit_reshape(self, op: d5.ops.Reshape, network: TensorflowNetwork):
X, Shape = network.fetch_internal_tensors([op.i_data, op.i_shape])
Y = tf.reshape(X, Shape)
network.feed_internal_tensor(op.o_reshaped, Y)
def expand_to_broadcast(self, X, broadcast_dim=1, total_num_dim=4):
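        # Shape sketch (example values assumed): a slope tensor of shape (C,) with
        # broadcast_dim=1 and total_num_dim=4 becomes shape (1, C, 1, 1), so it
        # broadcasts against an NCHW input of shape (N, C, H, W).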
if broadcast_dim < 0:
broadcast_dim += total_num_dim
dims = [broadcast_dim + i for i in range(len(X.shape))]
for i in range(total_num_dim):
if i not in dims:
X = tf.expand_dims(X, i)
return X
def visit_and(self, op: d5.ops.And, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
Y = tf.logical_and(A, B)
network.feed_internal_tensor(op.o_C, Y)
def visit_softmax(self, op: d5.ops.Softmax, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_input)
Y = tf.nn.softmax(X)
network.feed_internal_tensor(op.o_output, Y)
def visit_cross_entropy(self, op: d5.ops.CrossEntropy, network: TensorflowNetwork):
labels = tf.placeholder(tf.int32, name=op.i_target)
network.feed_internal_tensor(op.i_target, labels)
X = network.fetch_internal_tensor(op.i_X)
L = -tf.reduce_sum(labels * tf.log(X), 1)
L = tf.reduce_mean(L, axis=0)
network.loss_gradient = L
network.feed_internal_tensor(op.o_output, L)
network.add_output(op.o_output)
def visit_softmax_cross_entropy(self, op: d5.ops.SoftmaxCrossEntropy, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
labels = tf.placeholder(tf.int32, name=op.i_target)
network.feed_internal_tensor(op.i_target, labels)
labels = tf.one_hot(labels, X.get_shape().as_list()[-1])
result = tf.nn.softmax_cross_entropy_with_logits_v2(
labels,
X,
axis=None # Defaults to -1
)
L = tf.reduce_mean(result, axis=0)
network.loss_gradient = L
network.feed_internal_tensor(op.o_output, L)
network.add_output(op.o_output)
def visit_mean_squared_error(self, op: d5.ops.MeanSquaredError, network: TensorflowNetwork):
y_pred = network.fetch_internal_tensor(op.i_X)
y_true = tf.placeholder(tf.float32, name=op.i_target, shape=y_pred.shape)
network.feed_internal_tensor(op.i_target, y_true)
L = tf.nn.l2_loss(y_true - y_pred)
network.loss_gradient = L
network.feed_internal_tensor(op.o_output, L)
network.add_output(op.o_output)
def visit_matmul(self, op: d5.ops.MatMul, network: TensorflowNetwork):
A, B = network.fetch_internal_tensors([op.i_A, op.i_B])
Y = tf.matmul(A, B)
network.feed_internal_tensor(op.o_Y, Y)
def visit_pad(self, op, network):
data = network.fetch_internal_tensor(op.i_data)
if all([p == 0 for p in op.pads.value]): # No padding?
network.feed_internal_tensor(op.o_output, data)
return
y = tf.pad(
data,
op.pads.value,
mode=op.mode.value,
constant_values=op.value.value
)
network.feed_internal_tensor(op.o_output, y)
def visit_shape(self, op, network):
data = network.fetch_internal_tensor(op.i_data)
network.feed_internal_tensor(op.o_shape, tf.shape(data, out_type=tf.int64))
def visit_squeeze(self, op, network):
data = network.fetch_internal_tensor(op.i_data)
network.feed_internal_tensor(op.o_squeezed,
tf.squeeze(data, axis=op.axes.value))
def visit_unsqueeze(self, op, network):
data = network.fetch_internal_tensor(op.i_data)
result = data
for axis in op.axes.value:
result = tf.expand_dims(result, axis=axis)
network.feed_internal_tensor(op.o_expanded, result)
def visit_concat(self, op, network):
in_tensors = [network.fetch_internal_tensor(i) for i in op.input]
out = tf.concat(in_tensors, op.axis.value)
network.feed_internal_tensor(op.output[0], out)
def visit_lrn(self, op, network):
X = network.fetch_internal_tensor(op.i_X)
nsize = (op.size.value - 1) // 2
result = tf.nn.local_response_normalization(
X,
depth_radius=nsize,
bias=op.bias.value,
alpha=op.alpha.value / op.size.value,
beta=op.beta.value)
network.feed_internal_tensor(op.o_Y, result)
def visit_split(self, op, network):
input = network.fetch_internal_tensor(op.i_input)
results = tf.split(
input,
op.split.value,
axis=op.axis.value)
for res,out in zip(results, op.output):
network.feed_internal_tensor(out, res)
def visit_gather(self, op, network):
input = network.fetch_internal_tensor(op.i_data)
indices = network.fetch_internal_tensor(op.i_indices)
results = tf.gather(
input,
indices,
axis=op.axis.value)
network.feed_internal_tensor(op.o_output, results)
def visit_gemm(self, op: d5.ops.Gemm, network: TensorflowNetwork):
(A, B, C) = network.fetch_internal_tensors([op.i_A, op.i_B, op.i_C])
alpha = 1.0 if op.alpha is None else op.alpha.get_value()
beta = 1.0 if op.beta is None else op.beta.get_value()
trans_a = 0 if op.transA is None else op.transA.get_value()
trans_b = 0 if op.transB is None else op.transB.get_value()
if trans_a:
A = tf.transpose(A)
if trans_b:
B = tf.transpose(B)
Y = alpha * tf.matmul(A, B) + beta * C
network.feed_internal_tensor(op.o_Y, Y)
def visit_flatten(self, op: d5.ops.Flatten, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_input)
shape = tf.shape(X)
x_rank = len(X.shape)
axis = 1 if op.axis is None else op.axis.get_value()
if axis == 1 and x_rank > 1:
Y = tf.layers.flatten(X)
else:
if axis == 0:
cal_shape = (1, -1)
else:
cal_shape = (tf.reduce_prod(shape[0:axis]),
tf.reduce_prod(shape[axis:tf.size(shape)]))
Y = tf.reshape(X, cal_shape)
network.feed_internal_tensor(op.o_output, Y)
def visit_globalmaxpool(self, op: d5.ops.GlobalMaxPool, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
dims = tf.range(tf.rank(X))
        _, dim_window = tf.split(dims, [2, tf.size(dims) - 2])
Y = tf.reduce_max(X, axis=dim_window, keep_dims=True)
network.feed_internal_tensor(op.o_Y, Y)
def visit_maxpool(self, op: d5.ops.MaxPool, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
kernel_shape = op.kernel_shape.get_value()
strides = None if op.strides is None else op.strides.get_value()
auto_pad = None if op.auto_pad is None else op.auto_pad.get_value()
pads = None if op.pads is None else op.pads.get_value()
Y = self.pool(X, kernel_shape, partial(tf.nn.pool, pooling_type='MAX'), 'MAX',
strides=strides,
pads=pads,
count_include_pad=None,
auto_pad=auto_pad)
network.feed_internal_tensor(op.o_Y, Y)
def visit_averagepool(self, op: d5.ops.AveragePool, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
kernel_shape = op.kernel_shape.get_value()
strides = None if op.strides is None else op.strides.get_value()
auto_pad = None if op.auto_pad is None else op.auto_pad.get_value()
pads = None if op.pads is None else op.pads.get_value()
Y = self.pool(X, kernel_shape, partial(tf.nn.pool, pooling_type='AVG'), 'AVG',
strides=strides,
pads=pads,
                      count_include_pad=None if op.count_include_pad is None else op.count_include_pad.get_value(),
auto_pad=auto_pad)
network.feed_internal_tensor(op.o_Y, Y)
def visit_globalaveragepool(self, op: d5.ops.GlobalAveragePool, network: TensorflowNetwork):
X = network.fetch_internal_tensor(op.i_X)
modtyp = getattr(tf.keras.layers,
'GlobalAveragePooling%dD' % (len(X.shape) - 2), None)
if modtyp is None:
raise RuntimeError('Unsupported dimensions for global average pool'
'(%d)' % (len(X.shape) - 2))
# ONNX forces channels_first format
tfop = modtyp(data_format='channels_first')
# Spatial mean w.r.t. channel dimensions
Y = tfop.apply(X)
network.feed_internal_tensor(op.o_Y, Y)
def visit_conv(self, op: d5.ops.Conv, network: TensorflowNetwork):
X, W = network.fetch_internal_tensors([op.i_X, op.i_W])
bias = op.i_B is not None
B = None
if bias:
B = network.fetch_internal_tensor(op.i_B)
kernel_shape = op.kernel_shape.get_value() if op.kernel_shape else None
pads = op.pads.get_value() if op.pads else None
strides = op.strides.get_value() if op.strides else None
dilations = op.dilations.get_value() if op.dilations else None
group = op.group.get_value() if op.group else None
Y = self._conv(X, W, kernel_shape is not None, B=B, kernel_shape=kernel_shape, dilations=dilations,
strides=strides, pads=pads, group=group)
network.feed_internal_tensor(op.o_Y, Y)
def visit_convtranspose(self, op: d5.ops.ConvTranspose, network: TensorflowNetwork):
X, W = network.fetch_internal_tensors([op.i_X, op.i_W])
bias = op.i_B is not None
B = None
if bias:
B = network.fetch_internal_tensor(op.i_B)
kernel_shape = op.kernel_shape.get_value() if op.kernel_shape else None
pads = op.pads.get_value() if op.pads else None
strides = op.strides.get_value() if op.strides else None
dilations = op.dilations.get_value() if op.dilations else None
group = op.group.get_value() if op.group else None
output_padding = op.output_padding.get_value() if op.output_padding else None
output_shape = op.output_shape.get_value() if op.output_shape else None
Y = self._conv(X, W, kernel_shape is not None, B=B, kernel_shape=kernel_shape, dilations=dilations,
strides=strides, pads=pads, group=group, output_padding=output_padding,
output_shape=output_shape, transpose=True)
network.feed_internal_tensor(op.o_Y, Y)
# Taken from https://github.com/onnx/onnx-tensorflow/
def _conv_(self, x, in_weights, kernel_shape, pads, strides, dilations, group, transpose, is_bias=False, b=None,
output_shape=None, output_padding=None):
""" Convolution method for both conv and transposed conv
For transposed conv,
Attr pads is not used for input, but declares how much output is padded.
Here, output means output from transposed conv which already pad output_padding if set.
So the pseudo explanation for output should be:
        output = conv_transpose_output + output_padding - pads
        And conv_transpose_output shape should be:
        conv_transpose_output_shape[i] = strides[i] * (input_shape[i] - 1) + kernel_shape[i]
"""
x_rank = len(x.get_shape())
x_shape = x.get_shape().as_list()
spatial_size = x_rank - 2
support_cuda = self.supports_device("CUDA")
storage_format, compute_format = self.get_data_format(x_rank, support_cuda)
compute_c_idx = compute_format.find("C")
spatial_format = "".join([d for d in compute_format if d not in ["N", "C"]])
weights_rank = len(in_weights.get_shape())
if transpose:
# Translate weights from (C x M x KH x KW) to (KH x KW X M X C)
perm = list(range(2, weights_rank)) + [1, 0]
else:
# Translate weights from (M x C x KH x KW) to (KH x KW X C X M)
perm = list(range(2, weights_rank)) + [1, 0]
assert in_weights.get_shape().as_list()[2:] == kernel_shape, (
"kernel_shape "
"attr of convolution does not match the actual weight "
"passed to this operation, attr {}, actual {}").format(
kernel_shape,
in_weights.get_shape().as_list())
weights = tf.transpose(in_weights, perm)
pads = pads if pads else [0, 0] * spatial_size
if not transpose:
x = self.get_padding_as_op(x, pads)
group = group if group else 1
weight_groups = tf.split(weights, num_or_size_splits=group, axis=-1)
if support_cuda:
xs = tf.split(x, num_or_size_splits=group, axis=1)
else:
x = tf.transpose(
x, perm=self.get_perm_from_formats(storage_format, compute_format))
xs = tf.split(x, num_or_size_splits=group, axis=-1)
if transpose:
if dilations != [1] * spatial_size:
raise RuntimeError("Cannot set non-1 dilation for conv transpose.")
convolved = []
for (x, weight) in zip(xs, weight_groups):
x_spatial_shape = [
x_shape[storage_format.find(d)] for d in spatial_format
]
weights_shape = weights.get_shape().as_list()
# calculate output shape
if output_shape is None:
conv_output_shape = [x_shape[storage_format.find("N")]] + [
strides[i] * (x_spatial_shape[i] - 1) + weights_shape[i]
for i in list(range(spatial_size))
]
conv_output_shape.insert(compute_c_idx, weights_shape[-2])
else:
conv_output_shape = [output_shape[0]] + [
s + pads[i] + pads[spatial_size + i]
for i, s in enumerate(output_shape[2:])
]
conv_output_shape.insert(compute_c_idx, output_shape[1])
# make strides to match input rank
strides_full = [1] + strides
strides_full.insert(compute_c_idx, 1)
                # get the corresponding transposed-convolution function from tensorflow
if spatial_size == 1:
conv_func = tf.contrib.nn.conv1d_transpose
elif spatial_size == 2:
conv_func = tf.nn.conv2d_transpose
elif spatial_size == 3:
conv_func = tf.nn.conv3d_transpose
else:
raise NotImplementedError(
"Transposed convolution for {}d is not implemented in Tensorflow".
format(spatial_size))
# use raw input x to do transposed conv
conv_rs = conv_func(
x,
weights,
conv_output_shape,
strides_full,
padding="VALID",
data_format=compute_format)
# pad output first by output_padding attr
if output_padding is not None and output_shape is None:
output_padding = [[0, 0]
] + [[0, p] for p in output_padding]
output_padding.insert(compute_c_idx, [0, 0])
conv_rs = tf.pad(conv_rs, output_padding)
# remove pads set in pads attr
conv_rs_shape = conv_rs.get_shape().as_list()
begin = [0] + pads[:spatial_size]
begin.insert(compute_c_idx, 0)
size = [
s if d in ["N", "C"] else s - pads[spatial_format.find(d)] -
pads[spatial_format.find(d) + spatial_size]
for d, s in zip(compute_format, conv_rs_shape)
]
conv_rs = tf.slice(conv_rs, begin=begin, size=size)
convolved.append(conv_rs)
else:
convolved = [
tf.nn.convolution(
x,
weight,
"VALID",
strides=strides,
dilation_rate=dilations,
data_format=compute_format)
for (x, weight) in zip(xs, weight_groups)
]
if not is_bias:
if support_cuda:
output = tf.concat(convolved, axis=1)
else:
output = tf.concat(convolved, axis=-1)
output = tf.transpose(
output,
perm=self.get_perm_from_formats(compute_format, storage_format))
else:
bias = b
bias = self._explicit_broadcast(
bias, broadcast_dim=compute_c_idx, total_num_dim=x_rank)
if support_cuda:
output = tf.concat(convolved, axis=1)
output = tf.add(output, bias)
else:
output = tf.concat(convolved, axis=-1)
output = tf.add(output, bias)
output = tf.transpose(
output,
perm=self.get_perm_from_formats(compute_format, storage_format))
return output
def _conv(self, X, W, has_kernel_shape,
B=None,
kernel_shape=None,
dilations=None,
strides=None,
pads=None,
group=None,
output_shape=None,
output_padding=None,
transpose=False):
""" Convolution method for both conv and transposed conv
For transposed conv,
Attr pads is not used for input, but declares how much output is padded.
Here, output means output from transposed conv which already pad output_padding if set.
So the pseudo explanation for output should be:
output = conv_transpose_output + output_padding - pads
And conv_transpose_output shape should be:
conv_transpose_output_shape[i] = strides[i] * (input_shape[i] - 1) + kernel_shape[i]
"""
x = X
x_rank = len(x.get_shape())
x_shape = x.get_shape().as_list()
spatial_size = x_rank - 2
support_cuda = self.supports_device("CUDA")
storage_format, compute_format = self.get_data_format(x_rank, support_cuda)
compute_c_idx = compute_format.find("C")
spatial_format = "".join([d for d in compute_format if d not in ["N", "C"]])
in_weights = W
weights_rank = len(in_weights.get_shape())
if transpose:
# Translate weights from (C x M x KH x KW) to (KH x KW X M X C)
perm = list(range(2, weights_rank)) + [1, 0]
else:
# Translate weights from (M x C x KH x KW) to (KH x KW X C X M)
perm = list(range(2, weights_rank)) + [1, 0]
if has_kernel_shape:
assert in_weights.get_shape().as_list()[2:] == kernel_shape, (
"kernel_shape "
"attr of convolution does not match the actual weight "
"passed to this operation, attr {}, actual {}").format(
kernel_shape,
in_weights.get_shape().as_list())
weights = tf.transpose(in_weights, perm)
dilations = [1] * spatial_size if dilations is None else dilations
strides = [1] * spatial_size if strides is None else strides
pads = [0, 0] * spatial_size if pads is None else pads
if not transpose:
x = self.get_padding_as_op(x, pads)
group = 1 if group is None else group
weight_groups = tf.split(weights, num_or_size_splits=group, axis=-1)
if support_cuda:
xs = tf.split(x, num_or_size_splits=group, axis=1)
else:
x = tf.transpose(
x, perm=self.get_perm_from_formats(storage_format, compute_format))
xs = tf.split(x, num_or_size_splits=group, axis=-1)
if transpose:
if dilations != [1] * spatial_size:
raise RuntimeError("Cannot set non-1 dilation for conv transpose.")
convolved = []
for (x, weight) in zip(xs, weight_groups):
x_spatial_shape = [
x_shape[storage_format.find(d)] for d in spatial_format
]
weights_shape = weights.get_shape().as_list()
# calculate output shape
if output_shape is None:
conv_output_shape = [x_shape[storage_format.find("N")]] + [
strides[i] * (x_spatial_shape[i] - 1) + weights_shape[i]
for i in list(range(spatial_size))
]
conv_output_shape.insert(compute_c_idx, weights_shape[-2])
else:
conv_output_shape = [output_shape[0]] + [
s + pads[i] + pads[spatial_size + i]
for i, s in enumerate(output_shape[2:])
]
conv_output_shape.insert(compute_c_idx, output_shape[1])
# make strides to match input rank
strides_full = [1] + strides
strides_full.insert(compute_c_idx, 1)
                # get the corresponding transposed-convolution function from tensorflow
if spatial_size == 1:
conv_func = tf.contrib.nn.conv1d_transpose
strides_full = strides[0]
elif spatial_size == 2:
conv_func = tf.nn.conv2d_transpose
elif spatial_size == 3:
conv_func = tf.nn.conv3d_transpose
else:
raise NotImplementedError(
"Transposed convolution for {}d is not implemented in Tensorflow".
format(spatial_size))
# use raw input x to do transposed conv
conv_rs = conv_func(
x,
weights,
conv_output_shape,
strides_full,
padding="VALID",
data_format=compute_format)
# pad output first by output_padding attr
if output_padding is not None and output_shape is None:
output_padding = [[0, 0]
] + [[0, p] for p in output_padding]
output_padding.insert(compute_c_idx, [0, 0])
conv_rs = tf.pad(conv_rs, output_padding)
# remove pads set in pads attr
conv_rs_shape = conv_rs.get_shape().as_list()
begin = [0] + pads[:spatial_size]
begin.insert(compute_c_idx, 0)
size = [
s if d in ["N", "C"] else s - pads[spatial_format.find(d)] -
pads[spatial_format.find(d) + spatial_size]
for d, s in zip(compute_format, conv_rs_shape)
]
conv_rs = tf.slice(conv_rs, begin=begin, size=size)
convolved.append(conv_rs)
else:
convolved = [
tf.nn.convolution(
x,
weight,
"VALID",
strides=strides,
dilation_rate=dilations,
data_format=compute_format)
for (x, weight) in zip(xs, weight_groups)
]
if B is None:
if support_cuda:
output = tf.concat(convolved, axis=1)
else:
output = tf.concat(convolved, axis=-1)
output = tf.transpose(
output,
perm=self.get_perm_from_formats(compute_format, storage_format))
else:
bias = B
bias = self._explicit_broadcast(
bias, broadcast_dim=compute_c_idx, total_num_dim=x_rank)
if support_cuda:
output = tf.concat(convolved, axis=1)
output = tf.add(output, bias)
else:
output = tf.concat(convolved, axis=-1)
output = tf.add(output, bias)
output = tf.transpose(
output,
perm=self.get_perm_from_formats(compute_format, storage_format))
return output
def _explicit_broadcast(cls, tensor, broadcast_dim=1, total_num_dim=4):
if broadcast_dim < 0:
broadcast_dim += total_num_dim
dims = [broadcast_dim + i for i in range(len(tensor.shape))]
for i in range(total_num_dim):
if i not in dims:
tensor = tf.expand_dims(tensor, i)
return tensor
def _compatibility_pool(cls, X, kernel_shape, strides, pads, auto_pad, pooling_type, count_include_pad):
def py_pool(x, kernel_shape, strides, pads, out_shape, count_include_pad,
pooling_type):
pooling_type = pooling_type.decode('UTF-8')
x_shape = np.shape(x)
spatial_size = len(x_shape[2:])
pad_attr = [(0, 0), (0, 0)] + [
(pads[i], pads[i + spatial_size]) for i in range(spatial_size)
]
constant_values = np.nan if count_include_pad == 0 else 0
padded = np.pad(
x, pad_attr, mode="constant", constant_values=constant_values)
pad_shape = [
pads[i] + pads[i + spatial_size] for i in range(spatial_size)
]
y = np.zeros([x_shape[0], x_shape[1]] + list(out_shape))
for shape in itertools.product(
range(x_shape[0]), range(x_shape[1]), *[
range(
int((x_shape[i + 2] + pad_shape[i] - kernel_shape[i]
) / strides[i] + 1)) for i in range(spatial_size)
]):
window = padded[shape[0], shape[1]]
window_vals = np.array([
window[i] for i in list(
itertools.product(*[
range(strides[i] * shape[i + 2],
strides[i] * shape[i + 2] + kernel_shape[i])
for i in range(spatial_size)
]))
])
if pooling_type == 'AVG':
f = np.average
elif pooling_type == 'MAX':
f = np.max
else:
raise NotImplementedError(
'Pooling type {} does not support. Should be AVG, MAX'.format(
pooling_type))
if count_include_pad == 0:
y[shape] = f(window_vals[np.where(~np.isnan(window_vals))])
else:
y[shape] = f(window_vals)
return y.astype(np.float32)
x = X
x_shape = x.shape.as_list()
spatial_size = len(x_shape) - 2
kernel_shape = kernel_shape
strides = strides
pads = pads if pads is not None else [0] * spatial_size * 2
auto_pad = auto_pad if auto_pad is not None else ""
count_include_pad = count_include_pad if count_include_pad is not None else 0
out_shape, pads = cls._pool_get_shapes(auto_pad, x_shape[2:], kernel_shape,
strides, pads)
pooled = tf.py_func(py_pool, [
x, kernel_shape, strides, pads, out_shape, count_include_pad,
pooling_type
], tf.float32)
pooled.set_shape(x_shape[0:2] + out_shape)
return pooled
def visit_batchnormalization(self, op: d5.ops.BatchNormalization, network: TensorflowNetwork):
X, B, scale, r_mean, r_var = network.fetch_internal_tensors([
op.i_X, op.i_B, op.i_scale, op.i_mean, op.i_var])
momentum = 0.9 if op.momentum is None else op.momentum.get_value()
epsilon = op.epsilon.get_value() if op.epsilon else 1e-5
# Axis is fixed to 1 since ONNX forces the NCHW data layout.
tfop = tf.layers.BatchNormalization(axis=1, momentum=momentum,
epsilon=epsilon)
Y = tfop.apply(X, training=self.is_training)
# Add network initializers for running mean, variance, and gamma/beta
network.initializers[tfop.gamma] = op.i_scale
if op.i_B is not None:
network.initializers[tfop.beta] = op.i_B
network.initializers[tfop.moving_mean] = op.i_mean
network.initializers[tfop.moving_variance] = op.i_var
network.feed_internal_tensor(op.o_Y, Y)
PAD_TF_INCOMPATIBLE = "PAD_TF_INCOMPATIBLE"
def pool(self, X, kernel_shape, pool_func, pooling_type,
strides=None, pads=None, count_include_pad=None, auto_pad=None):
x = X
x_rank = len(x.get_shape())
x_shape = x.get_shape().as_list()
spatial_size = x_rank - 2
support_cuda = self.supports_device("CUDA")
storage_format, compute_format = self.get_data_format(x_rank, support_cuda)
strides = [1] * spatial_size if strides is None else strides
pads = pads if pads else None
pad = TensorflowVisitor.PAD_TF_INCOMPATIBLE
# from version 7
count_include_pad = 0 if count_include_pad is None else count_include_pad
# If padding is specified, try to recover it from explicit padding
        # specification to a tensorflow padding mode:
if pads is not None:
pad = self._get_tf_pad(x_shape[2:], kernel_shape, strides, pads)
else:
# Neither pad nor auto_pad is specified, assume no padding.
if auto_pad is None:
pad = "VALID"
# We consult auto_pad if pad is not specified and auto_pad
# is available.
else:
if auto_pad == "SAME_UPPER":
pad = "SAME"
elif auto_pad == "VALID":
pad = "VALID"
elif auto_pad == "SAME_LOWER":
pad = TensorflowVisitor.PAD_TF_INCOMPATIBLE
if count_include_pad == 1:
_, pads = self._pool_get_shapes(auto_pad, x_shape[2:],
kernel_shape, strides,
[0] * spatial_size * 2)
if count_include_pad == 0:
if pad is TensorflowVisitor.PAD_TF_INCOMPATIBLE:
return self._compatibility_pool(X, kernel_shape, strides, pads, auto_pad, pooling_type,
count_include_pad)
else:
if pads != [0] * spatial_size * 2:
x = self.get_padding_as_op(x, pads)
pad = "VALID"
if support_cuda:
pooled = pool_func(
x,
kernel_shape,
padding=pad,
strides=strides,
data_format=compute_format)
else:
x = tf.transpose(
x, perm=self.get_perm_from_formats(storage_format, compute_format))
pooled = pool_func(
x,
kernel_shape,
padding=pad,
strides=strides,
data_format=compute_format)
pooled = tf.transpose(
pooled, perm=self.get_perm_from_formats(compute_format, storage_format))
return pooled
def _pool_get_shapes(self, auto_pad, x_shape, kernel_shape, strides, pads):
def _get_pad_shape(auto_pad, input_spatial_shape, kernel_spatial_shape,
strides_spatial, output_spatial_shape):
pad_shape = [0] * len(input_spatial_shape)
if auto_pad in ("SAME_UPPER", "SAME_LOWER"):
for i in range(len(input_spatial_shape)):
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial[i] + kernel_spatial_shape[i] - \
input_spatial_shape[i]
elif auto_pad in ("VALID", ""):
pass
return pad_shape
def _get_output_shape(auto_pad, input_spatial_shape, kernel_spatial_shape,
strides_spatial):
out_shape = [0] * len(input_spatial_shape)
if auto_pad in ("SAME_UPPER", "SAME_LOWER"):
for i in range(len(input_spatial_shape)):
out_shape[i] = int(
np.ceil(
float(input_spatial_shape[i]) / float(strides_spatial[i])))
elif auto_pad in ("VALID", ""):
for i in range(len(input_spatial_shape)):
out_shape[i] = int(
np.ceil(
float(input_spatial_shape[i] - (kernel_spatial_shape[i] - 1))
/ float(strides_spatial[i])))
return out_shape
spatial_size = len(x_shape)
new_pads = pads[:]
if auto_pad in ["SAME_UPPER", "SAME_LOWER"]:
out_shape = _get_output_shape(auto_pad, x_shape, kernel_shape, strides)
pad_shape = _get_pad_shape(auto_pad, x_shape, kernel_shape, strides,
out_shape)
for i in range(spatial_size):
if auto_pad == "SAME_LOWER":
new_pads[i + spatial_size] = pad_shape[i] // 2
new_pads[i] = pad_shape[i] - new_pads[i + spatial_size]
elif auto_pad == "SAME_UPPER":
new_pads[i] = pad_shape[i] // 2
new_pads[i + spatial_size] = pad_shape[i] - new_pads[i]
elif auto_pad in ["", "VALID"]:
pad_shape = [
pads[i] + pads[i + spatial_size] for i in range(spatial_size)
]
out_shape = _get_output_shape(auto_pad, np.add(x_shape, pad_shape),
kernel_shape, strides)
return out_shape, new_pads
# input_shape, kernel_shape, strides are specified for
# spatial dims only.
def _get_tf_pad(self, input_shape, kernel_shape, strides, pads):
assert pads is not None
num_sp_dim = int(len(kernel_shape))
if pads == [0] * num_sp_dim * 2:
return "VALID"
_, same_pads = self._pool_get_shapes("SAME_UPPER", input_shape, kernel_shape,
strides, pads)
if pads == same_pads:
return "SAME"
return TensorflowVisitor.PAD_TF_INCOMPATIBLE
# End of adaptation from onnx-tensorflow
def get_perm_from_formats(self, _from, _to):
return list(map(lambda x: _from.find(x), _to))
def get_data_format(self, x_rank, support_cuda):
sp_dim_names = ["D", "H", "W"]
sp_dim_lst = []
for i in range(x_rank - 2):
sp_dim_lst.append(sp_dim_names[-i - 1])
sp_dim_string = "".join(reversed(sp_dim_lst))
storage_format = "NC" + sp_dim_string
if support_cuda:
compute_format = "NC" + sp_dim_string
else:
compute_format = "N" + sp_dim_string + "C"
return storage_format, compute_format
def get_random_name(self):
self.counter += 1
return "random_name_{}".format(self.counter)
def get_padding_as_op(self, X, pads):
n_dim = int(len(pads) / 2)
tf_pads = np.transpose(np.array(pads).reshape([2, n_dim]))
tf_pads = [0, 0, 0, 0] + tf_pads.flatten().tolist()
padding = tf.constant(
np.array(tf_pads).reshape([n_dim + 2, 2])
            .astype(np.int32))  # tensorflow requires int32 paddings
return tf.pad(X, padding)
def supports_device(self, device_name):
return False
|
the-stack_106_29129 | # Complex Data Structures - nested lists that contain lists, dicts that contain dicts, etc etc
import re
"""
step 1: look at to see what type the data structure is (list or dict)
step 2: look at the length of the data structure
step 3: slowly drill down into the nested data to figure out how it is setup
-its useful to use variables to name the different layers
step 4: once you understand the data structure write a function to process the data for you
# example of loop that gives us key value varables in for loop though dictionary
test_dict = {'key1': 1, 'key2': 2}
for key, value in test_dict.items():
print(key)
print(value)
# converting a data structure to another format:
# use loops with key value pairs to work with data and build new structures
# example of data structure changes
test_dict = {'key1': 1, 'key2': 2}
test_list = []
for key, value in test_dict.items():
test_list.append({key: value})
print(test_list)
"""
# Serialization
"""
Serialization - converting things in our program to bytes that can be sent across the wire or be writen to a file
Serialization needed to send data from one device to another, must be platform independent
Examples of Serialization are JSON and YMAL
JSON
Is good for computer to computer communications.
Is commonly used in web application APIs
Good for networking devices APIs
Not good if humans have to write JSON, is picky and very condensed
YAML
Easy to read and write in its expanded form
Uses indentation
Serialization - converting things in our program to bytes that can be sent across the wire or be writen to a file
Serialization needed to send data from one device to another, must be platform independent
"""
# YAML
"""
YAML uses indents in its structure
Can be used in compressed or uncompressed
YAML is a super set of JSON and JSON should be able to be understood by YAML parsers
list notation:
---
-first_element
-second_element
dict notation:
---
key1: value1
key2: value2
nested dict example:
---
router1:
device_type: cisco_iso
ip_addr: 1.1.1.1
username: admin
password: pass123
other types:
---
- some string
- "another string"
# the following is a null value
- null
# booleans
- True
- False
- true
- false
- on
- off
- yes
- no
# strings
- "yes" # needed to keep the string yest from
- |
This is a multiline string in YMAL.
It can have more than one line
You do this with the pipe |
- >
  The folded (>) style joins these lines into one string; newlines become spaces
# notice the lack of indentation! the list itself is the value to the key somelist
somelist:
- 0
- 1
- 2
"""
# JSON
"""
JSON example
import json
# JSON will not let you put a comma at the end of the last element!
# this wouldn't work: [1,2,3,] same for dicts
my_data = {
'key1': 'value1',
'key2': 'value2'
}
some_list = list(range(10))
my_data[some_list] = some_list
filename = "output_file.json"
with open(filename, 'wt') as f:
# use json.dump() to write to files
# json.dump(<data>, <file_we_write_to>)
json.dump(my_data, f, indent=4) #indent helps you look at the json a lot easier
# use json.dumps() writes the data out as a string
# load JSON to python example
filename = input("Input filename: )
with open(filename) as f:
data = json.load(f)
"""
# CiscoConfParse
"""
CiscoConfParse - is a python library that helps us work with configs!
-turns our config files into a tree structure making it a lot easier to work with
-Works in cisco and cisco like devices
- this library helps you deal with the interfaces inside cisco config files!
- gives us the children of the global commands (commands for interfaces)
-Very useful when you are dealing with interfaces that have multiple levels of depth
-Saves us from having to write our own parsing logic for nested configuration sections
example:
from ciscoconfparse import CiscoConfParse
# make a CiscoConfParse object out of our file
cisco_obj = CiscoConfParse("config_file")
# to pass in config information from a string
# must first convert your config string into a list
# use the string method .splitlines()
my_config = '''
interface f0/1
duplex auto
speed auto
description port shut down
shut
'''
my_obj = CiscoConfParse(my_config.splitlines())
# use this to see what methods CiscoConfParse has
# notice the find objects methods
# in general we will mostly use find object methods to help us find structures we care about
print(dir(my_obj))
# use help to see how to use various methods the CiscoConfParse class has
print(help(my_obj.find_objects))
# notice this method takes in a regular expression
# remember to use raw strings when working with regular expressions
# this finds all the interfaces and returns a list of the interfaces
interface = my_obj.find_objects(r"^interface")
# we can now use the children method on the interface object we just created to view the interface configurations
interface[0].children()
# loop over interface commands
for child in interface[0].children:
# notice we use the .text method on the child object
print(child.text)
##########################################
# example that finds interfaces with ip addresses
from ciscoconfparse import CiscoConfParse
# remember most of the time we load from a file
config_obj = CiscoConfParse("filepath")
# or load from a string converted to a list with new lines
# This is useful when using netmiko to connect to devices and running show run
config_obj = CiscoConfParse(config_string.splitlines())
# look for any interface with an IP using the .find_objects_w_child method
# note the parentspec argument and the childspec argument
# notice the below finds no ip address
cisco_obj.find_objects_w_child(parentspec=r"^interface", childspec=r"ip address")
# finds only the interfaces with ip addresses using the space to filter the no ip address commands out
cisco_obj.find_objects_w_child(parentspec=r"^interface", childspec=r"^\s+ip address")
# gives us a list of cisco objects that are the parent objects that we found using our filter
match = cisco_obj.find_objects_w_child(parentspec=r"^interface", childspec=r"^\s+ip address")
# look at the children of first cisco interface object we found with an IP
match[0].children
example:
config_obj = CiscoConfParse("config_filepath")
parent = config_obj.find_objects(r"^line con 0")
# only finds and returns one IOSCfgLine object so we can get rid of the list and just work with the object
parent = parent[0]
# see if the object is a parent
parent.is_parent
# make a list of IOSCfgLine objects that are objects of the parents children
children = parent.children
a_child = children[1]
# find if the child is a parent or child
a_child.is_parent
a_child.is_child
# return the parent object to the a_child object
parent = a_child.parent
# does the child have siblings
# (objects that are also children of the same parent, i.e. at the same level of the tree hierarchy)
a_child.siblings
# find all interfaces that do NOT have a matching child using .find_objects_wo_child
without_child = cisco_obj.find_objects_wo_child(parentspec=r"^interface", childspec=r"no ip address")
match = cisco_obj.find_objects(r"crypto map CRYPTO")
# only has one crypto map so we can change variable match to the object itself instead of the list that contains the obj
match = match[0]
crypto_kids = match.children
# you can search for expressions in the parent's children using the .re_search_children() method
crypto_kid = match.re_search_children(r"set pfs ")
"""
#Exercises:
"""
My solutions to the exercises can be found at:
Class3 Reference Solutions
1. Using the below ARP data, create a five element list. Each list element should be a dictionary
with the following keys: "mac_addr", "ip_addr", "interface".
At the end of this process, you should have five dictionaries contained inside a single list.
Protocol Address Age Hardware Addr Type Interface
Internet 10.220.88.1 67 0062.ec29.70fe ARPA Gi0/0/0
Internet 10.220.88.20 29 c89c.1dea.0eb6 ARPA Gi0/0/0
Internet 10.220.88.22 - a093.5141.b780 ARPA Gi0/0/0
Internet 10.220.88.37 104 0001.00ff.0001 ARPA Gi0/0/0
Internet 10.220.88.38 161 0002.00ff.0001 ARPA Gi0/0/0
"""
#
#Regular Expression Special Characters
'''
. Any Single character
+ One or more times
* Zero or more times
^ Beginning of line
$ End of line
\w      Word characters (letters, digits, and underscore)
\W      Non-word characters such as punctuation
\s Whitespace character class
\d Digit character class
\S Non-whitespace character class
[] Construct your own character class
()      Parentheses to save (capture) matched text
'''
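# A short regex sketch using the special characters above; the pattern is written
# for the ARP table below and may need adjusting for other show output.
example_line = "Internet  10.220.88.1            67   0062.ec29.70fe  ARPA   Gi0/0/0"
match = re.search(r"(\d+\.\d+\.\d+\.\d+)\s+\S+\s+(\S+)", example_line)
if match:
    print(match.group(1), match.group(2))    # IP address and MAC address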
arp_data = '''
Protocol Address Age Hardware Addr Type Interface
Internet 10.220.88.1 67 0062.ec29.70fe ARPA Gi0/0/0
Internet 10.220.88.20 29 c89c.1dea.0eb6 ARPA Gi0/0/0
Internet 10.220.88.22 - a093.5141.b780 ARPA Gi0/0/0
Internet 10.220.88.37 104 0001.00ff.0001 ARPA Gi0/0/0
Internet 10.220.88.38 161 0002.00ff.0001 ARPA Gi0/0/0
'''
arp_data = arp_data.strip()
arp_list = arp_data.splitlines()
print(arp_list)
desired_list = []
# Skip the header row; each data row has six fields:
# protocol, address, age, hardware_addr, type, interface
for line in arp_list[1:]:
    protocol, ip_addr, age, mac_addr, arp_type, intf = line.split()
    # Build a new dict for every row; appending one shared dict object
    # each iteration would leave five identical entries in the list.
    desired_list.append({
        'mac_addr': mac_addr,
        'ip_addr': ip_addr,
        'interface': intf,
    })
print(desired_list)
"""
2a. Create a list where each of the list elements is a dictionary representing one of the network devices in the lab. Do this for at least four of the lab devices. The dictionary should have keys corresponding to the device_name, host (i.e. FQDN), username, and password. Use a fictional username/password to avoid checking the lab password into GitHub.
2b. Write the data structure you created in part 2a out to a YAML file. Use expanded YAML format. How could you re-use this YAML file later when creating Netmiko connections to devices?
3. NAPALM using nxos_ssh has the following data structure in one of its unit tests (the below data is in JSON format).
{
"Ethernet2/1": {
"ipv4": {
"1.1.1.1": {
"prefix_length": 24
}
}
},
"Ethernet2/2": {
"ipv4": {
"2.2.2.2": {
"prefix_length": 27
},
"3.3.3.3": {
"prefix_length": 25
}
}
},
"Ethernet2/3": {
"ipv4": {
"4.4.4.4": {
"prefix_length": 16
}
},
"ipv6": {
"fe80::2ec2:60ff:fe4f:feb2": {
"prefix_length": 64
},
"2001:db8::1": {
"prefix_length": 10
}
}
},
"Ethernet2/4": {
"ipv6": {
"fe80::2ec2:60ff:fe4f:feb2": {
"prefix_length": 64
},
"2001:11:2233::a1": {
"prefix_length": 24
},
"2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2": {
"prefix_length": 64
}
}
}
}
Read this JSON data in from a file.
From this data structure extract all of the IPv4 and IPv6 addresses that are used on this NXOS device. From this data create two lists: 'ipv4_list' and 'ipv6_list'. The 'ipv4_list' should be a list of all of the IPv4 addresses including prefixes; the 'ipv6_list' should be a list of all of the IPv6 addresses including prefixes.
4. You have the following JSON ARP data from an Arista switch:
{
"dynamicEntries": 2,
"ipV4Neighbors": [
{
"hwAddress": "dc38.e111.97cf",
"address": "172.17.17.1",
"interface": "Ethernet45",
"age": 0
},
{
"hwAddress": "90e2.ba5c.25fd",
"address": "172.17.16.1",
"interface": "Ethernet36",
"age": 0
}
],
"notLearnedEntries": 0,
"totalEntries": 2,
"staticEntries": 0
}
From a file, read this JSON data into your Python program. Process this ARP data and return a dictionary where the dictionary keys are the IP addresses and the dictionary values are the MAC addresses. Print this dictionary to standard output.
5. In your lab environment, there is a file located at ~/.netmiko.yml. This file contains all of the devices used in the lab. Create a Python program that processes this YAML file and then uses Netmiko to connect to the Cisco3 router. Print out the router prompt from this device.
Note, the device dictionaries in the .netmiko.yml file use key-value pairs designed to work directly with Netmiko. The .netmiko.yml also contains group definitions for: cisco, arista, juniper, and nxos groups. These group definitions are lists of devices. Once again, don't check the .netmiko.yml into GitHub.
6. Use Netmiko to retrieve 'show run' from the Cisco4 device. Feed this configuration into CiscoConfParse.
Use CiscoConfParse to find all of the interfaces on Cisco4 that have an IP address. Print out the interface name and IP address for each interface. Your solution should work if there is more than one IP address configured on Cisco4. For example, if you configure a loopback interface on Cisco4 with an IP address, then your solution should continue to work. The output from this program should look similar to the following:
$ python confparse_ex6.py
Interface Line: interface GigabitEthernet0/0/0
IP Address Line: ip address 10.220.88.23 255.255.255.0
7. You have the following BGP configuration from a Cisco IOS-XR router:
router bgp 44
bgp router-id 10.220.88.38
address-family ipv4 unicast
!
neighbor 10.220.88.20
remote-as 42
description pynet-rtr1
address-family ipv4 unicast
route-policy ALLOW in
route-policy ALLOW out
!
!
neighbor 10.220.88.32
remote-as 43
address-family ipv4 unicast
route-policy ALLOW in
route-policy ALLOW out
From this BGP configuration, retrieve all of BGP peer IP addresses and their corresponding remote-as. Return a list of tuples. The tuples should be (neighbor_ip, remote_as). Print your data-structure to standard output.
Your output should look similar to the following. Use ciscoconfparse to accomplish this.
BGP Peers:
[('10.220.88.20', '42'), ('10.220.88.32', '43')]
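A minimal sketch for exercise 7, assuming the BGP configuration is saved (with its indentation intact) in a file such as 'bgp_config.txt':

from ciscoconfparse import CiscoConfParse

parse = CiscoConfParse("bgp_config.txt")

bgp_peers = []
for neighbor in parse.find_objects("^ *neighbor "):
    neighbor_ip = neighbor.text.split()[-1]
    for child in neighbor.children:
        if "remote-as" in child.text:
            remote_as = child.text.split()[-1]
            bgp_peers.append((neighbor_ip, remote_as))

print("BGP Peers:")
print(bgp_peers)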
""" |
the-stack_106_29135 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import netaddr
from oslo.serialization import jsonutils
import requests
from requests import exceptions as r_exc
from neutron.i18n import _LE, _LW
from neutron.openstack.common import log as logging
TIMEOUT = 20.0
LOG = logging.getLogger(__name__)
HEADER_CONTENT_TYPE_JSON = {'content-type': 'application/json'}
URL_BASE = 'https://%(host)s/api/v1/%(resource)s'
# CSR RESTapi URIs
URI_VPN_IPSEC_POLICIES = 'vpn-svc/ipsec/policies'
URI_VPN_IPSEC_POLICIES_ID = URI_VPN_IPSEC_POLICIES + '/%s'
URI_VPN_IKE_POLICIES = 'vpn-svc/ike/policies'
URI_VPN_IKE_POLICIES_ID = URI_VPN_IKE_POLICIES + '/%s'
URI_VPN_IKE_KEYRINGS = 'vpn-svc/ike/keyrings'
URI_VPN_IKE_KEYRINGS_ID = URI_VPN_IKE_KEYRINGS + '/%s'
URI_VPN_IKE_KEEPALIVE = 'vpn-svc/ike/keepalive'
URI_VPN_SITE_TO_SITE = 'vpn-svc/site-to-site'
URI_VPN_SITE_TO_SITE_ID = URI_VPN_SITE_TO_SITE + '/%s'
URI_VPN_SITE_TO_SITE_STATE = URI_VPN_SITE_TO_SITE + '/%s/state'
URI_VPN_SITE_ACTIVE_SESSIONS = URI_VPN_SITE_TO_SITE + '/active/sessions'
URI_ROUTING_STATIC_ROUTES = 'routing-svc/static-routes'
URI_ROUTING_STATIC_ROUTES_ID = URI_ROUTING_STATIC_ROUTES + '/%s'
def make_route_id(cidr, interface):
"""Build ID that will be used to identify route for later deletion."""
net = netaddr.IPNetwork(cidr)
return '%(network)s_%(prefix)s_%(interface)s' % {
'network': net.network,
'prefix': net.prefixlen,
'interface': interface}
class CsrRestClient(object):
"""REST CsrRestClient for accessing the Cisco Cloud Services Router."""
def __init__(self, settings):
self.port = str(settings.get('protocol_port', 55443))
self.host = ':'.join([settings.get('rest_mgmt_ip', ''), self.port])
self.auth = (settings['username'], settings['password'])
self.inner_if_name = settings.get('inner_if_name', '')
self.outer_if_name = settings.get('outer_if_name', '')
self.token = None
self.vrf = settings.get('vrf', '')
self.vrf_prefix = 'vrf/%s/' % self.vrf if self.vrf else ""
self.status = requests.codes.OK
self.timeout = settings.get('timeout')
self.max_tries = 5
self.session = requests.Session()
def _response_info_for(self, response, method):
"""Return contents or location from response.
For a POST or GET with a 200 response, the response content
is returned.
For a POST with a 201 response, return the header's location,
which contains the identifier for the created resource.
If there is an error, return the response content, so that
it can be used in error processing ('error-code', 'error-message',
and 'detail' fields).
"""
if method in ('POST', 'GET') and self.status == requests.codes.OK:
LOG.debug('RESPONSE: %s', response.json())
return response.json()
if method == 'POST' and self.status == requests.codes.CREATED:
return response.headers.get('location', '')
if self.status >= requests.codes.BAD_REQUEST and response.content:
if 'error-code' in response.content:
content = jsonutils.loads(response.content)
LOG.debug("Error response content %s", content)
return content
def _request(self, method, url, **kwargs):
"""Perform REST request and save response info."""
try:
LOG.debug("%(method)s: Request for %(resource)s payload: "
"%(payload)s",
{'method': method.upper(), 'resource': url,
'payload': kwargs.get('data')})
start_time = time.time()
response = self.session.request(method, url, verify=False,
timeout=self.timeout, **kwargs)
LOG.debug("%(method)s Took %(time).2f seconds to process",
{'method': method.upper(),
'time': time.time() - start_time})
except (r_exc.Timeout, r_exc.SSLError) as te:
# Should never see SSLError, unless requests package is old (<2.0)
timeout_val = 0.0 if self.timeout is None else self.timeout
LOG.warning(_LW("%(method)s: Request timeout%(ssl)s "
"(%(timeout).3f sec) for CSR(%(host)s)"),
{'method': method,
'timeout': timeout_val,
'ssl': '(SSLError)'
if isinstance(te, r_exc.SSLError) else '',
'host': self.host})
self.status = requests.codes.REQUEST_TIMEOUT
except r_exc.ConnectionError:
LOG.exception(_LE("%(method)s: Unable to connect to "
"CSR(%(host)s)"),
{'method': method, 'host': self.host})
self.status = requests.codes.NOT_FOUND
except Exception as e:
LOG.error(_LE("%(method)s: Unexpected error for CSR (%(host)s): "
"%(error)s"),
{'method': method, 'host': self.host, 'error': e})
self.status = requests.codes.INTERNAL_SERVER_ERROR
else:
self.status = response.status_code
LOG.debug("%(method)s: Completed [%(status)s]",
{'method': method, 'status': self.status})
return self._response_info_for(response, method)
def authenticate(self):
"""Obtain a token to use for subsequent CSR REST requests.
This is called when there is no token yet, or if the token has expired
and attempts to use it resulted in an UNAUTHORIZED REST response.
"""
url = URL_BASE % {'host': self.host, 'resource': 'auth/token-services'}
headers = {'Content-Length': '0',
'Accept': 'application/json'}
headers.update(HEADER_CONTENT_TYPE_JSON)
LOG.debug("%(auth)s with CSR %(host)s",
{'auth': 'Authenticating' if self.token is None
else 'Reauthenticating', 'host': self.host})
self.token = None
response = self._request("POST", url, headers=headers, auth=self.auth)
if response:
self.token = response['token-id']
LOG.debug("Successfully authenticated with CSR %s", self.host)
return True
LOG.error(_LE("Failed authentication with CSR %(host)s [%(status)s]"),
{'host': self.host, 'status': self.status})
def _do_request(self, method, resource, payload=None, more_headers=None,
full_url=False):
"""Perform a REST request to a CSR resource.
If this is the first time interacting with the CSR, a token will
be obtained. If the request fails, due to an expired token, the
token will be obtained and the request will be retried once more.
"""
if self.token is None:
if not self.authenticate():
return
if full_url:
url = resource
else:
            url = URL_BASE % {'host': self.host, 'resource': resource}
headers = {'Accept': 'application/json', 'X-auth-token': self.token}
if more_headers:
headers.update(more_headers)
if payload:
payload = jsonutils.dumps(payload)
response = self._request(method, url, data=payload, headers=headers)
if self.status == requests.codes.UNAUTHORIZED:
if not self.authenticate():
return
headers['X-auth-token'] = self.token
response = self._request(method, url, data=payload,
headers=headers)
if self.status != requests.codes.REQUEST_TIMEOUT:
return response
LOG.error(_LE("%(method)s: Request timeout for CSR(%(host)s)"),
{'method': method, 'host': self.host})
def get_request(self, resource, full_url=False):
"""Perform a REST GET requests for a CSR resource."""
return self._do_request('GET', resource, full_url=full_url)
def post_request(self, resource, payload=None):
"""Perform a POST request to a CSR resource."""
return self._do_request('POST', resource, payload=payload,
more_headers=HEADER_CONTENT_TYPE_JSON)
def put_request(self, resource, payload=None):
"""Perform a PUT request to a CSR resource."""
return self._do_request('PUT', resource, payload=payload,
more_headers=HEADER_CONTENT_TYPE_JSON)
def delete_request(self, resource):
"""Perform a DELETE request on a CSR resource."""
return self._do_request('DELETE', resource,
more_headers=HEADER_CONTENT_TYPE_JSON)
# VPN Specific APIs
def create_ike_policy(self, policy_info):
base_ike_policy_info = {u'version': u'v1',
u'local-auth-method': u'pre-share'}
base_ike_policy_info.update(policy_info)
return self.post_request(URI_VPN_IKE_POLICIES,
payload=base_ike_policy_info)
def create_ipsec_policy(self, policy_info):
base_ipsec_policy_info = {u'mode': u'tunnel'}
base_ipsec_policy_info.update(policy_info)
return self.post_request(URI_VPN_IPSEC_POLICIES,
payload=base_ipsec_policy_info)
def create_pre_shared_key(self, psk_info):
return self.post_request(self.vrf_prefix + URI_VPN_IKE_KEYRINGS,
payload=psk_info)
def create_ipsec_connection(self, connection_info):
base_conn_info = {
u'vpn-type': u'site-to-site',
u'ip-version': u'ipv4',
u'local-device': {
u'tunnel-ip-address': self.outer_if_name,
u'ip-address': self.inner_if_name
}
}
connection_info.update(base_conn_info)
if self.vrf:
connection_info[u'tunnel-vrf'] = self.vrf
return self.post_request(self.vrf_prefix + URI_VPN_SITE_TO_SITE,
payload=connection_info)
def configure_ike_keepalive(self, keepalive_info):
base_keepalive_info = {u'periodic': True}
keepalive_info.update(base_keepalive_info)
return self.put_request(URI_VPN_IKE_KEEPALIVE, keepalive_info)
def create_static_route(self, route_info):
return self.post_request(self.vrf_prefix + URI_ROUTING_STATIC_ROUTES,
payload=route_info)
def delete_static_route(self, route_id):
return self.delete_request(
self.vrf_prefix + URI_ROUTING_STATIC_ROUTES_ID % route_id)
def set_ipsec_connection_state(self, tunnel, admin_up=True):
"""Set the IPSec site-to-site connection (tunnel) admin state.
Note: When a tunnel is created, it will be admin up.
"""
info = {u'vpn-interface-name': tunnel, u'enabled': admin_up}
return self.put_request(
self.vrf_prefix + URI_VPN_SITE_TO_SITE_STATE % tunnel, info)
def delete_ipsec_connection(self, conn_id):
return self.delete_request(
self.vrf_prefix + URI_VPN_SITE_TO_SITE_ID % conn_id)
def delete_ipsec_policy(self, policy_id):
return self.delete_request(URI_VPN_IPSEC_POLICIES_ID % policy_id)
def delete_ike_policy(self, policy_id):
return self.delete_request(URI_VPN_IKE_POLICIES_ID % policy_id)
def delete_pre_shared_key(self, key_id):
return self.delete_request(
self.vrf_prefix + URI_VPN_IKE_KEYRINGS_ID % key_id)
def read_tunnel_statuses(self):
results = self.get_request(self.vrf_prefix +
URI_VPN_SITE_ACTIVE_SESSIONS)
if self.status != requests.codes.OK or not results:
return []
tunnels = [(t[u'vpn-interface-name'], t[u'status'])
for t in results['items']]
return tunnels
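# Illustrative usage sketch (not part of the original module); the management
# address and credentials below are placeholders, not real values:
#
#   client = CsrRestClient({'rest_mgmt_ip': '192.0.2.10',
#                           'protocol_port': 55443,
#                           'username': 'stack',
#                           'password': 'cisco',
#                           'timeout': TIMEOUT})
#   if client.authenticate():
#       print(client.read_tunnel_statuses())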
|
the-stack_106_29136 |
import frappe
from frappe.patches.v7_0.re_route import update_routes
from frappe.installer import remove_from_installed_apps
def execute():
if 'knowledge_base' in frappe.get_installed_apps():
frappe.reload_doc('website', 'doctype', 'help_category')
frappe.reload_doc('website', 'doctype', 'help_article')
update_routes(['Help Category', 'Help Article'])
remove_from_installed_apps('knowledge_base')
# remove module def
if frappe.db.exists('Module Def', 'Knowledge Base'):
frappe.delete_doc('Module Def', 'Knowledge Base')
# set missing routes
for doctype in ('Help Category', 'Help Article'):
for d in frappe.get_all(doctype, fields=['name', 'route']):
if not d.route:
doc = frappe.get_doc(doctype, d.name)
doc.set_route()
doc.db_update() |
the-stack_106_29137 | """
Characterization functions
"""
import pprint
pp = pprint.PrettyPrinter(indent=4)
from typing import Optional, TypeVar, Union, Tuple, List
import os
import sys
import shutil
import pandas as pd
import numpy as np
from collections import Counter
#ignore divide by zero warinings
import warnings
warnings.filterwarnings("ignore", message="divide by zero encountered in divide")
warnings.filterwarnings("ignore", message="divide by zero encountered")
warnings.filterwarnings("ignore", message="invalid value encountered")
from ligning.rules import linkage_names, monomer_types
from ligning.utils import formula_to_MW, graph_to_smile, nxgraph, molecule, nparray, graph_to_mol, smiles_to_formula
import ligning.utils as ut
from rdkit.Chem.Descriptors import ExactMolWt
from rdkit.Chem import MolFromSmiles
from ligning.polymer import Polymer
# For each monomer
# H - 11 nodes, no OCH3 group
# G - 13 nodes, 1 OCH3 group
# S - 15 nodes, 2 OCH3 group
def get_metrics_polymer(
P: Polymer,
additional: Optional[bool] = False,
cal_MW: Optional[bool] = False
) -> Tuple[nparray, int, float]:
"""Get the metrics and count for a polymer
Parameters
----------
P : Polymer
polymer object
additional : Optional[bool], optional
include additional metrics, by default False
cal_MW : bool, optional
flag to calculate molecular weight, by default False
Returns
-------
metrics_P : nparray
the metrics array
monomer_count : int
the number of monomers
MW : float
the molecular weight
"""
MW = None
ch_polymer = Characterize(P)
ch_polymer.get_metrics(cal_MW=cal_MW, additional=additional)
metrics_P = ch_polymer.metrics
monomer_count = ch_polymer.monomer_count
if cal_MW:
MW = ch_polymer.MW
return metrics_P, monomer_count, MW
def get_counts_polymer(
P: Polymer,
additional: Optional[bool] = False,
cal_MW: Optional[bool] = False
) -> Tuple[nparray, int, float]:
"""Get the counts array for a polymer
Parameters
----------
P : Polymer
polymer object
additional : Optional[bool], optional
include additional metrics, by default False
cal_MW : bool, optional
flag to calculate molecular weight, by default False
Returns
-------
counts_P : nparray
the counts array
monomer_count : int
the number of monomers
MW : float
the molecular weight
"""
MW = None
ch_polymer = Characterize(P)
ch_polymer.get_counts(cal_MW=cal_MW, additional=additional)
counts_P = ch_polymer.counts
monomer_count = ch_polymer.monomer_count
if cal_MW:
MW = ch_polymer.MW
return counts_P, monomer_count, MW
class CharacterizeGraph():
"""
    polymer characterization object
"""
def __init__(self, G: nxgraph):
"""Initialize with a polymer graph
Parameters
----------
G : nxgraph
polymer graph
"""
self.G = G
self.Mol = None
# Initialize other properties
self.mtype_count = None
self.monomer_count = None
self.linkages_count = None
self.OCH3_count = None
self.OH_count = None
self.MW = None
self.metrics = None
self.mol = None
self.smiles = None
def count_types(self) -> dict:
"""count the monomer types
Returns
-------
mtype_count : dict
the monomer type counts
"""
mtypes = [self.G.nodes[ni]['mtype'] for ni in list(self.G.nodes)]
mtype_count = {'H': mtypes.count('H')/11, # divide by the number of non-H atoms
'G': mtypes.count('G')/13,
'S': mtypes.count('S')/15}
self.mtype_count = mtype_count
return self.mtype_count
def count_monomers(self) -> float:
"""count the total number of monomers, i.e. the polymer size
Returns
-------
monomer_count : float
the polymer size
"""
self.monomer_count = np.sum(list(self.mtype_count.values()))
return self.monomer_count
def count_linkages(self) -> dict:
"""count the type of linkages
Returns
-------
linkages_count: dict
the linkage type counts
"""
bonds = [self.G.edges[ei]['btype'] for ei in list(self.G.edges)]
linkages_count = {}
for linkage_name_i in linkage_names:
linkages_count[linkage_name_i] = float(bonds.count(linkage_name_i)) #the bonds under the same name
# adjust for double counting
linkages_count['4-O-5'] = linkages_count['4-O-5'] /2
linkages_count['beta-O-4'] = linkages_count['beta-O-4'] /2
linkages_count['beta-5'] = linkages_count['beta-5'] /2
linkages_count['beta-beta'] = linkages_count['beta-beta'] /3
self.linkages_count = linkages_count
return self.linkages_count
def count_OCH3(self) -> float:
"""count the number of -OCH3 group
Returns
-------
OCH3_count : float
-OCH3 counts
"""
groups = [self.G.nodes[ni]['group'] for ni in list(self.G.nodes)]
        OCH3_count = float(groups.count('OCH3'))/2 # divide by 2 since C and O are both marked as 'OCH3'
self.OCH3_count = OCH3_count
return self.OCH3_count
def count_OH(self) -> float:
"""count the number of available -OH3 group
Returns
-------
OH_count : float
-OH counts
"""
groups = [self.G.nodes[ni]['group'] for ni in list(self.G.nodes)]
OH_4_indices = [ni for ni, gi in enumerate(groups) if gi == '4OH']
OH_9_indices = [ni for ni, gi in enumerate(groups) if gi == '9OH']
OH_4_available = [ni for ni in OH_4_indices if len(list(self.G.neighbors(ni))) == 1]
OH_9_available = [ni for ni in OH_9_indices if len(list(self.G.neighbors(ni))) == 1]
self.OH_count = float(len(OH_4_available) + len(OH_9_available))
return self.OH_count
def cal_MW(self) -> float:
"""calculate the molecular weight
Returns
-------
float
Molecular weight of the polymer
"""
self.smiles = graph_to_smile(self.G)
#self.Mol = MolFromSmiles(self.smiles)
#self.MW = ExactMolWt(self.Mol)
self.formula = smiles_to_formula(self.smiles)
self.MW = formula_to_MW(self.formula)
return self.MW
def cal_all(self, cal_MW = False, print_flag = True):
"""Main count function
Parameters
----------
cal_MW : bool, optional
flag to calculate molecular weight, by default False
print_flag : bool, optional
flag to print all properties, by default True
"""
# Count the types of monomers
self.count_types()
# Count the total number of monomers
self.count_monomers()
# Count the type of bonds
self.count_linkages()
# Count the number of -OCH3
self.count_OCH3()
# Count the number of available -OH
self.count_OH()
# Calculate the molecular weight
if cal_MW:
self.cal_MW()
# Print the output as a dictionary
if print_flag:
pp.pprint(vars(self))
def cal_metrics(self, cal_MW: Optional[bool] = False) -> nparray:
"""calculate the 10 by 1 metrics array
Returns
-------
metrics : nparray
metrics array
"""
self.cal_all(cal_MW, print_flag = False)
monomer_distribution_input = list(self.mtype_count.values())
linkage_distribution_input = list(self.linkages_count.values())
# Normalize the distribution to 0-1
monomer_distribution = np.array(monomer_distribution_input)/np.sum(monomer_distribution_input)
linkage_distribution = np.array(linkage_distribution_input)/np.sum(linkage_distribution_input)
#print(linkage_distribution_input)
#print(linkage_distribution)
        # prevent bad division
if np.isnan(np.sum(linkage_distribution)):
linkage_distribution = np.zeros_like(linkage_distribution)
# Concatenate the metrics
self.metrics = np.concatenate((monomer_distribution, linkage_distribution), axis= 0)
class Characterize(CharacterizeGraph):
"""
polymer characterization object
"""
def __init__(self, P: Polymer):
"""Initialize with a polymer object
Parameters
----------
P : Polymer
polymer object
"""
super().__init__(P.G)
self.bigG = P.bigG
self.connections_count = None
self.branching_coeff = None
def count_types(self) -> dict:
"""count the monomer types
Returns
-------
mtype_count : dict
the monomer type counts
"""
mtypes = [self.bigG.nodes[ni]['mtype'] for ni in list(self.bigG.nodes)]
mtype_count = {'H': mtypes.count('H'),
'G': mtypes.count('G'),
'S': mtypes.count('S')}
self.mtype_count = mtype_count
return self.mtype_count
def count_monomers(self) -> int:
"""count the total number of monomers, i.e. the polymer size
Returns
-------
monomer_count : float
the polymer size
"""
self.monomer_count = len(self.bigG)
return self.monomer_count
def count_linkages(self) -> dict:
"""count the type of linkages
Returns
-------
linkages_count: dict
the linkage type counts
"""
bonds = [self.bigG.edges[ei]['btype'] for ei in list(self.bigG.edges)]
linkages_count = {}
for linkage_name_i in linkage_names:
linkages_count[linkage_name_i] = bonds.count(linkage_name_i) #the bonds under the same name
self.linkages_count = linkages_count
return self.linkages_count
def count_OCH3(self) -> float:
return super().count_OCH3()
def count_OH(self) -> float:
return super().count_OH()
def cal_MW(self) -> float:
return super().cal_MW()
def count_connections(self) -> dict:
"""Count the number of connections of each monomer
Returns
-------
connections_count: dict
the number of connections
"""
connections = [self.bigG.degree(ni) for ni in list(self.bigG.nodes)]
self.connections_count = dict(Counter(connections))
return self.connections_count
def cal_branching(self) -> float:
"""calculate the branching coefficient
Returns
-------
branching_coeff : float
the branching coefficient:
ratio of branched monomers to total monomers
"""
if self.connections_count is None:
self.count_connections()
if self.monomer_count is None:
self.count_monomers()
n_branched = 0
# Count the number of branched monomers
# a branched monomer is one that is bonded to three or more monomers
for ki, vi in self.connections_count.items():
if ki >= 3:
n_branched += vi
# number of branched monomers
self.n_branched = n_branched
self.branching_coeff = n_branched/self.monomer_count
return self.branching_coeff
def cal_all(self, cal_MW = False, print_flag = True):
"""Main count function
Parameters
----------
cal_MW : bool, optional
flag to calculate molecular weight, by default False
print_flag : bool, optional
flag to print all properties, by default True
"""
# Count the types of monomers
self.count_types()
# Count the total number of monomers
self.count_monomers()
# Count the type of bonds
self.count_linkages()
# Count the number of -OCH3
self.count_OCH3()
# Count the number of available -OH
self.count_OH()
# Count the connections
self.count_connections()
# Calculate the branching coefficient
self.cal_branching()
# Calculate the molecular weight
if cal_MW:
self.cal_MW()
# Print the output as a dictionary
if print_flag:
pp.pprint(vars(self))
def get_metrics(self, additional: Optional[bool] = False, cal_MW: Optional[bool] = False) -> nparray:
"""Get the metrics array for a polymer
Parameters
----------
additional : Optional[bool], optional
include additional metrics, by default False
Returns
-------
metrics : nparray
metrics array
"""
self.cal_all(cal_MW, print_flag = False)
monomer_distribution_input = list(self.mtype_count.values())
linkage_distribution_input = list(self.linkages_count.values())
# Normalize the distribution to 0-1
monomer_distribution = np.array(monomer_distribution_input)/np.sum(monomer_distribution_input)
linkage_distribution = np.array(linkage_distribution_input)/np.sum(linkage_distribution_input)
#print(linkage_distribution_input)
#print(linkage_distribution)
        # prevent bad division
if np.isnan(np.sum(linkage_distribution)):
linkage_distribution = np.zeros_like(linkage_distribution)
# Concatenate the metrics
self.metrics = np.concatenate((monomer_distribution, linkage_distribution), axis= 0)
# to include additional features - the branching coefficient
if additional:
metrics_additional = np.array([self.branching_coeff])
self.metrics = np.concatenate((self.metrics, metrics_additional), axis= 0)
return self.metrics
def get_counts(self, additional: Optional[bool] = False, cal_MW: Optional[bool] = False) -> nparray:
"""Get the count array for a polymer
Parameters
----------
additional : Optional[bool], optional
include additional metrics, by default False
Returns
-------
counts : nparray
counts array
"""
self.cal_all(cal_MW, print_flag = False)
monomer_counts = list(self.mtype_count.values())
linkages_counts = list(self.linkages_count.values())
# Concatenate the counts
self.counts = np.concatenate((monomer_counts, linkages_counts), axis= 0)
# to include additional features - the branching coefficient
if additional:
counts_additional = np.array([self.n_branched])
self.counts = np.concatenate((self.counts, counts_additional), axis= 0)
return self.counts
class Population():
"""
Characterize a population of polymers
"""
def __init__(
self,
population: List[Polymer],
name: Optional[str]='lignin_x',
InputPath: Optional[str] =os.path.abspath(os.getcwd()),
ResultsName: Optional[str]='results',
TrialIndex: Optional[str]=None):
"""initalize a population
Parameters
----------
population : List[Polymer]
a population of polymer objects
name : Optional[str], optional
name of the population, by default 'lignin_x'
InputPath : str, optional
the input path, by default os.path.abspath(os.getcwd())
ResultsName : str, optional
results folder name, by default 'results'
"""
self.population = population
self.name = name
# Set the directory structures
OutputLibrary = name + '_libray.csv'
OutputStats = name + '_population_stats.csv'
        if TrialIndex is None:
ResultsPath = os.path.join(InputPath, ResultsName, name)
else:
ResultsPath = os.path.join(InputPath, ResultsName, name, "i"+TrialIndex)
self.OutputPathLibrary = os.path.join(ResultsPath, OutputLibrary)
self.OutputPathStats = os.path.join(ResultsPath, OutputStats)
# Set the columns names for data
metrics_names = ['branching_coeff', 'MW', 'monomer_count', 'OH_count', 'OCH3_count', 'smiles']
self.column_names = monomer_types + linkage_names + metrics_names
# Initialize the stats and data
self.characterization_objects = []
self.stats = None
self.data = None
def characterize_all(self):
"""
Characterize each individual polymer
"""
if len(self.characterization_objects) == 0:
for polymer_i in self.population:
ch_i = Characterize(polymer_i)
ch_i.cal_all(print_flag=False, cal_MW=True)
self.characterization_objects.append(ch_i)
else:
pass
def analyze(self):
"""
Analyze the population
Output the data and stats both in csv files
"""
# delete the previous csv files
ut.clean_csv_cache(self.OutputPathLibrary)
ut.clean_csv_cache(self.OutputPathStats)
self.characterize_all()
for ch_i in self.characterization_objects:
row_i = []
row_i += list(ch_i.mtype_count.values())
row_i += list(ch_i.linkages_count.values())
row_i.append(ch_i.branching_coeff)
row_i.append(ch_i.MW)
row_i.append(ch_i.monomer_count)
row_i.append(ch_i.OH_count)
row_i.append(ch_i.OCH3_count)
row_i.append(ch_i.smiles)
# write to the data output file
ut.write_output_on_individual(row_i, self.column_names, self.OutputPathLibrary)
# Calculate the population stats
population_data = pd.read_csv(self.OutputPathLibrary)
        numerical_metrics = list(population_data.mean(numeric_only=True).index)
        population_mean = np.array([population_data.mean(numeric_only=True)])
        population_std = np.array([population_data.std(numeric_only=True)])
population_CI = np.array([ut.cal_CI(np.array(population_data[ci])) for ci in numerical_metrics])
stats = np.concatenate((population_mean, population_std, population_CI.T), axis=0)
pd_stats = pd.DataFrame(stats, index = ['mean', 'std', 'CI_lower', 'CI_upper'], columns=numerical_metrics)
pd_stats.to_csv(self.OutputPathStats) #,index_label=False)
# update the self
self.stats = pd_stats
self.data = population_data
def get_counts(self, additional: Optional[bool] = False):
"""Get the metrics matrix for the entire population
Parameters
----------
additional : Optional[bool], optional
include additional metrics, by default False
Returns
-------
metrics : nparray
metrics matrix
"""
self.characterize_all()
counts_matrix = []
for ch_i in self.characterization_objects:
counts_matrix.append(list(ch_i.get_counts(additional=additional)))
self.counts = np.array(counts_matrix)
self.counts_sum = np.sum(self.counts, axis = 0)
return self.counts
def get_metrics_mean(self, additional: Optional[bool] = False):
"""Get the metrics matrix for the entire population
Parameters
----------
additional : Optional[bool], optional
include additional metrics, by default False
Returns
-------
metrics : nparray
metrics mean array
"""
self.get_counts(additional)
MW = []
monomer_counts = []
# if the MW and monomer count have not been calculated
if self.data is None:
for ch_i in self.characterization_objects:
MW.append(ch_i.MW)
monomer_counts.append(ch_i.monomer_count)
# Extract MW and monomer count directly from the data
else:
MW = self.data['MW']
monomer_counts = self.data['monomer_count']
MW = np.array(MW)
monomer_counts = np.array(monomer_counts)
# Compute number and weight average
self.number_average_MW = ut.MW_array_to_number_average(MW)
self.weight_average_MW = ut.MW_array_to_weight_average(MW)
self.monomer_counts_average = np.mean(monomer_counts)
# Construct an array for the means
metric_mean = list(ut.counts_to_metrics(self.counts_sum, additional= additional))
metric_mean += [self.number_average_MW, self.weight_average_MW, self.monomer_counts_average]
self.metric_mean = np.array(metric_mean)
self.metrics_mean_dict = {}
column_names_population = monomer_types + linkage_names
if additional:
column_names_population += ['branching_coeff']
column_names_population += ['MW', 'MW_weighted', 'monomer_count']
for ci, mi in zip(column_names_population, self.metric_mean):
self.metrics_mean_dict[ci] = mi
return self.metrics_mean_dict
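# Illustrative usage sketch (assumes `polymers` is a list of Polymer objects
# built elsewhere; the population name is a placeholder):
#
#   pop = Population(polymers, name='lignin_demo')
#   pop.analyze()                    # writes the polymer library and stats CSVs
#   means = pop.get_metrics_mean()   # population-averaged metrics dictionary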
|
the-stack_106_29138 | from common import (
checks,
host_helpers,
plugintools,
)
SERVICES = ["etcdctl",
"calicoctl",
"flanneld",
"kubectl2",
"kubelet",
"containerd-shim",
"containerd",
"dockerd",
"kubelet",
"kube-proxy",
"calico-node",
]
# Snaps that only exist in a K8s deployment
SNAPS_K8S = [r'charm[\S]+',
r'conjure-up',
r'cdk-addons',
r'helm',
r'kubernetes-[\S]+',
r'kube-[\S]+',
r'kubectl',
r'kubelet',
r'kubeadm',
r'kubefed',
]
# Snaps that are used in a K8s deployment
SNAPS_DEPS = [r'core[0-9]*',
r'docker',
r'go',
r'vault',
r'etcd',
]
class KubernetesChecksBase(plugintools.PluginPartBase,
checks.ServiceChecksBase):
def __init__(self):
super().__init__(SERVICES, hint_range=(0, 3))
self.nethelp = host_helpers.HostNetworkingHelper()
@property
def flannel_ports(self):
ports = []
for port in self.nethelp.host_interfaces:
if "flannel" in port.name:
ports.append(port)
return ports
@property
def bind_interfaces(self):
"""
Fetch interfaces used by Kubernetes.
"""
return {'flannel': self.flannel_ports}
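# Illustrative usage sketch (assumes the surrounding plugin framework and its
# data root are available, which this snippet does not set up):
#
#   k8s_checks = KubernetesChecksBase()
#   print(k8s_checks.bind_interfaces)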
|
the-stack_106_29139 | import argparse
import torch
import torchvision
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.1307,), (0.3081,))
])
def training_set():
dataset = torchvision.datasets.MNIST('data', train=True, download=True, transform=transform)
return dataset
def test_set():
dataset = torchvision.datasets.MNIST('data', train=False, transform=transform)
return dataset
def main():
parser = argparse.ArgumentParser(description='MNIST PyTorch: Download Dataset')
args = parser.parse_args()
print('Downloading MNIST dataset...')
# Train set.
print('Train Set')
trainset = training_set()
print(trainset)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=True)
train_data = enumerate(train_loader)
batch, (data, target) = next(train_data)
print('Shape: ' + str(data.shape))
# Test set.
print('Test Set')
testset = test_set()
print(testset)
test_loader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=True)
test_data = enumerate(test_loader)
batch, (data, target) = next(test_data)
print('Shape: ' + str(data.shape))
if __name__ == '__main__':
main() |
the-stack_106_29141 | class Solution:
"""
@param x: an integer
@param y: an integer
@return: return an integer, denote the Hamming Distance between two integers
"""
def hammingDistance(self, x, y):
# write your code here
count=0
xor=x^y
while xor:
if xor%2!=0:
count+=1
xor=xor>>1
return count |
the-stack_106_29142 | # Copyright 2019 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Code to download and parse the Kinetics dataset for TensorFlow models.
The [Kinetics data set](
https://deepmind.com/research/open-source/open-source-datasets/kinetics/)
is a data set for human action recognition maintained by DeepMind and Google.
This script downloads the annotations and prepares data from similar annotations
if local video files are available.
This script does not provide any means of accessing YouTube videos.
Running this code as a module generates the data set on disk. First, the
required files are downloaded (_download_data) which enables constructing the
label map. Then (in generate_examples), for each split in the data set, the
metadata is generated from the annotations for each example
(_generate_metadata), and MediaPipe is used to fill in the video frames
(_run_mediapipe). This script processes local video files defined in a custom
CSV in a comparable manner to the Kinetics data set for evaluating and
predicting values on your own data. The data set is written to disk as a set of
numbered TFRecord files.
The custom CSV format must match the Kinetics data set format, with columns
corresponding to [[label_name], video, start, end, split] followed by lines with
those fields. (Label_name is optional.) These field names can be used to
construct the paths to the video files using the Python string formatting
specification and the video_path_format_string flag:
--video_path_format_string="/path/to/video/{video}.mp4"
Generating the data on disk can take considerable time and disk space.
(Image compression quality is the primary determiner of disk usage. TVL1 flow
determines runtime.)
Once the data is on disk, reading the data as a tf.data.Dataset is accomplished
with the following lines:
kinetics = Kinetics("kinetics_data_path")
dataset = kinetics.as_dataset("custom")
# implement additional processing and batching here
images_and_labels = dataset.make_one_shot_iterator().get_next()
images = images_and_labels["images"]
labels = image_and_labels["labels"]
This data is structured for per-clip action classification where images is
the sequence of images and labels are a one-hot encoded value. See
as_dataset() for more details.
Note that the number of videos changes in the data set over time, so it will
likely be necessary to change the expected number of examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import csv
import os
import random
import subprocess
import sys
import tarfile
import tempfile
import urllib
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from mediapipe.util.sequence import media_sequence as ms
CITATION = r"""@article{kay2017kinetics,
title={The kinetics human action video dataset},
author={Kay, Will and Carreira, Joao and Simonyan, Karen and Zhang, Brian and Hillier, Chloe and Vijayanarasimhan, Sudheendra and Viola, Fabio and Green, Tim and Back, Trevor and Natsev, Paul and others},
journal={arXiv preprint arXiv:1705.06950},
year={2017},
url = {https://deepmind.com/research/open-source/kinetics},
}"""
ANNOTATION_URL = "https://storage.googleapis.com/deepmind-media/Datasets/kinetics700.tar.gz"
SECONDS_TO_MICROSECONDS = 1000000
GRAPHS = ["tvl1_flow_and_rgb_from_file.pbtxt"]
FILEPATTERN = "kinetics_700_%s_25fps_rgb_flow"
SPLITS = {
"train": {
"shards": 1000,
"examples": 540247
},
"validate": {
"shards": 100,
"examples": 34610
},
"test": {
"shards": 100,
"examples": 69103
},
"custom": {
"csv": None, # Add a CSV for your own data here.
"shards": 1, # Change this number to increase sharding.
"examples": -1
}, # Negative 1 allows any number of examples.
}
NUM_CLASSES = 700
class Kinetics(object):
"""Generates and loads the Kinetics data set."""
def __init__(self, path_to_data):
if not path_to_data:
raise ValueError("You must supply the path to the data directory.")
self.path_to_data = path_to_data
def as_dataset(self, split, shuffle=False, repeat=False,
serialized_prefetch_size=32, decoded_prefetch_size=32,
parse_labels=True):
"""Returns Kinetics as a tf.data.Dataset.
After running this function, calling padded_batch() on the Dataset object
will produce batches of data, but additional preprocessing may be desired.
If using padded_batch, the indicator_matrix output distinguishes valid
from padded frames.
Args:
split: either "train" or "test"
shuffle: if true, shuffles both files and examples.
repeat: if true, repeats the data set forever.
serialized_prefetch_size: the buffer size for reading from disk.
decoded_prefetch_size: the buffer size after decoding.
parse_labels: if true, also returns the "labels" below. The only
case where this should be false is if the data set was not constructed
with a label map, resulting in this field being missing.
Returns:
A tf.data.Dataset object with the following structure: {
"images": float tensor, shape [time, height, width, channels]
"flow": float tensor, shape [time, height, width, 2]
"num_frames": int32 tensor, shape [], number of frames in the sequence
"labels": float32 tensor, shape [num_classes], one hot encoded. Only
present if parse_labels is true.
"""
logging.info("If you see an error about labels, and you don't supply "
"labels in your CSV, set parse_labels=False")
def parse_fn(sequence_example):
"""Parses a Kinetics example."""
context_features = {
ms.get_example_id_key(): ms.get_example_id_default_parser(),
}
if parse_labels:
context_features[
ms.get_clip_label_string_key()] = tf.FixedLenFeature((), tf.string)
context_features[
ms.get_clip_label_index_key()] = tf.FixedLenFeature((), tf.int64)
sequence_features = {
ms.get_image_encoded_key(): ms.get_image_encoded_default_parser(),
ms.get_forward_flow_encoded_key():
ms.get_forward_flow_encoded_default_parser(),
}
parsed_context, parsed_sequence = tf.io.parse_single_sequence_example(
sequence_example, context_features, sequence_features)
images = tf.image.convert_image_dtype(
tf.map_fn(tf.image.decode_jpeg,
parsed_sequence[ms.get_image_encoded_key()],
back_prop=False,
dtype=tf.uint8), tf.float32)
num_frames = tf.shape(images)[0]
flow = tf.image.convert_image_dtype(
tf.map_fn(tf.image.decode_jpeg,
parsed_sequence[ms.get_forward_flow_encoded_key()],
back_prop=False,
dtype=tf.uint8), tf.float32)
# The flow is quantized for storage in JPEGs by the FlowToImageCalculator.
# The quantization needs to be inverted.
flow = (flow[:, :, :, :2] - 0.5) * 2 * 20.
output_dict = {
"images": images,
"flow": flow,
"num_frames": num_frames,
}
if parse_labels:
        target = tf.one_hot(parsed_context[ms.get_clip_label_index_key()], NUM_CLASSES)
output_dict["labels"] = target
return output_dict
if split not in SPLITS:
raise ValueError("Split %s not in %s" % split, str(SPLITS.keys()))
all_shards = tf.io.gfile.glob(
os.path.join(self.path_to_data, FILEPATTERN % split + "-*-of-*"))
random.shuffle(all_shards)
all_shards_dataset = tf.data.Dataset.from_tensor_slices(all_shards)
cycle_length = min(16, len(all_shards))
dataset = all_shards_dataset.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
cycle_length=cycle_length,
block_length=1, sloppy=True,
buffer_output_elements=serialized_prefetch_size))
dataset = dataset.prefetch(serialized_prefetch_size)
if shuffle:
dataset = dataset.shuffle(serialized_prefetch_size)
if repeat:
dataset = dataset.repeat()
dataset = dataset.map(parse_fn)
dataset = dataset.prefetch(decoded_prefetch_size)
return dataset
def generate_examples(self, path_to_mediapipe_binary,
path_to_graph_directory,
only_generate_metadata=False,
splits_to_process="train,val,test",
video_path_format_string=None,
download_labels_for_map=True):
"""Downloads data and generates sharded TFRecords.
Downloads the data files, generates metadata, and processes the metadata
with MediaPipe to produce tf.SequenceExamples for training. The resulting
files can be read with as_dataset(). After running this function the
original data files can be deleted.
Args:
path_to_mediapipe_binary: Path to the compiled binary for the BUILD target
mediapipe/examples/desktop/demo:media_sequence_demo.
path_to_graph_directory: Path to the directory with MediaPipe graphs in
mediapipe/graphs/media_sequence/.
only_generate_metadata: If true, do not run mediapipe and write the
metadata to disk instead.
splits_to_process: csv string of which splits to process. Allows providing
a custom CSV with the CSV flag. The original data is still downloaded
to generate the label_map.
video_path_format_string: The format string for the path to local files.
download_labels_for_map: If true, download the annotations to create the
label map.
"""
if not path_to_mediapipe_binary:
raise ValueError(
"You must supply the path to the MediaPipe binary for "
"mediapipe/examples/desktop/demo:media_sequence_demo.")
if not path_to_graph_directory:
raise ValueError(
"You must supply the path to the directory with MediaPipe graphs in "
"mediapipe/graphs/media_sequence/.")
logging.info("Downloading data.")
download_output = self._download_data(download_labels_for_map)
for key in splits_to_process.split(","):
logging.info("Generating metadata for split: %s", key)
all_metadata = list(self._generate_metadata(
key, download_output, video_path_format_string))
logging.info("An example of the metadata: ")
logging.info(all_metadata[0])
random.seed(47)
random.shuffle(all_metadata)
shards = SPLITS[key]["shards"]
shard_names = [os.path.join(
self.path_to_data, FILEPATTERN % key + "-%05d-of-%05d" % (
i, shards)) for i in range(shards)]
writers = [tf.io.TFRecordWriter(shard_name) for shard_name in shard_names]
with _close_on_exit(writers) as writers:
for i, seq_ex in enumerate(all_metadata):
if not only_generate_metadata:
print("Processing example %d of %d (%d%%) \r" % (
i, len(all_metadata), i * 100 / len(all_metadata)), end="")
for graph in GRAPHS:
graph_path = os.path.join(path_to_graph_directory, graph)
seq_ex = self._run_mediapipe(
path_to_mediapipe_binary, seq_ex, graph_path)
writers[i % len(writers)].write(seq_ex.SerializeToString())
logging.info("Data extraction complete.")
def _generate_metadata(self, key, download_output,
video_path_format_string=None):
"""For each row in the annotation CSV, generates the corresponding metadata.
Args:
key: which split to process.
download_output: the tuple output of _download_data containing
- annotations_files: dict of keys to CSV annotation paths.
- label_map: dict mapping from label strings to numeric indices.
video_path_format_string: The format string for the path to local files.
Yields:
Each tf.SequenceExample of metadata, ready to pass to MediaPipe.
"""
annotations_files, label_map = download_output
with open(annotations_files[key], "r") as annotations:
reader = csv.reader(annotations)
for i, csv_row in enumerate(reader):
if i == 0: # the first row is the header
continue
        # rename the row with a consistent set of names.
if len(csv_row) == 5:
row = dict(zip(["label_name", "video", "start", "end", "split"],
csv_row))
else:
row = dict(zip(["video", "start", "end", "split"],
csv_row))
metadata = tf.train.SequenceExample()
ms.set_example_id(bytes23(row["video"] + "_" + row["start"]),
metadata)
ms.set_clip_media_id(bytes23(row["video"]), metadata)
ms.set_clip_alternative_media_id(bytes23(row["split"]), metadata)
if video_path_format_string:
filepath = video_path_format_string.format(**row)
ms.set_clip_data_path(bytes23(filepath), metadata)
assert row["start"].isdigit(), "Invalid row: %s" % str(row)
assert row["end"].isdigit(), "Invalid row: %s" % str(row)
if "label_name" in row:
ms.set_clip_label_string([bytes23(row["label_name"])], metadata)
if label_map:
ms.set_clip_label_index([label_map[row["label_name"]]], metadata)
yield metadata
def _download_data(self, download_labels_for_map):
"""Downloads and extracts data if not already available."""
if sys.version_info >= (3, 0):
urlretrieve = urllib.request.urlretrieve
else:
urlretrieve = urllib.urlretrieve
logging.info("Creating data directory.")
tf.io.gfile.makedirs(self.path_to_data)
logging.info("Downloading annotations.")
paths = {}
if download_labels_for_map:
tar_path = os.path.join(self.path_to_data, ANNOTATION_URL.split("/")[-1])
if not tf.io.gfile.exists(tar_path):
urlretrieve(ANNOTATION_URL, tar_path)
with tarfile.open(tar_path) as annotations_tar:
annotations_tar.extractall(self.path_to_data)
for split in ["train", "test", "validate"]:
csv_path = os.path.join(self.path_to_data, "kinetics700/%s.csv" % split)
if not tf.io.gfile.exists(csv_path):
with tarfile.open(tar_path) as annotations_tar:
annotations_tar.extractall(self.path_to_data)
paths[split] = csv_path
for split, contents in SPLITS.items():
if "csv" in contents and contents["csv"]:
paths[split] = contents["csv"]
label_map = (self.get_label_map_and_verify_example_counts(paths) if
download_labels_for_map else None)
return paths, label_map
def _run_mediapipe(self, path_to_mediapipe_binary, sequence_example, graph):
"""Runs MediaPipe over MediaSequence tf.train.SequenceExamples.
Args:
path_to_mediapipe_binary: Path to the compiled binary for the BUILD target
mediapipe/examples/desktop/demo:media_sequence_demo.
sequence_example: The SequenceExample with metadata or partial data file.
graph: The path to the graph that extracts data to add to the
SequenceExample.
Returns:
A copy of the input SequenceExample with additional data fields added
by the MediaPipe graph.
Raises:
RuntimeError: if MediaPipe returns an error or fails to run the graph.
"""
if not path_to_mediapipe_binary:
raise ValueError("--path_to_mediapipe_binary must be specified.")
input_fd, input_filename = tempfile.mkstemp()
output_fd, output_filename = tempfile.mkstemp()
cmd = [path_to_mediapipe_binary,
"--calculator_graph_config_file=%s" % graph,
"--input_side_packets=input_sequence_example=%s" % input_filename,
"--output_side_packets=output_sequence_example=%s" % output_filename]
with open(input_filename, "wb") as input_file:
input_file.write(sequence_example.SerializeToString())
mediapipe_output = subprocess.check_output(cmd)
if b"Failed to run the graph" in mediapipe_output:
raise RuntimeError(mediapipe_output)
with open(output_filename, "rb") as output_file:
output_example = tf.train.SequenceExample()
output_example.ParseFromString(output_file.read())
os.close(input_fd)
os.remove(input_filename)
os.close(output_fd)
os.remove(output_filename)
return output_example
def get_label_map_and_verify_example_counts(self, paths):
"""Verify the number of examples and labels have not changed."""
for name, path in paths.items():
with open(path, "r") as f:
lines = f.readlines()
# the header adds one line and one "key".
num_examples = len(lines) - 1
keys = [l.split(",")[0] for l in lines]
label_map = None
if name == "train":
classes = sorted(list(set(keys[1:])))
num_keys = len(set(keys)) - 1
assert NUM_CLASSES == num_keys, (
"Found %d labels for split: %s, should be %d" % (
num_keys, name, NUM_CLASSES))
label_map = dict(zip(classes, range(len(classes))))
if SPLITS[name]["examples"] > 0:
assert SPLITS[name]["examples"] == num_examples, (
"Found %d examples for split: %s, should be %d" % (
num_examples, name, SPLITS[name]["examples"]))
return label_map
def bytes23(string):
"""Creates a bytes string in either Python 2 or 3."""
if sys.version_info >= (3, 0):
return bytes(string, "utf8")
else:
return bytes(string)
@contextlib.contextmanager
def _close_on_exit(writers):
"""Call close on all writers on exit."""
try:
yield writers
finally:
for writer in writers:
writer.close()
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
if flags.FLAGS.path_to_custom_csv:
SPLITS["custom"]["csv"] = flags.FLAGS.path_to_custom_csv
Kinetics(flags.FLAGS.path_to_kinetics_data).generate_examples(
flags.FLAGS.path_to_mediapipe_binary,
flags.FLAGS.path_to_graph_directory,
flags.FLAGS.only_generate_metadata,
flags.FLAGS.splits_to_process,
flags.FLAGS.video_path_format_string,
flags.FLAGS.download_labels_for_map)
if __name__ == "__main__":
flags.DEFINE_string("path_to_kinetics_data",
"",
"Path to directory to write data to.")
flags.DEFINE_string("path_to_mediapipe_binary",
"",
"Path to the MediaPipe run_graph_file_io_main binary.")
flags.DEFINE_string("path_to_graph_directory",
"",
"Path to directory containing the graph files.")
flags.DEFINE_boolean("only_generate_metadata",
False,
"If true, only generate the metadata files.")
flags.DEFINE_boolean("download_labels_for_map",
True,
"If true, download the annotations to construct the "
"label map.")
flags.DEFINE_string("splits_to_process",
"custom",
"Process these splits. Useful for custom data splits.")
flags.DEFINE_string("video_path_format_string",
None,
"The format string for the path to local video files. "
"Uses the Python string.format() syntax with possible "
"arguments of {video}, {start}, {end}, {label_name}, and "
"{split}, corresponding to columns of the data csvs.")
flags.DEFINE_string("path_to_custom_csv",
None,
"If present, processes this CSV as a custom split.")
app.run(main)
|
the-stack_106_29144 | import cv2
import json
'''
Tool to visualize crowd counting ground truth annotations.
Input:
json_path: the path to json annotation files
json format: {
"JPG_FILE_PATH":[
{"x": x_coordinate,
"y": x_coordinate
},
{"x": x_coordinate,
"y": x_coordinate
},
...
],
...
}
scale: image zoom scale
    is_show: whether to display the annotated image
    is_save: whether to save the annotated image
'''
def cc_visualize(json_path,
scale=0.5,
is_show=False,
is_save=True):
# JSON reading
with open(json_path, 'r') as load_f:
load_dict = json.load(load_f)
total_point_num = 0
for img_path in load_dict:
pointList = load_dict[img_path]
print('image path: ', img_path)
print('total number of points: ', len(pointList))
total_point_num += len(pointList)
# load img and resize
img = cv2.imread(img_path)
height, width = img.shape[:2]
img = cv2.resize(img, (int(width * scale), int(height * scale)), interpolation=cv2.INTER_AREA)
height, width = img.shape[:2]
for point in pointList:
cv2.circle(img, (int(point['x'] * width), int(point['y'] * height)),
radius=3, color=(0, 0, 255), thickness=-1)
if is_show:
cv2.imshow('img', img)
cv2.waitKey(0)
if is_save:
cv2.imwrite('Pointed_' + img_path.replace('/', '_'), img, [int(cv2.IMWRITE_JPEG_QUALITY), 75])
print('Finished! Total point number:', total_point_num)
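# Illustrative usage sketch (the annotation path is a placeholder):
#
#   cc_visualize('crowd_annotations.json', scale=0.5, is_show=False, is_save=True)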
|
the-stack_106_29145 | # -*- coding=utf-8 -*-
#!/usr/bin/python3
import math
import tensorflow as tf
from tensorflow.keras import backend as K
from yolo3.postprocess import yolo3_decode
def softmax_focal_loss(y_true, y_pred, gamma=2.0, alpha=0.25):
"""
Compute softmax focal loss.
Reference Paper:
"Focal Loss for Dense Object Detection"
https://arxiv.org/abs/1708.02002
# Arguments
y_true: Ground truth targets,
tensor of shape (?, num_boxes, num_classes).
y_pred: Predicted logits,
tensor of shape (?, num_boxes, num_classes).
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
# Returns
softmax_focal_loss: Softmax focal loss, tensor of shape (?, num_boxes).
"""
# Scale predictions so that the class probas of each sample sum to 1
#y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
# Clip the prediction value to prevent NaN's and Inf's
#epsilon = K.epsilon()
#y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
y_pred = tf.nn.softmax(y_pred)
y_pred = tf.maximum(tf.minimum(y_pred, 1 - 1e-15), 1e-15)
# Calculate Cross Entropy
cross_entropy = -y_true * tf.math.log(y_pred)
# Calculate Focal Loss
softmax_focal_loss = alpha * tf.pow(1 - y_pred, gamma) * cross_entropy
return softmax_focal_loss
def sigmoid_focal_loss(y_true, y_pred, gamma=2.0, alpha=0.25):
"""
Compute sigmoid focal loss.
Reference Paper:
"Focal Loss for Dense Object Detection"
https://arxiv.org/abs/1708.02002
# Arguments
y_true: Ground truth targets,
tensor of shape (?, num_boxes, num_classes).
y_pred: Predicted logits,
tensor of shape (?, num_boxes, num_classes).
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
# Returns
sigmoid_focal_loss: Sigmoid focal loss, tensor of shape (?, num_boxes).
"""
sigmoid_loss = K.binary_crossentropy(y_true, y_pred, from_logits=True)
pred_prob = tf.sigmoid(y_pred)
p_t = ((y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob)))
modulating_factor = tf.pow(1.0 - p_t, gamma)
alpha_weight_factor = (y_true * alpha + (1 - y_true) * (1 - alpha))
sigmoid_focal_loss = modulating_factor * alpha_weight_factor * sigmoid_loss
#sigmoid_focal_loss = tf.reduce_sum(sigmoid_focal_loss, axis=-1)
return sigmoid_focal_loss
def box_iou(b1, b2):
"""
Return iou tensor
Parameters
----------
b1: tensor, shape=(i1,...,iN, 4), xywh
b2: tensor, shape=(j, 4), xywh
Returns
-------
iou: tensor, shape=(i1,...,iN, j)
"""
# Expand dim to apply broadcasting.
b1 = K.expand_dims(b1, -2)
b1_xy = b1[..., :2]
b1_wh = b1[..., 2:4]
b1_wh_half = b1_wh/2.
b1_mins = b1_xy - b1_wh_half
b1_maxes = b1_xy + b1_wh_half
# Expand dim to apply broadcasting.
b2 = K.expand_dims(b2, 0)
b2_xy = b2[..., :2]
b2_wh = b2[..., 2:4]
b2_wh_half = b2_wh/2.
b2_mins = b2_xy - b2_wh_half
b2_maxes = b2_xy + b2_wh_half
intersect_mins = K.maximum(b1_mins, b2_mins)
intersect_maxes = K.minimum(b1_maxes, b2_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b1_wh[..., 0] * b1_wh[..., 1]
b2_area = b2_wh[..., 0] * b2_wh[..., 1]
iou = intersect_area / (b1_area + b2_area - intersect_area)
return iou
def box_giou(b_true, b_pred):
"""
Calculate GIoU loss on anchor boxes
Reference Paper:
"Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression"
https://arxiv.org/abs/1902.09630
Parameters
----------
b_true: GT boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
b_pred: predict boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
Returns
-------
giou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
"""
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh/2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh/2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
union_area = b_true_area + b_pred_area - intersect_area
# calculate IoU, add epsilon in denominator to avoid dividing by 0
iou = intersect_area / (union_area + K.epsilon())
# get enclosed area
enclose_mins = K.minimum(b_true_mins, b_pred_mins)
enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
# calculate GIoU, add epsilon in denominator to avoid dividing by 0
giou = iou - 1.0 * (enclose_area - union_area) / (enclose_area + K.epsilon())
giou = K.expand_dims(giou, -1)
return giou
def box_diou(b_true, b_pred, use_ciou=False):
"""
Calculate DIoU/CIoU loss on anchor boxes
Reference Paper:
"Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression"
https://arxiv.org/abs/1911.08287
Parameters
----------
b_true: GT boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
b_pred: predict boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
use_ciou: bool flag to indicate whether to use CIoU loss type
Returns
-------
diou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
"""
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh/2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh/2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
union_area = b_true_area + b_pred_area - intersect_area
# calculate IoU, add epsilon in denominator to avoid dividing by 0
iou = intersect_area / (union_area + K.epsilon())
# box center distance
center_distance = K.sum(K.square(b_true_xy - b_pred_xy), axis=-1)
# get enclosed area
enclose_mins = K.minimum(b_true_mins, b_pred_mins)
enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
# get enclosed diagonal distance
enclose_diagonal = K.sum(K.square(enclose_wh), axis=-1)
# calculate DIoU, add epsilon in denominator to avoid dividing by 0
diou = iou - 1.0 * (center_distance) / (enclose_diagonal + K.epsilon())
if use_ciou:
# calculate param v and alpha to extend to CIoU
v = 4*K.square(tf.math.atan2(b_true_wh[..., 0], b_true_wh[..., 1]) - tf.math.atan2(b_pred_wh[..., 0], b_pred_wh[..., 1])) / (math.pi * math.pi)
        # a trick: here we add a non-gradient coefficient w^2+h^2 to v to customize its back-propagation,
        # to match the description of equation (12) in the original paper:
        #
        #    dv/dw =  (8/pi^2) * (arctan(w_gt/h_gt) - arctan(w/h)) * (h/(w^2+h^2))          (12)
        #    dv/dh = -(8/pi^2) * (arctan(w_gt/h_gt) - arctan(w/h)) * (w/(w^2+h^2))
        #
        # The denominator w^2+h^2 is usually a small value for the cases where
        # h and w range in [0, 1], which is likely to yield gradient
        # explosion. Thus in our implementation, the denominator
        # w^2+h^2 is simply removed for stable convergence, by which
        # the step size 1/(w^2+h^2) is replaced by 1 and the gradient direction
        # is still consistent with Eqn. (12).
v = v * tf.stop_gradient(b_pred_wh[..., 0] * b_pred_wh[..., 0] + b_pred_wh[..., 1] * b_pred_wh[..., 1])
alpha = v / (1.0 - iou + v)
diou = diou - alpha*v
diou = K.expand_dims(diou, -1)
return diou
def _smooth_labels(y_true, label_smoothing):
label_smoothing = K.constant(label_smoothing, dtype=K.floatx())
return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
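# Worked example (illustrative): with label_smoothing=0.1 a one-hot target [1, 0]
# becomes [1*(1-0.1)+0.5*0.1, 0*(1-0.1)+0.5*0.1] = [0.95, 0.05], softening the targets
# so the model is not pushed towards arbitrarily confident logits.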
def yolo3_loss(args, anchors, num_classes, ignore_thresh=.5, label_smoothing=0, elim_grid_sense=False, use_focal_loss=False, use_focal_obj_loss=False, use_softmax_loss=False, use_giou_loss=False, use_diou_loss=True):
'''
YOLOv3 loss function.
Parameters
----------
yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
y_true: list of array, the output of preprocess_true_boxes
anchors: array, shape=(N, 2), wh
num_classes: integer
    ignore_thresh: float, the IoU threshold above which a predicted box is excluded from the no-object confidence loss
Returns
-------
    loss: tensor, shape=(1,), together with the total location, confidence and class loss components
'''
num_layers = len(anchors)//3 # default setting
yolo_outputs = args[:num_layers]
y_true = args[num_layers:]
if num_layers == 3:
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]
scale_x_y = [1.05, 1.1, 1.2] if elim_grid_sense else [None, None, None]
else:
anchor_mask = [[3,4,5], [0,1,2]]
scale_x_y = [1.05, 1.05] if elim_grid_sense else [None, None]
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]
loss = 0
total_location_loss = 0
total_confidence_loss = 0
total_class_loss = 0
batch_size = K.shape(yolo_outputs[0])[0] # batch size, tensor
batch_size_f = K.cast(batch_size, K.dtype(yolo_outputs[0]))
for l in range(num_layers):
object_mask = y_true[l][..., 4:5]
true_class_probs = y_true[l][..., 5:]
if label_smoothing:
true_class_probs = _smooth_labels(true_class_probs, label_smoothing)
true_objectness_probs = _smooth_labels(object_mask, label_smoothing)
else:
true_objectness_probs = object_mask
grid, raw_pred, pred_xy, pred_wh = yolo3_decode(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, scale_x_y=scale_x_y[l], calc_loss=True)
pred_box = K.concatenate([pred_xy, pred_wh])
# Darknet raw box to calculate loss.
raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid
raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]
# Find ignore mask, iterate over each of batch.
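        # The ignore mask flags predicted boxes whose best IoU with any ground-truth box is
        # below ignore_thresh; predictions above the threshold are left out of the no-object
        # confidence loss so reasonable-but-unassigned detections are not penalized.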
ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
object_mask_bool = K.cast(object_mask, 'bool')
def loop_body(b, ignore_mask):
true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
iou = box_iou(pred_box[b], true_box)
best_iou = K.max(iou, axis=-1)
ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
return b+1, ignore_mask
_, ignore_mask = tf.while_loop(lambda b,*args: b<batch_size, loop_body, [0, ignore_mask])
ignore_mask = ignore_mask.stack()
ignore_mask = K.expand_dims(ignore_mask, -1)
if use_focal_obj_loss:
# Focal loss for objectness confidence
confidence_loss = sigmoid_focal_loss(true_objectness_probs, raw_pred[...,4:5])
else:
confidence_loss = object_mask * K.binary_crossentropy(true_objectness_probs, raw_pred[...,4:5], from_logits=True)+ \
(1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask
if use_focal_loss:
# Focal loss for classification score
if use_softmax_loss:
class_loss = softmax_focal_loss(true_class_probs, raw_pred[...,5:])
else:
class_loss = sigmoid_focal_loss(true_class_probs, raw_pred[...,5:])
else:
if use_softmax_loss:
# use softmax style classification output
class_loss = object_mask * K.expand_dims(K.categorical_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True), axis=-1)
else:
# use sigmoid style classification output
class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)
if use_giou_loss:
# Calculate GIoU loss as location loss
raw_true_box = y_true[l][...,0:4]
giou = box_giou(raw_true_box, pred_box)
giou_loss = object_mask * box_loss_scale * (1 - giou)
giou_loss = K.sum(giou_loss) / batch_size_f
location_loss = giou_loss
elif use_diou_loss:
# Calculate DIoU loss as location loss
raw_true_box = y_true[l][...,0:4]
diou = box_diou(raw_true_box, pred_box)
diou_loss = object_mask * box_loss_scale * (1 - diou)
diou_loss = K.sum(diou_loss) / batch_size_f
location_loss = diou_loss
else:
# Standard YOLOv3 location loss
# K.binary_crossentropy is helpful to avoid exp overflow.
xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)
wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])
xy_loss = K.sum(xy_loss) / batch_size_f
wh_loss = K.sum(wh_loss) / batch_size_f
location_loss = xy_loss + wh_loss
confidence_loss = K.sum(confidence_loss) / batch_size_f
class_loss = K.sum(class_loss) / batch_size_f
loss += location_loss + confidence_loss + class_loss
total_location_loss += location_loss
total_confidence_loss += confidence_loss
total_class_loss += class_loss
# Fit for tf 2.0.0 loss shape
loss = K.expand_dims(loss, axis=-1)
return loss, total_location_loss, total_confidence_loss, total_class_loss
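# --- Usage sketch (added for illustration; not part of the original module) ---
# yolo3_loss is typically wrapped in a Lambda layer for training. The names `yolo3_body`,
# `anchors`, `num_classes` and the y_true input shapes below are assumptions based on
# common keras-YOLOv3 training scripts and may differ in this project:
#
#   from tensorflow.keras.layers import Input, Lambda
#   from tensorflow.keras.models import Model
#
#   image_input = Input(shape=(None, None, 3))
#   model_body = yolo3_body(image_input, num_anchors=3, num_classes=num_classes)
#   y_true_inputs = [Input(shape=(None, None, 3, num_classes + 5)) for _ in range(3)]
#   loss_out = Lambda(lambda args: yolo3_loss(args, anchors, num_classes,
#                                             ignore_thresh=0.5, use_diou_loss=True)[0],
#                     output_shape=(1,), name='yolo_loss')([*model_body.output, *y_true_inputs])
#   train_model = Model([model_body.input, *y_true_inputs], loss_out)
#   train_model.compile(optimizer='adam', loss=lambda y_true, y_pred: y_pred)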
|
the-stack_106_29146 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PacketCaptureQueryStatusResult(Model):
"""Status of packet capture session.
:param name: The name of the packet capture resource.
:type name: str
:param id: The ID of the packet capture resource.
:type id: str
:param capture_start_time: The start time of the packet capture session.
:type capture_start_time: datetime
:param packet_capture_status: The status of the packet capture session.
Possible values include: 'NotStarted', 'Running', 'Stopped', 'Error',
'Unknown'
:type packet_capture_status: str or
~azure.mgmt.network.v2017_03_01.models.PcStatus
:param stop_reason: The reason the current packet capture session was
stopped.
:type stop_reason: str
:param packet_capture_error: List of errors of packet capture session.
:type packet_capture_error: list[str or
~azure.mgmt.network.v2017_03_01.models.PcError]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'capture_start_time': {'key': 'captureStartTime', 'type': 'iso-8601'},
'packet_capture_status': {'key': 'packetCaptureStatus', 'type': 'str'},
'stop_reason': {'key': 'stopReason', 'type': 'str'},
'packet_capture_error': {'key': 'packetCaptureError', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(PacketCaptureQueryStatusResult, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.id = kwargs.get('id', None)
self.capture_start_time = kwargs.get('capture_start_time', None)
self.packet_capture_status = kwargs.get('packet_capture_status', None)
self.stop_reason = kwargs.get('stop_reason', None)
self.packet_capture_error = kwargs.get('packet_capture_error', None)
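# --- Usage sketch (added for illustration; not part of the generated model) ---
# As a plain msrest Model the class can be built from keyword arguments, e.g. when
# stubbing responses in tests (all values below are made up):
#
#   status = PacketCaptureQueryStatusResult(
#       name='pc1',
#       packet_capture_status='Running',
#       stop_reason=None,
#       packet_capture_error=[])
#   print(status.packet_capture_status)  # 'Running'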
|
the-stack_106_29147 | # Copyright (c) 2022, RC and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, _dict
def execute(filters=None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns():
columns = [
{
"fieldname": "id",
"fieldtype": "Link",
"label": "ID",
"options": "Sales Order",
"width": 90
},
{
"fieldname": "salesman",
"fieldtype": "Data",
"label": "Salesman",
"width": 200
},
{
"fieldname": "customer",
"fieldtype": "Link",
"label": "Customer",
"options": "Customer",
"width": 100
},
{
"fieldname": "customer_name",
"fieldtype": "Data",
"label": "Customer Name",
"width": 200
},
{
"fieldname": "date",
"fieldtype": "Date",
"label": "Date",
"width": 100
},
{
"fieldname": "total",
"fieldtype": "Currency",
"label": "Total",
"width": 120
},
{
"fieldname": "payment",
"fieldtype": "Currency",
"label": "Payment",
"width": 120
},
{
"fieldname": "returns",
"fieldtype": "Currency",
"label": "Returns",
"width": 120
},
{
"fieldname": "pending",
"fieldtype": "Currency",
"label": "Pending",
"width": 120
},
{
"fieldname": "status",
"fieldtype": "Data",
"label": "Status",
"width": 120
},
{
"fieldname": "delivery_status",
"fieldtype": "Data",
"label": "Delivery",
"width": 120
},
{
"fieldname": "billing",
"fieldtype": "Data",
"label": "Billing",
"width": 120
},
{
"fieldname": "delivered",
"fieldtype": "Data",
"label": "Delivered",
"width": 120
},
{
"fieldname": "billed_percent",
"fieldtype": "Data",
"label": "Billed Percent",
"width": 120
},
{
"fieldname": "owner",
"fieldtype": "Data",
"label": "Owner",
"width": 120
},
{
"fieldname": "allow_delivery",
"fieldtype": "Check",
"label": "Allow Delivery",
"width": 100
},
{
"fieldname": "delivery_comments",
"fieldtype": "Data",
"label": "Delivery Comments",
"width": 200
}
]
return columns
def get_data(filters):
customer_filter = ""
if not (filters.get("customer_group")):
frappe.throw(_("Please set Customer Group First"))
else:
customer_filter = """ and customer_group in (select name from `tabCustomer Group`
where lft>=(select lft from `tabCustomer Group` where name = '{0}')
and rgt<=(select rgt from `tabCustomer Group` where name = '{0}' ))""".format(filters.get("customer_group"))
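	# `lft`/`rgt` in the subqueries above come from frappe's nested set model for tree
	# doctypes, so the filter matches the selected Customer Group and all of its child groups.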
query = """select
so.name,
so.customer,
so.customer_name,
so.transaction_date,
so.rounded_total,
(select
sum(per.allocated_amount)
from `tabPayment Entry` as pe
left join `tabPayment Entry Reference` per on per.parent = pe.name
where pe.docstatus=1 and per.sales_order = so.name
group by per.sales_order) as payment,
(select
sum(si2.rounded_total)
from `tabSales Invoice` as si2
where si2.docstatus=1 and si2.is_return=1 and si2.cust_sales_order_number = so.name
group by si2.cust_sales_order_number) as returns,
so.status,
so.delivery_status,
so.billing_status,
so.per_delivered,
so.per_billed,
so.owner,
so.allow_delivery,
so.delivery_approval_comments
from `tabSales Order` as so
where so.docstatus = 1 and so.status != 'Closed' {0}""".format(customer_filter)
if filters.get('from_date'):
query += " and so.transaction_date >= '{0}'".format(filters.get('from_date'))
if filters.get('to_date'):
query += " and so.transaction_date <= '{0}'".format(filters.get('to_date'))
if filters.get('created_by'):
query += " and so.owner = '{0}'".format(filters.get('created_by'))
result = frappe.db.sql(query,as_dict=True)
data = []
for row in result:
		row = {
			"id": row.name,
			"salesman": frappe.db.get_value('User', {'email': row.owner}, 'full_name'),
			"customer": row.customer,
			"customer_name": row.customer_name,
			"date": row.transaction_date,
			"total": row.rounded_total,
			"payment": row.payment,
			"returns": row.returns,
			"pending": (row.rounded_total if row.rounded_total else 0) - (-(row.returns) if row.returns else 0) - (row.payment if row.payment else 0),
			"status": row.status,
			"delivery_status": row.delivery_status,
			"billing": row.billing_status,
			"delivered": row.per_delivered,
			"billed_percent": row.per_billed,
"owner": row.owner,
"allow_delivery": row.allow_delivery,
"delivery_comments": row.delivery_approval_comments
}
data.append(row)
return data |
the-stack_106_29149 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 23:09:15 2018
@author: vinayak
"""
from keras.models import Sequential
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.layers.merge import Concatenate
from keras.layers.core import Lambda, Flatten, Dense
from keras.initializers import glorot_uniform
from keras.engine.topology import Layer
from keras import backend as K
from scipy import ndimage, misc
from scipy.misc import imsave
K.set_image_data_format('channels_first')
import cv2
import os
import numpy as np
from numpy import genfromtxt
import pandas as pd
import tensorflow as tf
from fr_utils import *
from inception_blocks_v2 import *
#%matplotlib inline
#%load_ext autoreload
#%autoreload 2
np.set_printoptions(threshold=np.nan)
FRmodel = faceRecoModel(input_shape=(3, 96, 96))
print("Total Params:", FRmodel.count_params())
def triplet_loss(y_true, y_pred, alpha = 0.2):
"""
Implementation of the triplet loss as defined by formula (3)
Arguments:
y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
y_pred -- python list containing three objects:
anchor -- the encodings for the anchor images, of shape (None, 128)
positive -- the encodings for the positive images, of shape (None, 128)
negative -- the encodings for the negative images, of shape (None, 128)
Returns:
loss -- real number, value of the loss
"""
anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
# Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
    # Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
    # Step 3: subtract the two previous distances and add alpha.
    basic_loss = pos_dist - neg_dist + alpha
    # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))
### END CODE HERE ###
return loss
with tf.Session() as test:
tf.set_random_seed(1)
y_true = (None, None, None)
y_pred = (tf.random_normal([3, 128], mean=6, stddev=0.1, seed = 1),
tf.random_normal([3, 128], mean=1, stddev=1, seed = 1),
tf.random_normal([3, 128], mean=3, stddev=4, seed = 1))
loss = triplet_loss(y_true, y_pred)
print("loss = " + str(loss.eval()))
FRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])
load_weights_from_FaceNet(FRmodel)
database = {}
database["danielle"] = img_to_encoding("images/danielle.png", FRmodel)
database["younes"] = img_to_encoding("images/younes.jpg", FRmodel)
database["tian"] = img_to_encoding("images/tian.jpg", FRmodel)
database["andrew"] = img_to_encoding("images/andrew.jpg", FRmodel)
database["kian"] = img_to_encoding("images/kian.jpg", FRmodel)
database["dan"] = img_to_encoding("images/dan.jpg", FRmodel)
database["sebastiano"] = img_to_encoding("images/sebastiano.jpg", FRmodel)
database["bertrand"] = img_to_encoding("images/bertrand.jpg", FRmodel)
database["kevin"] = img_to_encoding("images/kevin.jpg", FRmodel)
database["felix"] = img_to_encoding("images/felix.jpg", FRmodel)
database["benoit"] = img_to_encoding("images/benoit.jpg", FRmodel)
database["arnaud"] = img_to_encoding("images/arnaud.jpg", FRmodel)
# GRADED FUNCTION: verify
#FACE VERIFICATION
def verify(image_path, identity, database, model):
"""
Function that verifies if the person on the "image_path" image is "identity".
Arguments:
image_path -- path to an image
identity -- string, name of the person you'd like to verify the identity. Has to be a resident of the Happy house.
database -- python dictionary mapping names of allowed people's names (strings) to their encodings (vectors).
model -- your Inception model instance in Keras
Returns:
dist -- distance between the image_path and the image of "identity" in the database.
door_open -- True, if the door should open. False otherwise.
"""
    # Step 1: Compute the encoding for the image. Use img_to_encoding() see example above. (≈ 1 line)
encoding = img_to_encoding(image_path, FRmodel)
    # Step 2: Compute distance with identity's image (≈ 1 line)
dist = np.linalg.norm(database[identity]-encoding)
    # Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)
if dist < 0.7:
print("It's " + str(identity) + ", welcome home!")
        door_open = True
else:
print("It's not " + str(identity) + ", please go away")
        door_open = False
return dist, door_open
verify("images/camera_0.jpg", "younes", database, FRmodel)
verify("images/camera_2.jpg", "kian", database, FRmodel)
#FACE RECOGNITION
# GRADED FUNCTION: who_is_it
def who_is_it(image_path, database, model):
"""
Implements face recognition for the happy house by finding who is the person on the image_path image.
Arguments:
image_path -- path to an image
database -- database containing image encodings along with the name of the person on the image
model -- your Inception model instance in Keras
Returns:
min_dist -- the minimum distance between image_path encoding and the encodings from the database
identity -- string, the name prediction for the person on image_path
"""
    ## Step 1: Compute the target "encoding" for the image. Use img_to_encoding() see example above. ## (≈ 1 line)
encoding = img_to_encoding(image_path, FRmodel)
## Step 2: Find the closest encoding ##
    # Initialize "min_dist" to a large value, say 100 (≈ 1 line)
min_dist = 100
# Loop over the database dictionary's names and encodings.
for (name, db_enc) in database.items():
        # Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line)
dist = np.linalg.norm(db_enc-encoding)
        # If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
if dist < min_dist:
min_dist = dist
identity = name
if min_dist > 0.7:
print("Not in the database.")
else:
print ("it's " + str(identity) + ", the distance is " + str(min_dist))
return min_dist, identity
who_is_it('images/vinayak.jpg', database, FRmodel)
|
the-stack_106_29151 | __all__ = [
'RegressionRandomIndexComponent',
'RegressionPredictionSummaryComponent',
'PredictedVsActualComponent',
'ResidualsComponent',
'RegressionVsColComponent',
'RegressionModelSummaryComponent',
]
import numpy as np
import pandas as pd
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from ..dashboard_methods import *
class RegressionRandomIndexComponent(ExplainerComponent):
def __init__(self, explainer, title="Select Random Index", name=None,
subtitle="Select from list or pick at random",
hide_title=False, hide_subtitle=False,
hide_index=False, hide_pred_slider=False,
hide_residual_slider=False, hide_pred_or_y=False,
hide_abs_residuals=False, hide_button=False,
index=None, pred_slider=None, y_slider=None,
residual_slider=None, abs_residual_slider=None,
pred_or_y="preds", abs_residuals=True, round=2,
description=None, **kwargs):
"""Select a random index subject to constraints component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Select Random Index".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional): hide title
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_index (bool, optional): Hide index selector.
Defaults to False.
hide_pred_slider (bool, optional): Hide prediction slider.
Defaults to False.
hide_residual_slider (bool, optional): hide residuals slider.
Defaults to False.
hide_pred_or_y (bool, optional): hide prediction or actual toggle.
Defaults to False.
hide_abs_residuals (bool, optional): hide absolute residuals toggle.
Defaults to False.
hide_button (bool, optional): hide button. Defaults to False.
index ({str, int}, optional): Initial index to display.
Defaults to None.
pred_slider ([lb, ub], optional): Initial values for prediction
values slider [lowerbound, upperbound]. Defaults to None.
y_slider ([lb, ub], optional): Initial values for y slider
[lower bound, upper bound]. Defaults to None.
residual_slider ([lb, ub], optional): Initial values for residual slider
[lower bound, upper bound]. Defaults to None.
abs_residual_slider ([lb, ub], optional): Initial values for absolute
residuals slider [lower bound, upper bound]
Defaults to None.
pred_or_y (str, {'preds', 'y'}, optional): Initial use predictions
or y slider. Defaults to "preds".
abs_residuals (bool, optional): Initial use residuals or absolute
residuals. Defaults to True.
round (int, optional): rounding used for slider spacing. Defaults to 2.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
assert self.explainer.is_regression, \
("explainer is not a RegressionExplainer so the RegressionRandomIndexComponent "
"will not work. Try using the ClassifierRandomIndexComponent instead.")
self.index_name = 'random-index-reg-index-'+self.name
if self.explainer.y_missing:
self.hide_residual_slider = True
self.hide_pred_or_y = True
self.hide_abs_residuals = True
self.pred_or_y = "preds"
self.y_slider = [0.0, 1.0]
self.residual_slider = [0.0, 1.0]
self.abs_residual_slider = [0.0, 1.0]
if self.pred_slider is None:
self.pred_slider = [self.explainer.preds.min(), self.explainer.preds.max()]
if not self.explainer.y_missing:
if self.y_slider is None:
self.y_slider = [self.explainer.y.min(), self.explainer.y.max()]
if self.residual_slider is None:
self.residual_slider = [self.explainer.residuals.min(), self.explainer.residuals.max()]
if self.abs_residual_slider is None:
self.abs_residual_slider = [self.explainer.abs_residuals.min(), self.explainer.abs_residuals.max()]
assert (len(self.pred_slider)==2 and self.pred_slider[0]<=self.pred_slider[1]), \
"pred_slider should be a list of a [lower_bound, upper_bound]!"
assert (len(self.y_slider)==2 and self.y_slider[0]<=self.y_slider[1]), \
"y_slider should be a list of a [lower_bound, upper_bound]!"
assert (len(self.residual_slider)==2 and self.residual_slider[0]<=self.residual_slider[1]), \
"residual_slider should be a list of a [lower_bound, upper_bound]!"
assert (len(self.abs_residual_slider)==2 and self.abs_residual_slider[0]<=self.abs_residual_slider[1]), \
"abs_residual_slider should be a list of a [lower_bound, upper_bound]!"
self.y_slider = [float(y) for y in self.y_slider]
self.pred_slider = [float(p) for p in self.pred_slider]
self.residual_slider = [float(r) for r in self.residual_slider]
self.abs_residual_slider = [float(a) for a in self.abs_residual_slider]
assert self.pred_or_y in ['preds', 'y'], "pred_or_y should be in ['preds', 'y']!"
if self.description is None: self.description = f"""
You can select a {self.explainer.index_name} directly by choosing it
from the dropdown (if you start typing you can search inside the list),
or hit the Random {self.explainer.index_name} button to randomly select
a {self.explainer.index_name} that fits the constraints. For example
you can select a {self.explainer.index_name} with a very high predicted
{self.explainer.target}, or a very low observed {self.explainer.target},
or a {self.explainer.index_name} whose predicted {self.explainer.target}
was very far off from the observed {self.explainer.target} and so had a
high (absolute) residual.
"""
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(f"Select {self.explainer.index_name}", id='random-index-reg-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='random-index-reg-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dcc.Dropdown(id='random-index-reg-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
value=self.index)
], md=8), hide=self.hide_index),
make_hideable(
dbc.Col([
dbc.Button(f"Random {self.explainer.index_name}", color="primary", id='random-index-reg-button-'+self.name, block=True),
dbc.Tooltip(f"Select a random {self.explainer.index_name} according to the constraints",
target='random-index-reg-button-'+self.name),
], md=4), hide=self.hide_button),
], form=True),
dbc.Row([
make_hideable(
dbc.Col([
html.Div([
dbc.Label("Predicted range:", id='random-index-reg-pred-slider-label-'+self.name,
html_for='random-index-reg-pred-slider-'+self.name),
dbc.Tooltip(f"Only select {self.explainer.index_name} where the "
f"predicted {self.explainer.target} was within the following range:",
target='random-index-reg-pred-slider-label-'+self.name),
dcc.RangeSlider(
id='random-index-reg-pred-slider-'+self.name,
min=float(self.explainer.preds.min()),
max=float(self.explainer.preds.max()),
step=np.float_power(10, -self.round),
value=[self.pred_slider[0], self.pred_slider[1]],
marks={float(self.explainer.preds.min()): str(np.round(self.explainer.preds.min(), self.round)),
float(self.explainer.preds.max()): str(np.round(self.explainer.preds.max(), self.round))},
allowCross=False,
tooltip = {'always_visible' : False}
)
], id='random-index-reg-pred-slider-div-'+self.name),
html.Div([
dbc.Label("Observed range:", id='random-index-reg-y-slider-label-'+self.name,
html_for='random-index-reg-y-slider-'+self.name),
dbc.Tooltip(f"Only select {self.explainer.index_name} where the "
f"observed {self.explainer.target} was within the following range:",
target='random-index-reg-y-slider-label-'+self.name),
dcc.RangeSlider(
id='random-index-reg-y-slider-'+self.name,
min=float(self.explainer.y.min()),
max=float(self.explainer.y.max()),
step=np.float_power(10, -self.round),
value=[self.y_slider[0], self.y_slider[1]],
marks={float(self.explainer.y.min()): str(np.round(self.explainer.y.min(), self.round)),
float(self.explainer.y.max()): str(np.round(self.explainer.y.max(), self.round))},
allowCross=False,
tooltip = {'always_visible' : False}
)
], id='random-index-reg-y-slider-div-'+self.name),
], md=8), hide=self.hide_pred_slider),
make_hideable(
dbc.Col([
dbc.Label("Range:", id='random-index-reg-preds-or-y-label-'+self.name, html_for='random-index-reg-preds-or-y-'+self.name),
dbc.Select(
id='random-index-reg-preds-or-y-'+self.name,
options=[
{'label': 'Predicted', 'value': 'preds'},
{'label': 'Observed', 'value': 'y'},
],
value=self.pred_or_y),
                        dbc.Tooltip(f"You can either only select a random {self.explainer.index_name} "
                                    f"from within a certain range of observed {self.explainer.target} or "
                                    f"from within a certain range of predicted {self.explainer.target}.",
target='random-index-reg-preds-or-y-label-'+self.name)
], md=4), hide=self.hide_pred_or_y),
]),
dbc.Row([
make_hideable(
dbc.Col([
html.Div([
dbc.Label("Residuals range:", id='random-index-reg-residual-slider-label-'+self.name,
html_for='random-index-reg-residual-slider-'+self.name),
dbc.Tooltip(f"Only select {self.explainer.index_name} where the "
f"residual (difference between observed {self.explainer.target} and predicted {self.explainer.target})"
" was within the following range:",
target='random-index-reg-residual-slider-label-'+self.name),
dcc.RangeSlider(
id='random-index-reg-residual-slider-'+self.name,
min=float(self.explainer.residuals.min()),
max=float(self.explainer.residuals.max()),
step=np.float_power(10, -self.round),
value=[self.residual_slider[0], self.residual_slider[1]],
marks={float(self.explainer.residuals.min()): str(np.round(self.explainer.residuals.min(), self.round)),
float(self.explainer.residuals.max()): str(np.round(self.explainer.residuals.max(), self.round))},
allowCross=False,
tooltip={'always_visible' : False}
)
], id='random-index-reg-residual-slider-div-'+self.name),
html.Div([
dbc.Label("Absolute residuals", id='random-index-reg-abs-residual-slider-label'+self.name,
html_for='random-index-reg-abs-residual-slider-'+self.name),
dbc.Tooltip(f"Only select {self.explainer.index_name} where the absolute "
f"residual (difference between observed {self.explainer.target} and predicted {self.explainer.target})"
" was within the following range:",
target='random-index-reg-abs-residual-slider-label'+self.name),
dcc.RangeSlider(
id='random-index-reg-abs-residual-slider-'+self.name,
min=float(self.explainer.abs_residuals.min()),
max=float(self.explainer.abs_residuals.max()),
step=np.float_power(10, -self.round),
value=[self.abs_residual_slider[0], self.abs_residual_slider[1]],
marks={float(self.explainer.abs_residuals.min()): str(np.round(self.explainer.abs_residuals.min(), self.round)),
float(self.explainer.abs_residuals.max()): str(np.round(self.explainer.abs_residuals.max(), self.round))},
allowCross=False,
tooltip={'always_visible' : False}
)
], id='random-index-reg-abs-residual-slider-div-'+self.name),
], md=8), hide=self.hide_residual_slider),
make_hideable(
dbc.Col([
dbc.Label("Residuals:", id='random-index-reg-abs-residual-label-'+self.name,
html_for='random-index-reg-abs-residual-'+self.name),
dbc.Select(
id='random-index-reg-abs-residual-'+self.name,
options=[
{'label': 'Residuals', 'value': 'relative'},
{'label': 'Absolute Residuals', 'value': 'absolute'},
],
value='absolute' if self.abs_residuals else 'relative'),
                        dbc.Tooltip(f"You can either only select a random {self.explainer.index_name} "
                                    f"from within a certain range of residuals "
                                    f"(difference between observed and predicted {self.explainer.target}), "
                                    f"so for example only {self.explainer.index_name} for whom the prediction "
                                    f"was too high or too low. "
f"Or you can select only from a certain absolute residual range. So for "
f"example only select {self.explainer.index_name} for which the prediction was at "
f"least a certain amount of {self.explainer.units} off.",
target='random-index-reg-abs-residual-label-'+self.name),
], md=4), hide=self.hide_abs_residuals),
]),
# make_hideable(
# html.Div([
# html.Div([
# dbc.Row([
# dbc.Col([
# html.Div([
# dbc.Label("Residuals range:", id='random-index-reg-residual-slider-label-'+self.name,
# html_for='random-index-reg-residual-slider-'+self.name),
# dbc.Tooltip(f"Only select {self.explainer.index_name} where the "
# f"residual (difference between observed {self.explainer.target} and predicted {self.explainer.target})"
# " was within the following range:",
# target='random-index-reg-residual-slider-label-'+self.name),
# dcc.RangeSlider(
# id='random-index-reg-residual-slider-'+self.name,
# min=float(self.explainer.residuals.min()),
# max=float(self.explainer.residuals.max()),
# step=np.float_power(10, -self.round),
# value=[self.residual_slider[0], self.residual_slider[1]],
# marks={float(self.explainer.residuals.min()): str(np.round(self.explainer.residuals.min(), self.round)),
# float(self.explainer.residuals.max()): str(np.round(self.explainer.residuals.max(), self.round))},
# allowCross=False,
# tooltip={'always_visible' : False}
# )
# ], style={'margin-bottom':0})
# ], md=8)
# ]),
# ], id='random-index-reg-residual-slider-div-'+self.name),
# html.Div([
# dbc.Row([
# dbc.Col([
# html.Div([
# dbc.Label("Absolute residuals", id='random-index-reg-abs-residual-slider-label'+self.name,
# html_for='random-index-reg-abs-residual-slider-'+self.name),
# dbc.Tooltip(f"Only select {self.explainer.index_name} where the absolute "
# f"residual (difference between observed {self.explainer.target} and predicted {self.explainer.target})"
# " was within the following range:",
# target='random-index-reg-abs-residual-slider-label'+self.name),
# dcc.RangeSlider(
# id='random-index-reg-abs-residual-slider-'+self.name,
# min=float(self.explainer.abs_residuals.min()),
# max=float(self.explainer.abs_residuals.max()),
# step=np.float_power(10, -self.round),
# value=[self.abs_residual_slider[0], self.abs_residual_slider[1]],
# marks={float(self.explainer.abs_residuals.min()): str(np.round(self.explainer.abs_residuals.min(), self.round)),
# float(self.explainer.abs_residuals.max()): str(np.round(self.explainer.abs_residuals.max(), self.round))},
# allowCross=False,
# tooltip={'always_visible' : False}
# )
# ], style={'margin-bottom':0})
# ], md=8)
# ])
# ], id='random-index-reg-abs-residual-slider-div-'+self.name),
# ]), hide=self.hide_residual_slider),
# dbc.Row([
# make_hideable(
# dbc.Col([
# dbc.Label("Residuals:", id='random-index-reg-abs-residual-label-'+self.name,
# html_for='random-index-reg-abs-residual-'+self.name),
# dbc.Select(
# id='random-index-reg-abs-residual-'+self.name,
# options=[
# {'label': 'Residuals', 'value': 'relative'},
# {'label': 'Absolute Residuals', 'value': 'absolute'},
# ],
# value='absolute' if self.abs_residuals else 'relative'),
# dbc.Tooltip(f"You can either only select random a {self.explainer.index_name} "
# f"from within a certain range of residuals "
# f"(difference between observed and predicted {self.explainer.target}), "
# f"so for example only {self.explainer.index_name} for whom the prediction "
# f"was too high or too low."
# f"Or you can select only from a certain absolute residual range. So for "
# f"example only select {self.explainer.index_name} for which the prediction was at "
# f"least a certain amount of {self.explainer.units} off.",
# target='random-index-reg-abs-residual-label-'+self.name),
# ], md=4), hide=self.hide_pred_or_y),
# make_hideable(
# dbc.Col([
# html.Div([
# dbc.Select(
# id='random-index-reg-abs-residual-'+self.name,
# options=[
# {'label': 'Use Residuals', 'value': 'relative'},
# {'label': 'Use Absolute Residuals', 'value': 'absolute'},
# ],
# value='absolute' if self.abs_residuals else 'relative'),
# ], id='random-index-reg-abs-residual-div-'+self.name),
# dbc.Tooltip(f"You can either only select random a {self.explainer.index_name} "
# f"from within a certain range of residuals "
# f"(difference between observed and predicted {self.explainer.target}), "
# f"so for example only {self.explainer.index_name} for whom the prediction "
# f"was too high or too low."
# f"Or you can select only from a certain absolute residual range. So for "
# f"example only select {self.explainer.index_name} for which the prediction was at "
# f"least a certain amount of {self.explainer.units} off.",
# target='random-index-reg-abs-residual-div-'+self.name),
# ], md=4), hide=self.hide_abs_residuals),
]),
])
def component_callbacks(self, app):
@app.callback(
[Output('random-index-reg-pred-slider-div-'+self.name, 'style'),
Output('random-index-reg-y-slider-div-'+self.name, 'style')],
[Input('random-index-reg-preds-or-y-'+self.name, 'value')])
def update_reg_hidden_div_pred_sliders(preds_or_y):
if preds_or_y == 'preds':
return (None, dict(display="none"))
elif preds_or_y == 'y':
return (dict(display="none"), None)
raise PreventUpdate
@app.callback(
[Output('random-index-reg-residual-slider-div-'+self.name, 'style'),
Output('random-index-reg-abs-residual-slider-div-'+self.name, 'style')],
[Input('random-index-reg-abs-residual-'+self.name, 'value')])
        def update_reg_hidden_div_residual_sliders(abs_residuals):
if abs_residuals == 'absolute':
return (dict(display="none"), None)
else:
return (None, dict(display="none"))
raise PreventUpdate
@app.callback(
[Output('random-index-reg-residual-slider-'+self.name, 'min'),
Output('random-index-reg-residual-slider-'+self.name, 'max'),
Output('random-index-reg-residual-slider-'+self.name, 'value'),
Output('random-index-reg-residual-slider-'+self.name, 'marks'),
Output('random-index-reg-abs-residual-slider-'+self.name, 'min'),
Output('random-index-reg-abs-residual-slider-'+self.name, 'max'),
Output('random-index-reg-abs-residual-slider-'+self.name, 'value'),
Output('random-index-reg-abs-residual-slider-'+self.name, 'marks'),],
[Input('random-index-reg-pred-slider-'+self.name, 'value'),
Input('random-index-reg-y-slider-'+self.name, 'value')],
[State('random-index-reg-preds-or-y-'+self.name, 'value'),
State('random-index-reg-residual-slider-'+self.name, 'value'),
State('random-index-reg-abs-residual-slider-'+self.name, 'value')])
def update_residual_slider_limits(pred_range, y_range, preds_or_y, residuals_range, abs_residuals_range):
if preds_or_y=='preds':
min_residuals = self.explainer.residuals[(self.explainer.preds >= pred_range[0]) & (self.explainer.preds <= pred_range[1])].min()
max_residuals = self.explainer.residuals[(self.explainer.preds >= pred_range[0]) & (self.explainer.preds <= pred_range[1])].max()
min_abs_residuals = self.explainer.abs_residuals[(self.explainer.preds >= pred_range[0]) & (self.explainer.preds <= pred_range[1])].min()
max_abs_residuals = self.explainer.abs_residuals[(self.explainer.preds >= pred_range[0]) & (self.explainer.preds <= pred_range[1])].max()
elif preds_or_y=='y':
min_residuals = self.explainer.residuals[(self.explainer.y >= y_range[0]) & (self.explainer.y <= y_range[1])].min()
max_residuals = self.explainer.residuals[(self.explainer.y >= y_range[0]) & (self.explainer.y <= y_range[1])].max()
min_abs_residuals = self.explainer.abs_residuals[(self.explainer.y >= y_range[0]) & (self.explainer.y <= y_range[1])].min()
max_abs_residuals = self.explainer.abs_residuals[(self.explainer.y >= y_range[0]) & (self.explainer.y <= y_range[1])].max()
new_residuals_range = [max(min_residuals, residuals_range[0]), min(max_residuals, residuals_range[1])]
new_abs_residuals_range = [max(min_abs_residuals, abs_residuals_range[0]), min(max_abs_residuals, abs_residuals_range[1])]
residuals_marks = {min_residuals: str(np.round(min_residuals, self.round)),
max_residuals: str(np.round(max_residuals, self.round))}
abs_residuals_marks = {min_abs_residuals: str(np.round(min_abs_residuals, self.round)),
max_abs_residuals: str(np.round(max_abs_residuals, self.round))}
return (min_residuals, max_residuals, new_residuals_range, residuals_marks,
min_abs_residuals, max_abs_residuals, new_abs_residuals_range, abs_residuals_marks)
@app.callback(
Output('random-index-reg-index-'+self.name, 'value'),
[Input('random-index-reg-button-'+self.name, 'n_clicks')],
[State('random-index-reg-pred-slider-'+self.name, 'value'),
State('random-index-reg-y-slider-'+self.name, 'value'),
State('random-index-reg-residual-slider-'+self.name, 'value'),
State('random-index-reg-abs-residual-slider-'+self.name, 'value'),
State('random-index-reg-preds-or-y-'+self.name, 'value'),
State('random-index-reg-abs-residual-'+self.name, 'value')])
def update_index(n_clicks, pred_range, y_range, residual_range, abs_residuals_range, preds_or_y, abs_residuals):
if n_clicks is None and self.index is not None:
raise PreventUpdate
if preds_or_y == 'preds':
if abs_residuals=='absolute':
return self.explainer.random_index(
pred_min=pred_range[0], pred_max=pred_range[1],
abs_residuals_min=abs_residuals_range[0],
abs_residuals_max=abs_residuals_range[1],
return_str=True)
else:
return self.explainer.random_index(
pred_min=pred_range[0], pred_max=pred_range[1],
residuals_min=residual_range[0],
residuals_max=residual_range[1],
return_str=True)
elif preds_or_y == 'y':
if abs_residuals=='absolute':
return self.explainer.random_index(
y_min=y_range[0], y_max=y_range[1],
abs_residuals_min=abs_residuals_range[0],
abs_residuals_max=abs_residuals_range[1],
return_str=True)
else:
return self.explainer.random_index(
                        y_min=y_range[0], y_max=y_range[1],
residuals_min=residual_range[0],
residuals_max=residual_range[1],
return_str=True)
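# Usage sketch (illustrative; assumes a fitted RegressionExplainer named `explainer`):
#
#   from explainerdashboard import ExplainerDashboard
#   component = RegressionRandomIndexComponent(explainer, hide_residual_slider=True)
#   ExplainerDashboard(explainer, component).run()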
class RegressionPredictionSummaryComponent(ExplainerComponent):
def __init__(self, explainer, title="Prediction", name=None,
hide_index=False, hide_title=False,
hide_subtitle=False, hide_table=False,
feature_input_component=None,
index=None, round=3, description=None,
**kwargs):
"""Shows a summary for a particular prediction
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Prediction".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_index (bool, optional): hide index selector. Defaults to False.
hide_title (bool, optional): hide title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_table (bool, optional): hide the results table
feature_input_component (FeatureInputComponent): A FeatureInputComponent
that will give the input to the graph instead of the index selector.
If not None, hide_index=True. Defaults to None.
index ({int, str}, optional): Index to display prediction summary for. Defaults to None.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
self.index_name = 'reg-prediction-index-'+self.name
if self.feature_input_component is not None:
self.exclude_callbacks(self.feature_input_component)
self.hide_index = True
if self.description is None: self.description = f"""
Shows the predicted {self.explainer.target} and the observed {self.explainer.target},
as well as the difference between the two (residual)
"""
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.H3(self.title, id='reg-prediction-title-'+self.name, className='card-title'),
dbc.Tooltip(self.description, target='reg-prediction-title-'+self.name),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.Label(f"{self.explainer.index_name}:"),
dcc.Dropdown(id='reg-prediction-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
value=self.index)
], md=6), hide=self.hide_index),
]),
dbc.Row([
dbc.Col([
html.Div(id='reg-prediction-div-'+self.name)
])
])
])
])
def component_callbacks(self, app):
if self.feature_input_component is None:
@app.callback(
Output('reg-prediction-div-'+self.name, 'children'),
[Input('reg-prediction-index-'+self.name, 'value')])
def update_output_div(index):
if index is not None:
preds_df = self.explainer.prediction_result_df(index, round=self.round)
return make_hideable(
dbc.Table.from_dataframe(preds_df, striped=False, bordered=False, hover=False),
hide=self.hide_table)
raise PreventUpdate
else:
@app.callback(
Output('reg-prediction-div-'+self.name, 'children'),
[*self.feature_input_component._feature_callback_inputs])
def update_output_div(*inputs):
X_row = self.explainer.get_row_from_input(inputs, ranked_by_shap=True)
preds_df = self.explainer.prediction_result_df(X_row=X_row, round=self.round)
return make_hideable(
dbc.Table.from_dataframe(preds_df, striped=False, bordered=False, hover=False),
hide=self.hide_table)
class PredictedVsActualComponent(ExplainerComponent):
def __init__(self, explainer, title="Predicted vs Actual", name=None,
subtitle="How close is the predicted value to the observed?",
hide_title=False, hide_subtitle=False,
hide_log_x=False, hide_log_y=False,
logs=False, log_x=False, log_y=False, description=None,
**kwargs):
"""Shows a plot of predictions vs y.
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Predicted vs Actual".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional) Hide the title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_log_x (bool, optional): Hide the log_x toggle. Defaults to False.
hide_log_y (bool, optional): Hide the log_y toggle. Defaults to False.
logs (bool, optional): Whether to use log axis. Defaults to False.
log_x (bool, optional): log only x axis. Defaults to False.
log_y (bool, optional): log only y axis. Defaults to False.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
self.logs, self.log_x, self.log_y = logs, log_x, log_y
if self.description is None: self.description = f"""
Plot shows the observed {self.explainer.target} and the predicted
{self.explainer.target} in the same plot. A perfect model would have
all the points on the diagonal (predicted matches observed). The further
        away the points are from the diagonal, the worse the model is at predicting
{self.explainer.target}.
"""
self.register_dependencies(['preds'])
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='pred-vs-actual-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='pred-vs-actual-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.FormGroup(
[
# html.Label("Log y"),
dbc.RadioButton(
id='pred-vs-actual-logy-'+self.name,
className="form-check-input",
checked=self.log_y),
dbc.Tooltip("By using a log axis, it is easier to see relative "
"errors instead of absolute errors.",
target='pred-vs-actual-logy-'+self.name),
dbc.Label("Log y",
html_for='pred-vs-actual-logy-'+self.name,
className="form-check-label"),
], check=True),
], md=1, align="center"), hide=self.hide_log_y),
dbc.Col([
dcc.Graph(id='pred-vs-actual-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False)),
], md=11)
]),
dbc.Row([
make_hideable(
dbc.Col([
dbc.FormGroup(
[
dbc.RadioButton(
id='pred-vs-actual-logx-'+self.name,
className="form-check-input",
checked=self.log_x),
dbc.Tooltip("By using a log axis, it is easier to see relative "
"errors instead of absolute errors.",
target='pred-vs-actual-logx-'+self.name),
dbc.Label("Log x",
html_for='pred-vs-actual-logx-'+self.name,
className="form-check-label"),
], check=True),
], md=2), hide=self.hide_log_x),
], justify="center"),
]),
])
def component_callbacks(self, app):
@app.callback(
Output('pred-vs-actual-graph-'+self.name, 'figure'),
[Input('pred-vs-actual-logx-'+self.name, 'checked'),
Input('pred-vs-actual-logy-'+self.name, 'checked')],
)
def update_predicted_vs_actual_graph(log_x, log_y):
return self.explainer.plot_predicted_vs_actual(log_x=log_x, log_y=log_y)
class ResidualsComponent(ExplainerComponent):
def __init__(self, explainer, title="Residuals", name=None,
subtitle="How much is the model off?",
hide_title=False, hide_subtitle=False, hide_footer=False,
hide_pred_or_actual=False, hide_ratio=False,
pred_or_actual="vs_pred", residuals="difference",
description=None, **kwargs):
"""Residuals plot component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Residuals".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional) Hide the title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_footer (bool, optional): hide the footer at the bottom of the component
hide_pred_or_actual (bool, optional): hide vs predictions or vs
actual for x-axis toggle. Defaults to False.
hide_ratio (bool, optional): hide residual type dropdown. Defaults to False.
pred_or_actual (str, {'vs_actual', 'vs_pred'}, optional): Whether
to plot actual or predictions on the x-axis.
Defaults to "vs_pred".
residuals (str, {'difference', 'ratio', 'log-ratio'} optional):
                How to calculate residuals. Defaults to 'difference'.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
assert residuals in ['difference', 'ratio', 'log-ratio'], \
("parameter residuals should in ['difference', 'ratio', 'log-ratio']"
f" but you passed residuals={residuals}")
if self.description is None: self.description = f"""
The residuals are the difference between the observed {self.explainer.target}
and predicted {self.explainer.target}. In this plot you can check if
the residuals are higher or lower for higher/lower actual/predicted outcomes.
So you can check if the model works better or worse for different {self.explainer.target}
levels.
"""
self.register_dependencies(['preds', 'residuals'])
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='residuals-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='residuals-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
dbc.Col([
dcc.Graph(id='residuals-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False)),
])
])
]),
make_hideable(
dbc.CardFooter([
dbc.Row([
make_hideable(
dbc.Col([
dbc.FormGroup(
[
dbc.Label("Horizontal axis:", html_for='residuals-pred-or-actual-'+self.name),
dbc.Select(
options=[
{"label": "Predicted", "value": "vs_pred"},
{"label": "Observed", "value": "vs_actual"},
],
value=self.pred_or_actual,
id='residuals-pred-or-actual-'+self.name,
),
], id='residuals-pred-or-actual-form-'+self.name),
dbc.Tooltip("Select what you would like to put on the x-axis:"
f" observed {self.explainer.target} or predicted {self.explainer.target}.",
target='residuals-pred-or-actual-form-'+self.name),
], md=3), hide=self.hide_pred_or_actual),
make_hideable(
dbc.Col([
html.Label('Residual type:', id='residuals-type-label-'+self.name),
dbc.Tooltip("Type of residuals to display: y-preds (difference), "
"y/preds (ratio) or log(y/preds) (logratio).",
target='residuals-type-label-'+self.name),
dbc.Select(id='residuals-type-'+self.name,
options = [{'label': 'Difference', 'value': 'difference'},
{'label': 'Ratio', 'value': 'ratio'},
{'label': 'Log ratio', 'value': 'log-ratio'}],
value=self.residuals),
], md=3), hide=self.hide_ratio),
]),
]), hide=self.hide_footer)
])
def register_callbacks(self, app):
@app.callback(
Output('residuals-graph-'+self.name, 'figure'),
[Input('residuals-pred-or-actual-'+self.name, 'value'),
Input('residuals-type-'+self.name, 'value')],
)
def update_residuals_graph(pred_or_actual, residuals):
vs_actual = pred_or_actual=='vs_actual'
return self.explainer.plot_residuals(vs_actual=vs_actual, residuals=residuals)
class RegressionVsColComponent(ExplainerComponent):
def __init__(self, explainer, title="Plot vs feature", name=None,
subtitle="Are predictions and residuals correlated with features?",
hide_title=False, hide_subtitle=False, hide_footer=False,
hide_col=False, hide_ratio=False, hide_cats=False,
hide_points=False, hide_winsor=False,
col=None, display='difference', cats=True,
points=True, winsor=0, description=None, **kwargs):
"""Show residuals, observed or preds vs a particular Feature component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Plot vs feature".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional) Hide the title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_footer (bool, optional): hide the footer at the bottom of the component
            hide_col (bool, optional): Hide the column selector. Defaults to False.
hide_ratio (bool, optional): Hide the toggle. Defaults to False.
hide_cats (bool, optional): Hide group cats toggle. Defaults to False.
hide_points (bool, optional): Hide group points toggle. Defaults to False.
hide_winsor (bool, optional): Hide winsor input. Defaults to False.
col ([type], optional): Initial feature to display. Defaults to None.
display (str, {'observed', 'predicted', difference', 'ratio', 'log-ratio'} optional):
What to display on y axis. Defaults to 'difference'.
cats (bool, optional): group categorical columns. Defaults to True.
points (bool, optional): display point cloud next to violin plot
for categorical cols. Defaults to True
winsor (int, 0-50, optional): percentage of outliers to winsor out of
the y-axis. Defaults to 0.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
if self.col is None:
self.col = self.explainer.columns_ranked_by_shap(self.cats)[0]
assert self.display in {'observed', 'predicted', 'difference', 'ratio', 'log-ratio'}, \
("parameter display should in {'observed', 'predicted', 'difference', 'ratio', 'log-ratio'}"
f" but you passed display={self.display}!")
if self.description is None: self.description = f"""
This plot shows either residuals (difference between observed {self.explainer.target}
and predicted {self.explainer.target}) plotted against the values of different features,
or the observed or predicted {self.explainer.target}.
This allows you to inspect whether the model is more wrong for particular
        ranges of feature values than for others.
"""
self.register_dependencies(['preds', 'residuals'])
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='reg-vs-col-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='reg-vs-col-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.Label("Feature:", id='reg-vs-col-col-label-'+self.name),
dbc.Tooltip("Select the feature to display on the x-axis.",
target='reg-vs-col-col-label-'+self.name),
dbc.Select(id='reg-vs-col-col-'+self.name,
options=[{'label': col, 'value':col}
for col in self.explainer.columns_ranked_by_shap(self.cats)],
value=self.col),
], md=4), hide=self.hide_col),
make_hideable(
dbc.Col([
html.Label('Display:', id='reg-vs-col-display-type-label-'+self.name),
                            dbc.Tooltip(f"Select what to display on the y axis: observed {self.explainer.target}, "
                                        f"predicted {self.explainer.target} or residuals. Residuals can either "
                                        "be calculated by taking the difference (y-preds), "
"ratio (y/preds) or log ratio log(y/preds). The latter makes it easier to "
"see relative differences.",
target='reg-vs-col-display-type-label-'+self.name),
dbc.Select(id='reg-vs-col-display-type-'+self.name,
options = [{'label': 'Observed', 'value': 'observed'},
{'label': 'Predicted', 'value': 'predicted'},
{'label': 'Residuals: Difference', 'value': 'difference'},
{'label': 'Residuals: Ratio', 'value': 'ratio'},
{'label': 'Residuals: Log ratio', 'value': 'log-ratio'}],
value=self.display),
], md=4), hide=self.hide_ratio),
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Grouping:", id='reg-vs-col-group-cats-label-'+self.name),
dbc.Tooltip("Group onehot encoded categorical variables together",
target='reg-vs-col-group-cats-label-'+self.name),
dbc.Checklist(
options=[{"label": "Group cats", "value": True}],
value=[True] if self.cats else [],
id='reg-vs-col-group-cats-'+self.name,
inline=True,
switch=True,
),
]),
], md=2), self.hide_cats),
]),
dbc.Row([
dbc.Col([
dcc.Graph(id='reg-vs-col-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False)),
])
]),
]),
make_hideable(
dbc.CardFooter([
dbc.Row([
make_hideable(
dbc.Col([
dbc.Label("Winsor:", id='reg-vs-col-winsor-label-'+self.name),
                            dbc.Tooltip("Exclude the highest and lowest y values from the plot. "
"When you have some real outliers it can help to remove them"
" from the plot so it is easier to see the overall pattern.",
target='reg-vs-col-winsor-label-'+self.name),
dbc.Input(id='reg-vs-col-winsor-'+self.name,
value=self.winsor,
type="number", min=0, max=49, step=1),
], md=4), hide=self.hide_winsor),
make_hideable(
dbc.Col([
html.Div([
dbc.FormGroup([
dbc.Label("Scatter:"),
dbc.Tooltip("For categorical features, display "
"a point cloud next to the violin plots.",
target='reg-vs-col-show-points-'+self.name),
dbc.Checklist(
options=[{"label": "Show point cloud", "value": True}],
value=[True] if self.points else [],
id='reg-vs-col-show-points-'+self.name,
inline=True,
switch=True,
),
]),
], id='reg-vs-col-show-points-div-'+self.name)
], md=4), self.hide_points),
])
]), hide=self.hide_footer)
])
def register_callbacks(self, app):
@app.callback(
[Output('reg-vs-col-graph-'+self.name, 'figure'),
Output('reg-vs-col-show-points-div-'+self.name, 'style')],
[Input('reg-vs-col-col-'+self.name, 'value'),
Input('reg-vs-col-display-type-'+self.name, 'value'),
Input('reg-vs-col-show-points-'+self.name, 'value'),
Input('reg-vs-col-winsor-'+self.name, 'value')],
)
def update_residuals_graph(col, display, points, winsor):
style = {} if col in self.explainer.cats else dict(display="none")
if display == 'observed':
return self.explainer.plot_y_vs_feature(
col, points=bool(points), winsor=winsor, dropna=True), style
elif display == 'predicted':
return self.explainer.plot_preds_vs_feature(
col, points=bool(points), winsor=winsor, dropna=True), style
else:
return self.explainer.plot_residuals_vs_feature(
col, residuals=display, points=bool(points),
winsor=winsor, dropna=True), style
@app.callback(
Output('reg-vs-col-col-'+self.name, 'options'),
[Input('reg-vs-col-group-cats-'+self.name, 'value')])
def update_dependence_shap_scatter_graph(cats):
return [{'label': col, 'value': col}
for col in self.explainer.columns_ranked_by_shap(bool(cats))]
class RegressionModelSummaryComponent(ExplainerComponent):
def __init__(self, explainer, title="Model Summary", name=None,
subtitle="Quantitative metrics for model performance",
hide_title=False, hide_subtitle=False,
round=3, description=None, **kwargs):
"""Show model summary statistics (RMSE, MAE, R2) component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Model Summary".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional): hide title
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
round (int): rounding to perform to metric floats.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
if self.description is None: self.description = f"""
In the table below you can find a number of regression performance
metrics that describe how well the model is able to predict
{self.explainer.target}.
"""
self.register_dependencies(['preds', 'residuals'])
def layout(self):
metrics_dict = self.explainer.metrics_descriptions()
metrics_df = (pd.DataFrame(self.explainer.metrics(), index=["Score"]).T
.rename_axis(index="metric").reset_index().round(self.round))
metrics_table = dbc.Table.from_dataframe(metrics_df, striped=False, bordered=False, hover=False)
metrics_table, tooltips = get_dbc_tooltips(metrics_table,
metrics_dict,
"reg-model-summary-div-hover",
self.name)
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='reg-model-summary-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='reg-model-summary-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
metrics_table,
*tooltips
]),
])
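# Hedged usage sketch (not part of the component code above): one way these
# components might be wired into a dashboard. It assumes the explainerdashboard
# package API (RegressionExplainer, ExplainerDashboard) and that the
# 'reg-vs-col' layout above belongs to a class named RegressionVsColComponent;
# the sklearn model and data below are only illustrative.
if __name__ == "__main__":
    from sklearn.datasets import fetch_california_housing
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.model_selection import train_test_split
    from explainerdashboard import RegressionExplainer, ExplainerDashboard

    X, y = fetch_california_housing(return_X_y=True, as_frame=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
    model = RandomForestRegressor(n_estimators=50, random_state=0).fit(X_train, y_train)

    explainer = RegressionExplainer(model, X_test, y_test)
    ExplainerDashboard(explainer, [
        RegressionVsColComponent(explainer),
        RegressionModelSummaryComponent(explainer, round=2),
    ]).run(port=8050)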
|
the-stack_106_29153 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import shutil
from setuptools.command.test import test as TestCommand
from setuptools import find_packages
def remove_dir(dirpath):
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
requires = [] #during runtime
tests_require=['pytest>=2.3'] #for testing
PACKAGE_PATH = os.path.abspath(os.path.join(__file__, os.pardir))
setup(
name='ngene',
version='0.1.0',
description='Ngene is Deep Learinng provider.',
author='Alireza',
url='https://github.com/vafaeiar/ngene',
    packages=find_packages(where=PACKAGE_PATH, include=['ngene', 'ngene.*']),
package_dir={'ngene': 'ngene'},
include_package_data=True,
    install_requires=requires,
    tests_require=tests_require,
license='MIT',
zip_safe=False,
keywords='ngene',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
"Intended Audience :: Science/Research",
'Intended Audience :: Developers',
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
]
)
remove_dir('build')
remove_dir('ngene.egg-info')
remove_dir('dist')
|
the-stack_106_29155 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class VotcaTools(CMakePackage):
"""Versatile Object-oriented Toolkit for Coarse-graining
Applications (VOTCA) is a package intended to reduce the amount of
routine work when doing systematic coarse-graining of various
systems. The core is written in C++.
This package contains the basic tools library of VOTCA.
"""
homepage = "http://www.votca.org"
url = "https://github.com/votca/tools/tarball/v1.4"
git = "https://github.com/votca/tools.git"
version('develop', branch='master')
version('1.5', sha256='a82a6596c24ff06e79eab17ca02f4405745ceeeb66369693a59023ad0b62cf22')
version('1.4.1', '3176b72f8a41ec053cc740a5398e7dc4')
version('1.4', 'cd47868e9f28e2c7b9d01f95aa0185ca')
depends_on("[email protected]:", type='build')
depends_on("expat")
depends_on("fftw")
depends_on("gsl", when="@:1.4.9999")
depends_on("[email protected]:", when="@1.5:")
depends_on("boost")
depends_on("sqlite")
def cmake_args(self):
args = [
'-DWITH_RC_FILES=OFF'
]
return args
|
the-stack_106_29158 | # -*- coding: utf-8 -*-
import os
import discord
from discord.ext import commands
from discord.ext.commands import CommandNotFound
import logging
import asyncio
import itertools
import sys
import traceback
import random
import itertools
import math
from async_timeout import timeout
from functools import partial
import functools
from youtube_dl import YoutubeDL
import youtube_dl
from io import StringIO
import time
import urllib.request
from gtts import gTTS
##################### Logging ###########################
log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.WARNING)
#ilsanglog = logging.getLogger('discord')
#ilsanglog.setLevel(level = logging.WARNING)
#handler = logging.StreamHandler()
#handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
#ilsanglog.addHandler(handler)
#####################################################
access_token = os.environ["BOT_TOKEN"]
def init():
global command
command = []
fc = []
command_inidata = open('command.ini', 'r', encoding = 'utf-8')
command_inputData = command_inidata.readlines()
    ############## Music bot command list #####################
for i in range(len(command_inputData)):
tmp_command = command_inputData[i][12:].rstrip('\n')
fc = tmp_command.split(', ')
command.append(fc)
fc = []
del command[0]
command_inidata.close()
#print (command)
init()
# Create a speech audio (.wav) file from text (uses gTTS)
async def MakeSound(saveSTR, filename):
tts = gTTS(saveSTR, lang = 'ko')
tts.save('./' + filename + '.wav')
'''
try:
encText = urllib.parse.quote(saveSTR)
urllib.request.urlretrieve("https://clova.ai/proxy/voice/api/tts?text=" + encText + "%0A&voicefont=1&format=wav",filename + '.wav')
except Exception as e:
print (e)
tts = gTTS(saveSTR, lang = 'ko')
tts.save('./' + filename + '.wav')
pass
'''
# Play an audio file on the given voice client
async def PlaySound(voiceclient, filename):
source = discord.FFmpegPCMAudio(filename)
try:
voiceclient.play(source)
except discord.errors.ClientException:
while voiceclient.is_playing():
await asyncio.sleep(1)
while voiceclient.is_playing():
await asyncio.sleep(1)
voiceclient.stop()
source.cleanup()
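# Hedged usage sketch: how the two helpers above are meant to be chained inside
# a command handler. It assumes `ctx.voice_client` is an already-connected
# discord.VoiceClient and `message` is the text to speak; the function is only
# illustrative and is not registered as a bot command.
async def say_in_voice_channel(ctx, message):
    filename = 'say' + str(ctx.guild.id)
    await MakeSound(message, filename)                    # writes ./say<guild id>.wav via gTTS
    await PlaySound(ctx.voice_client, filename + '.wav')  # plays the file and waits for playback to end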
# Silence useless bug reports messages
youtube_dl.utils.bug_reports_message = lambda: ''
class VoiceError(Exception):
pass
class YTDLError(Exception):
pass
class YTDLSource(discord.PCMVolumeTransformer):
YTDL_OPTIONS = {
'format': 'bestaudio/best',
'extractaudio': True,
'audioformat': 'mp3',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': False,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0',
'force-ipv4' : True,
'-4': True
}
FFMPEG_OPTIONS = {
'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn',
}
ytdl = youtube_dl.YoutubeDL(YTDL_OPTIONS)
def __init__(self, ctx: commands.Context, source: discord.FFmpegPCMAudio, *, data: dict, volume: float = 0.5):
super().__init__(source, volume)
self.requester = ctx.author
self.channel = ctx.channel
self.data = data
self.uploader = data.get('uploader')
self.uploader_url = data.get('uploader_url')
date = data.get('upload_date')
self.upload_date = date[6:8] + '.' + date[4:6] + '.' + date[0:4]
self.title = data.get('title')
self.thumbnail = data.get('thumbnail')
self.description = data.get('description')
self.duration = self.parse_duration(int(data.get('duration')))
self.tags = data.get('tags')
self.url = data.get('webpage_url')
self.views = data.get('view_count')
self.likes = data.get('like_count')
self.dislikes = data.get('dislike_count')
self.stream_url = data.get('url')
def __str__(self):
return '**{0.title}** by **{0.uploader}**'.format(self)
@classmethod
async def create_source(cls, bot, ctx: commands.Context, search: str, *, loop: asyncio.BaseEventLoop = None):
loop = loop or asyncio.get_event_loop()
if "http" not in search:
partial = functools.partial(cls.ytdl.extract_info, f"ytsearch5:{search}", download=False, process=False)
data = await loop.run_in_executor(None, partial)
if data is None:
raise YTDLError('Couldn\'t find anything that matches `{}`'.format(search))
            emoji_list : list = ["1️⃣", "2️⃣", "3️⃣", "4️⃣", "5️⃣", "🚫"]
song_list_str : str = ""
cnt : int = 0
song_index : int = 0
song_url_list : list = []
for data_info in data["entries"]:
cnt += 1
if 'title' not in data_info:
data_info['title'] = f"{search} - ์ ๋ชฉ ์ ๋ณด ์์"
song_list_str += f"`{cnt}.` [**{data_info['title']}**](https://www.youtube.com/watch?v={data_info['url']})\n"
song_url_list.append(f"https://www.youtube.com/watch?v={data_info['url']}")
embed = discord.Embed(description= song_list_str)
embed.set_footer(text=f"20์ด ์์ ๋ฏธ์ ํ์ ์ทจ์๋ฉ๋๋ค.")
song_list_message = await ctx.send(embed = embed)
for emoji in emoji_list:
await song_list_message.add_reaction(emoji)
def reaction_check(reaction, user):
return (reaction.message.id == song_list_message.id) and (user.id == ctx.author.id) and (str(reaction) in emoji_list)
try:
reaction, user = await bot.wait_for('reaction_add', check = reaction_check, timeout = 20)
except asyncio.TimeoutError:
reaction = "๐ซ"
for emoji in emoji_list:
await song_list_message.remove_reaction(emoji, bot.user)
await song_list_message.delete(delay = 10)
if str(reaction) == "1๏ธโฃ":
song_index = 0
elif str(reaction) == "2๏ธโฃ":
song_index = 1
elif str(reaction) == "3๏ธโฃ":
song_index = 2
elif str(reaction) == "4๏ธโฃ":
song_index = 3
elif str(reaction) == "5๏ธโฃ":
song_index = 4
else:
return False
#result_url = f"https://www.youtube.com/watch?v={data['entries'][song_index]['url']}"
result_url = song_url_list[song_index]
print(result_url)
else:
result_url = search
webpage_url = result_url
partial = functools.partial(cls.ytdl.extract_info, webpage_url, download=False)
processed_info = await loop.run_in_executor(None, partial)
if processed_info is None:
raise YTDLError('Couldn\'t fetch `{}`'.format(webpage_url))
if 'entries' not in processed_info:
info = processed_info
else:
info = None
while info is None:
try:
info = processed_info['entries'].pop(0)
except IndexError:
raise YTDLError('Couldn\'t retrieve any matches for `{}`'.format(webpage_url))
return cls(ctx, discord.FFmpegPCMAudio(info['url'], **cls.FFMPEG_OPTIONS), data=info)
@staticmethod
def parse_duration(duration: int):
return time.strftime('%H:%M:%S', time.gmtime(duration))
class Song:
__slots__ = ('source', 'requester')
def __init__(self, source: YTDLSource):
self.source = source
self.requester = source.requester
def create_embed(self):
embed = (discord.Embed(title='Now playing',
description='**```fix\n{0.source.title}\n```**'.format(self),
color=discord.Color.blurple())
.add_field(name='Duration', value=self.source.duration)
.add_field(name='Requested by', value=self.requester.mention)
.add_field(name='Uploader', value='[{0.source.uploader}]({0.source.uploader_url})'.format(self))
.add_field(name='URL', value='[Click]({0.source.url})'.format(self))
.set_thumbnail(url=self.source.thumbnail))
return embed
class SongQueue(asyncio.Queue):
def __getitem__(self, item):
if isinstance(item, slice):
return list(itertools.islice(self._queue, item.start, item.stop, item.step))
else:
return self._queue[item]
def __iter__(self):
return self._queue.__iter__()
def __len__(self):
return self.qsize()
def clear(self):
self._queue.clear()
def shuffle(self):
random.shuffle(self._queue)
def reserve(self, item):
self._queue.insert(0, item)
def select(self, index : int, loop : bool = False):
for i in range(index-1):
if not loop:
del self._queue[0]
else:
self._queue.append(self._queue[0])
del self._queue[0]
def remove(self, index: int):
del self._queue[index]
class VoiceState:
def __init__(self, bot: commands.Bot, ctx: commands.Context):
self.bot = bot
self._ctx = ctx
self._cog = ctx.cog
self.current = None
self.voice = None
self.next = asyncio.Event()
self.songs = SongQueue()
self._loop = False
self._volume = 0.5
self.skip_votes = set()
self.audio_player = bot.loop.create_task(self.audio_player_task())
def __del__(self):
self.audio_player.cancel()
@property
def loop(self):
return self._loop
@loop.setter
def loop(self, value: bool):
self._loop = value
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value: float):
self._volume = value
@property
def is_playing(self):
return self.voice and self.current
async def audio_player_task(self):
while True:
self.next.clear()
if self.loop and self.current is not None:
source1 = await YTDLSource.create_source(self.bot, self._ctx, self.current.source.url, loop=self.bot.loop)
song1 = Song(source1)
await self.songs.put(song1)
else:
pass
try:
async with timeout(180): # 3 minutes
self.current = await self.songs.get()
except asyncio.TimeoutError:
self.bot.loop.create_task(self.stop())
return
self.current.source.volume = self._volume
self.voice.play(self.current.source, after=self.play_next_song)
play_info_msg = await self.current.source.channel.send(embed=self.current.create_embed())
await play_info_msg.delete(delay = 20)
await self.next.wait()
def play_next_song(self, error=None):
if error:
raise VoiceError(str(error))
self.next.set()
def skip(self):
self.skip_votes.clear()
if self.is_playing:
self.voice.stop()
async def stop(self):
self.songs.clear()
if self.voice:
await self.voice.disconnect()
self.voice = None
self.bot.loop.create_task(self._cog.cleanup(self._ctx))
class Music(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.voice_states = {}
def get_voice_state(self, ctx: commands.Context):
state = self.voice_states.get(ctx.guild.id)
if not state:
state = VoiceState(self.bot, ctx)
self.voice_states[ctx.guild.id] = state
return state
def cog_unload(self):
for state in self.voice_states.values():
self.bot.loop.create_task(state.stop())
def cog_check(self, ctx: commands.Context):
if not ctx.guild:
raise commands.NoPrivateMessage('This command can\'t be used in DM channels.')
return True
async def cog_before_invoke(self, ctx: commands.Context):
ctx.voice_state = self.get_voice_state(ctx)
async def cog_command_error(self, ctx: commands.Context, error: commands.CommandError):
        await ctx.send('Error: {}'.format(str(error)))
'''
@commands.command(name='join', invoke_without_subcommand=True)
async def _join(self, ctx: commands.Context):
destination = ctx.author.voice.channel
if ctx.voice_state.voice:
await ctx.voice_state.voice.move_to(destination)
return
ctx.voice_state.voice = await destination.connect()
'''
async def cleanup(self, ctx: commands.Context):
del self.voice_states[ctx.guild.id]
@commands.command(name=command[0][0], aliases=command[0][1:])
#@commands.has_permissions(manage_guild=True)
async def _summon(self, ctx: commands.Context, *, channel: discord.VoiceChannel = None):
if not channel and not ctx.author.voice:
            raise VoiceError(':no_entry_sign: You are not connected to a voice channel.')
destination = channel or ctx.author.voice.channel
if ctx.voice_state.voice:
await ctx.voice_state.voice.move_to(destination)
return
ctx.voice_state.voice = await destination.connect()
@commands.command(name=command[1][0], aliases=command[1][1:])
#@commands.has_permissions(manage_guild=True)
async def _leave(self, ctx: commands.Context):
if not ctx.voice_state.voice:
            return await ctx.send(':no_entry_sign: The bot is not connected to any voice channel.')
await ctx.voice_state.stop()
del self.voice_states[ctx.guild.id]
@commands.command(name=command[8][0], aliases=command[8][1:])
async def _volume(self, ctx: commands.Context, *, volume: int):
vc = ctx.voice_client
if not ctx.voice_state.is_playing:
            return await ctx.send(':mute: Nothing is currently playing.')
if not 0 < volume < 101:
            return await ctx.send('```Please enter a volume between 1 and 100.```')
if vc.source:
vc.source.volume = volume / 100
ctx.voice_state.volume = volume / 100
        await ctx.send(':loud_sound: Volume set to {}%.'.format(volume))
@commands.command(name=command[7][0], aliases=command[7][1:])
async def _now(self, ctx: commands.Context):
await ctx.send(embed=ctx.voice_state.current.create_embed())
@commands.command(name=command[3][0], aliases=command[3][1:])
#@commands.has_permissions(manage_guild=True)
async def _pause(self, ctx: commands.Context):
if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():
ctx.voice_state.voice.pause()
            await ctx.message.add_reaction('⏸')
@commands.command(name=command[4][0], aliases=command[4][1:])
#@commands.has_permissions(manage_guild=True)
async def _resume(self, ctx: commands.Context):
if ctx.voice_state.is_playing and ctx.voice_state.voice.is_paused():
ctx.voice_state.voice.resume()
            await ctx.message.add_reaction('⏯')
@commands.command(name=command[9][0], aliases=command[9][1:])
#@commands.has_permissions(manage_guild=True)
async def _stop(self, ctx: commands.Context):
ctx.voice_state.songs.clear()
if ctx.voice_state.is_playing:
ctx.voice_state.voice.stop()
        await ctx.message.add_reaction('⏹')
@commands.command(name=command[5][0], aliases=command[5][1:])
async def _skip(self, ctx: commands.Context, *, args: int = 1):
if not ctx.voice_state.is_playing:
            return await ctx.send(':mute: Nothing is currently playing.')
        await ctx.message.add_reaction('⏭')
if args != 1:
ctx.voice_state.songs.select(args, ctx.voice_state.loop)
ctx.voice_state.skip()
'''
voter = ctx.message.author
if voter == ctx.voice_state.current.requester:
            await ctx.message.add_reaction('⏭')
ctx.voice_state.skip()
elif voter.id not in ctx.voice_state.skip_votes:
ctx.voice_state.skip_votes.add(voter.id)
total_votes = len(ctx.voice_state.skip_votes)
if total_votes >= 3:
                await ctx.message.add_reaction('⏭')
ctx.voice_state.skip()
else:
await ctx.send('Skip vote added, currently at **{}/3**'.format(total_votes))
else:
            await ctx.send('```You have already voted.```')
'''
@commands.command(name=command[6][0], aliases=command[6][1:])
async def _queue(self, ctx: commands.Context, *, page: int = 1):
if len(ctx.voice_state.songs) == 0:
            return await ctx.send(':mute: The queue is empty.')
items_per_page = 10
pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)
start = (page - 1) * items_per_page
end = start + items_per_page
queue = ''
for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):
queue += '`{0}.` [**{1.source.title}**]({1.source.url})\n'.format(i + 1, song)
if ctx.voice_state.loop:
            embed = discord.Embed(title = '🔁 Now playing', description='**```fix\n{0.source.title}\n```**'.format(ctx.voice_state.current))
else:
embed = discord.Embed(title = 'Now playing', description='**```fix\n{0.source.title}\n```**'.format(ctx.voice_state.current))
embed.add_field(name ='\u200B\n**{} tracks:**\n'.format(len(ctx.voice_state.songs)), value = f"\u200B\n{queue}")
embed.set_thumbnail(url=ctx.voice_state.current.source.thumbnail)
embed.set_footer(text='Viewing page {}/{}'.format(page, pages))
await ctx.send(embed=embed)
@commands.command(name=command[11][0], aliases=command[11][1:])
async def _shuffle(self, ctx: commands.Context):
if len(ctx.voice_state.songs) == 0:
            return await ctx.send(':mute: The queue is empty.')
ctx.voice_state.songs.shuffle()
        result = await ctx.send('Shuffle complete!')
        await result.add_reaction('👍')
@commands.command(name=command[10][0], aliases=command[10][1:])
async def _remove(self, ctx: commands.Context, index: int):
if len(ctx.voice_state.songs) == 0:
            return await ctx.send(':mute: The queue is empty.')
        remove_result = '`{0}.` [**{1.source.title}**] removed!\n'.format(index, ctx.voice_state.songs[index - 1])
        result = await ctx.send(remove_result)
        ctx.voice_state.songs.remove(index - 1)
        await result.add_reaction('✅')
@commands.command(name=command[14][0], aliases=command[14][1:])
async def _loop(self, ctx: commands.Context):
if not ctx.voice_state.is_playing:
            return await ctx.send(':mute: Nothing is currently playing.')
# Inverse boolean value to loop and unloop.
ctx.voice_state.loop = not ctx.voice_state.loop
if ctx.voice_state.loop :
            result = await ctx.send('Loop enabled!')
        else:
            result = await ctx.send('Loop disabled!')
        await result.add_reaction('👍')
@commands.command(name=command[2][0], aliases=command[2][1:])
async def _play(self, ctx: commands.Context, *, search: str):
if not ctx.voice_state.voice:
await ctx.invoke(self._summon)
async with ctx.typing():
try:
source = await YTDLSource.create_source(self.bot, ctx, search, loop=self.bot.loop)
if not source:
return await ctx.send(f"๋
ธ๋ ์ฌ์/์์ฝ์ด ์ทจ์ ๋์์ต๋๋ค.")
except YTDLError as e:
                await ctx.send('An error occurred: {}'.format(str(e)))
else:
song = Song(source)
await ctx.voice_state.songs.put(song)
                await ctx.send('Added to queue: {}'.format(str(source)))
@commands.command(name=command[15][0], aliases=command[15][1:])
async def _reserve(self, ctx: commands.Context, *, search: str):
if not ctx.voice_state.is_playing:
return await ctx.send(f":mute: ํ์ฌ ์ฌ์์ค์ธ ์์
์ด ์์ต๋๋ค. {command[2][0]} ๋ช
๋ น์ด๋ฅผ ํตํด ๋
ธ๋๋ฅผ ์์ฝํด์ฃผ์ธ์!")
async with ctx.typing():
try:
source = await YTDLSource.create_source(self.bot, ctx, search, loop=self.bot.loop)
if not source:
return await ctx.send(f"๋
ธ๋ ์ฌ์/์์ฝ์ด ์ทจ์ ๋์์ต๋๋ค.")
except YTDLError as e:
                await ctx.send('An error occurred: {}'.format(str(e)))
else:
song = Song(source)
ctx.voice_state.songs.reserve(song)
                await ctx.send('Added to queue: {}'.format(str(source)))
@commands.command(name=command[13][0], aliases=command[13][1:])
async def clear_channel_(self, ctx: commands.Context, *, msg: int = 1):
try:
msg = int(msg)
except:
await ctx.send(f"```์ง์ฐ๊ณ ์ถ์ ์ค์๋ [์ซ์]๋ก ์
๋ ฅํด์ฃผ์ธ์!```")
await ctx.channel.purge(limit = msg)
@_summon.before_invoke
@_play.before_invoke
async def ensure_voice_state(self, ctx: commands.Context):
if not ctx.author.voice or not ctx.author.voice.channel:
            raise commands.CommandError('Please join a voice channel first.')
if ctx.voice_client:
if ctx.voice_client.channel != ctx.author.voice.channel:
                raise commands.CommandError('The bot is already connected to another voice channel.')
@commands.command(name=command[12][0], aliases=command[12][1:]) #๋์๋ง
async def menu_(self, ctx):
command_list = ''
        command_list += '#์ธ์ค : use this when the bot is misbehaving!\n' #emergency reset
        command_list += ','.join(command[0]) + '\n' #join
        command_list += ','.join(command[1]) + '\n' #leave
        command_list += ','.join(command[2]) + ' [search term] or [url]\n' #play
        command_list += ','.join(command[15]) + ' [search term] or [url]\n' #play next
        command_list += ','.join(command[3]) + '\n' #pause
        command_list += ','.join(command[4]) + '\n' #resume
        command_list += ','.join(command[5]) + ' (number)\n' #skip
        command_list += ','.join(command[6]) + ' or [command] + [number]\n' #queue
        command_list += ','.join(command[7]) + '\n' #now playing
        command_list += ','.join(command[8]) + ' [number 1~100]\n' #volume
        command_list += ','.join(command[9]) + '\n' #stop
        command_list += ','.join(command[10]) + '\n' #remove
        command_list += ','.join(command[11]) + '\n' #shuffle
        command_list += ','.join(command[14]) + '\n' #loop
        command_list += ','.join(command[13]) + ' [number]\n' #clear chat
        embed = discord.Embed(
            title = "----- Commands -----",
description= '```' + command_list + '```',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
    ################ Generate a speech file and play it ################
@commands.command(name="#์ธ์ค")
async def playText_(self, ctx):
#msg = ctx.message.content[len(ctx.invoked_with)+1:]
#sayMessage = msg
        #await MakeSound('The music bot is not feeling well. Please try again shortly.', './say' + str(ctx.guild.id))
        await ctx.send("```The music bot is not feeling well. Please try again shortly.```", tts=False)
if not ctx.voice_state.voice:
await ctx.invoke(self._summon)
if ctx.voice_state.is_playing:
ctx.voice_state.voice.stop()
#await PlaySound(ctx.voice_state.voice, './say' + str(ctx.guild.id) + '.wav')
await ctx.voice_state.stop()
del self.voice_states[ctx.guild.id]
bot = commands.Bot('', help_command = None, description='ํด์ฑ๋ฎค์ง๋ด')
bot.add_cog(Music(bot))
@bot.event
async def on_ready():
print("Logged in as ") #ํ๋ฉด์ ๋ด์ ์์ด๋, ๋๋ค์์ด ์ถ๋ ฅ๋ฉ๋๋ค.
print(bot.user.name)
print(bot.user.id)
print("===========")
await bot.change_presence(status=discord.Status.dnd, activity=discord.Game(name=command[12][0], type=1), afk = False)
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, CommandNotFound):
return
elif isinstance(error, discord.ext.commands.MissingRequiredArgument):
return
raise error
bot.run(access_token)
|
the-stack_106_29159 | # Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neu.tts.hparams import HParams
hparams = HParams(
# dataset parameters
data_dir="./data/LJSpeech-1.1/", # directory to the data
# directory to save all precomputed inputs
save_data_dir="./data/LJSpeech-1.1/tacotron",
# output variables in dataloader
out_variables=["mel", "linear", "text"],
n_frames=162, # number of frames in mel spectrogram
text_len=188, # maximum text length
sr=20000, # sampling rate used to read audios
# length of windowed signal after padding with zeros
n_fft=2048,
n_mels=80, # number of mel filters
# audio samples between adjacent STFT columns
hop_length=250,
win_length=1000, # window length
ref_db=20, # reference decibel
max_db=100, # maximum decibel
mel_fmin=0.0, # minimum mel bank
mel_fmax=None, # maximum mel bank
preemphasis=0.97, # preemphasis factor
# dictionary
vocab="~ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!'(),-.:;?,_ ",
r=5, # number of frames generated on each timestep
n_iter=60, # number of iterations for Griffin-Lim
power=1.5, # power used for Griffin-Lim
# number of dimensions used for character embedding
symbols_embedding_dim=256,
prenet_channels=(256, 128), # number channels for prenet
# number of dimensions used for encoder embedding
encoder_embedding_dim=256,
attention_dim=256, # dimension of attention
# number of dimensions for decoder embedding
postnet_embedding_dim=256,
batch_size=32, # batch size
epoch=1001, # number of epochs
# number of iterations before printing to log file
print_frequency=50,
weight_decay=0.0, # weight decay
max_norm=1.0, # maximum norm used in clip_grad_by_norm
alpha=0.001, # learning rate
warmup=4000, # number of iterations for warmup
epochs_per_checkpoint=50, # number of epochs for each checkpoint
output_path="./log/tacotron/", # directory to save results
seed=123456, # random seed
)
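# Hedged usage sketch (not part of the original config): how these values would
# typically drive mel-spectrogram extraction with librosa. The audio path and the
# ref_db/max_db normalization follow common Tacotron preprocessing and are
# assumptions, not necessarily the exact upstream pipeline.
if __name__ == "__main__":
    import librosa
    import numpy as np

    wav, _ = librosa.load("sample.wav", sr=hparams.sr)                 # placeholder file name
    wav = np.append(wav[0], wav[1:] - hparams.preemphasis * wav[:-1])  # pre-emphasis filter
    mel = librosa.feature.melspectrogram(
        y=wav, sr=hparams.sr, n_fft=hparams.n_fft,
        hop_length=hparams.hop_length, win_length=hparams.win_length,
        n_mels=hparams.n_mels, fmin=hparams.mel_fmin, fmax=hparams.mel_fmax)
    mel_db = 20 * np.log10(np.maximum(1e-5, mel))                      # amplitude to decibels
    mel_db = np.clip((mel_db - hparams.ref_db + hparams.max_db) / hparams.max_db, 0, 1)
    print(mel_db.shape)                                                # (n_mels, n_frames)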
|
the-stack_106_29160 | import numpy as np
import os
def load_bin(fname, start_time, N_CHAN, chunk_len, d_type='float32'):
"""Load Raw data
"""
with open(fname, 'rb') as fin:
if d_type=='float32':
fin.seek(start_time * 4 * N_CHAN, os.SEEK_SET)
elif d_type =='int16':
fin.seek(start_time * 2 * N_CHAN, os.SEEK_SET)
else:
print ("uknown data type")
data = np.fromfile(
fin,
dtype=d_type,
count=(chunk_len * N_CHAN)).reshape(chunk_len,N_CHAN)#.astype(np.int32)
return data
def binary_reader_waveforms(filename, n_channels, n_times, spikes, channels=None, data_type='float32'):
"""Reader for loading raw binaries
Args:
standardized_filename: name of file contianing the raw binary
n_channels: number of channels in the raw binary recording
n_times: length of waveform
spikes: 1D array containing spike times in sample rate of raw data
channels: load specific channels only
data_type: float32 for standardized data
"""
if channels is None:
wfs = np.zeros((spikes.shape[0], n_times, n_channels), data_type)
channels = np.arange(n_channels)
else:
wfs = np.zeros((spikes.shape[0], n_times, len(channels)), data_type)
with open(filename, "rb") as fin:
for ctr,s in enumerate(spikes):
# index into binary file: time steps * 4 4byte floats * n_channels
fin.seek(s * 4 * n_channels, os.SEEK_SET)
wfs[ctr] = np.fromfile(
fin,
dtype='float32',
count=(n_times * n_channels)).reshape(n_times, n_channels)[:,channels]
fin.close()
return wfs
def load_waveform(voltage_file, spike_index_all, spike_channel, load_channels=None, n_channels=49, n_times=61):
"""Load data
Args:
voltage_file: standardized voltages file .bin
spike_index_all: [n_spikes, 2] each row (time, channel)
spike_channel: which channel on the electrode array
"""
# select detected spikes on the desired channel
spike_index = spike_index_all[spike_index_all[:,1]==spike_channel]
# spike times in sample time; may need to shift the template a bit
spikes = spike_index[:,0] - 30
# read waveforms
waveforms = binary_reader_waveforms(voltage_file, n_channels, n_times, spikes, load_channels)
# [n_spikes, n_timesteps, n_channels or len(load_channels)]
return waveforms
def load_waveform_by_spike_time(voltage_file, spike_times, time_offset=-30, load_channels=None,
n_channels=49, n_times=61):
"""Load data by spike time
Args:
voltage_file: standardized voltages file .bin
spike_times: [n_spikes,]
time_offset: time offset for spike peak
load_channels: which channels on the electrode array to load
"""
# spike times in sample time; may need to shift the template a bit
spikes = spike_times + time_offset
# read waveforms
waveforms = binary_reader_waveforms(voltage_file, n_channels, n_times, spikes, load_channels)
# [n_spikes, n_timesteps, n_channels or len(load_channels)]
return waveforms
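# Hedged usage sketch: the file names and channel/geometry values below are
# placeholders and must match the actual recording; this helper is illustrative
# and is not called anywhere in this module.
def _demo_load_waveforms():
    spike_index_all = np.load("spike_index.npy")   # [n_spikes, 2] rows of (sample_time, channel)
    wfs = load_waveform("standardized.bin", spike_index_all,
                        spike_channel=12, n_channels=49, n_times=61)
    return wfs                                     # shape: (n_spikes_on_channel_12, 61, 49)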
def add_data_to_npz(npz_path, new_data):
assert(isinstance(new_data, dict))
npz = np.load(npz_path)
data = {name: npz[name] for name in npz.files}
for k, v in new_data.items():
data[k] = v
    np.savez_compressed(npz_path, **data)
|
the-stack_106_29162 | #!/usr/bin/env python
"""
manage_columns_filter.py -- add needed columns, remove unused columns
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.01"
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, improve_jobcode_description
data_in = read_csv_fp(sys.stdin)
var_names = data_in[data_in.keys()[1]].keys() # create a list of var_names from the first row
print >>sys.stderr, "Columns in", var_names
data_out = {}
for row, data in data_in.items():
new_data =dict(data)
# Add these columns
new_data['remove'] = ''
new_data['uri'] = ''
new_data['title'] = improve_jobcode_description(new_data['JOBCODE_DESCRIPTION'])
new_data['hr_title'] = new_data['JOBCODE_DESCRIPTION']
# Delete these columns
del new_data['JOBCODE']
del new_data['HR_POSITION']
del new_data['JOBCODE_DESCRIPTION']
data_out[row] = new_data
var_names = data_out[data_out.keys()[1]].keys() # create a list of var_names from the first row
print >>sys.stderr, "Columns out", var_names
write_csv_fp(sys.stdout, data_out)
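# Typical (assumed) invocation as a stdin/stdout filter in a shell pipeline:
#   python manage_columns_filter.py < position_data.csv > position_data_out.csv
# The CSV file names above are placeholders.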
|
the-stack_106_29163 | # Displays average item active price and sends as text message
import json
import requests
import boto3
from config import key, number
from colorama import init, Fore
init()
def calculate_average(x):
return sum(x) / float(len(x))
searchTerm = input ("\nEnter Search Term: ")
condition = ("3000")
minPrice =("200")
maxPrice =("1200")
# minPrice = input("Enter Minimum Price: ") # Enter %00 to hard code no min price
# maxPrice = input("Enter Maximum Price: ") # Enter %00 to hard code no max price
# print ("\n1 = New \n2 = Used \n3 = For Parts or Not Working")
# condition = input("\nSelect Condition: ")
# if condition == "1":
# condition = "1000"
# elif condition == "2":
# condition = "3000"
# elif condition == "3":
# condition = "7000"
# else:
# print ("You did not select a valid category")
active_url = ("http://svcs.ebay.com/services/search/FindingService/v1\
?OPERATION-NAME=findItemsByKeywords\
&SERVICE-VERSION=1.7.0\
&SECURITY-APPNAME=" + key +"&RESPONSE-DATA-FORMAT=JSON\
&REST-PAYLOAD\
&itemFilter(0).name=Condition\
&itemFilter(0).value=" + condition + "\
&itemFilter(1).name=MinPrice\
&itemFilter(1).value=" + minPrice +"&itemFilter(1).paramName=Currency\
&itemFilter(1).paramValue=USD\
&itemFilter(2).name=MaxPrice\
&itemFilter(2).value=" + maxPrice +"&itemFilter(2).paramName=Currency\
&itemFilter(2).paramValue=USD\
&itemFilter(3).name=ListingType\
&itemFilter(3).value(0)=AuctionWithBIN\
&itemFilter(3).value(1)=FixedPrice\
&paginationInput.entriesPerPage=10\
&sortOrder=PricePlusShippingLowest\
&keywords=" + searchTerm)
results = requests.get(active_url)
raw = results.json()
new_list = []
print (Fore.YELLOW + "\nActive Listings: \n" + Fore.GREEN)
for item in (raw["findItemsByKeywordsResponse"][0]["searchResult"][0]["item"]):
price = item["sellingStatus"][0]["convertedCurrentPrice"][0]['__value__']
title = item["title"][0]
url = item["viewItemURL"][0]
print (title + " $" + price + " \nLink: " + url + "\n")
int_price = int(float(price))
new_list.append(int_price)
print (Fore.YELLOW + "Average Active Price: \n" + Fore.GREEN)
average_active_price = (calculate_average(new_list))
print (average_active_price)
# Send Text Message via AWS SNS
aws_client = boto3.client("sns")
sns_phone_number = number
sns_message = "The average price of " + searchTerm + " is $" + str(average_active_price)
aws_response = aws_client.publish(
PhoneNumber=sns_phone_number,
Message=sns_message,
)
|