the-stack_106_31720
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021. by Daniel Barrejon, UC3M. +
# All rights reserved. This file is part of the Shi-VAE, and is released under the +
# "MIT License Agreement". Please see the LICENSE file that should have been included +
# as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import sys
import numpy as np
import torch
from lib import utils, datasets as dset
from lib.aux import set_device
from lib.process_args import get_args, save_args
from lib.scalers import HeterogeneousScaler
# CPU or GPU Run
args = get_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
device = set_device()
print("DEVICE: {}".format(device))
dataset = "physionet_burst"
args.dataset = dataset
args.train = -1
# Shi-VAE
args.model_name = '{}_{}_{}z_{}h_{}s'.format(args.model, args.dataset, args.z_dim, args.h_dim, args.K)
args.result_dir = os.path.join(args.ckpt_dir, args.experiment, args.model_name)
args.ckpt_file = os.path.join(args.result_dir, args.model_name + ".pth")
args.best_ckpt_file = os.path.join(args.result_dir, args.model_name + "_best.pth")
# Restore training
if (args.restore == 1):
if (not os.path.isfile(args.ckpt_file)):
print('Model not found at {}'.format(args.ckpt_file))
sys.exit()
model_dict = torch.load(args.ckpt_file)
n = args.n_epochs
# Restore args from training args.
args = model_dict['params']
args.n_epochs = n
args.restore = 1
# Print Arguments
print('ARGUMENTS')
for arg in vars(args):
print('{} = {}'.format(arg, getattr(args, arg)))
# Create checkpoint directory
if (not os.path.exists(args.ckpt_dir)):
os.makedirs(args.ckpt_dir)
# Create results directory
if (not os.path.exists(args.result_dir)):
os.makedirs(args.result_dir)
# ============= LOAD DATA ============= #
data = np.load(os.path.join(args.data_dir, dataset, dataset + ".npz"))
types_csv = os.path.join(args.data_dir, dataset, "data_types_real.csv")
types_list = utils.read_csv_types(types_csv)
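# Each split provides x_*_miss (observations with missing values), x_*_full (complete data),
# m_*_miss (observed-value masks) and m_*_artificial (masks of artificially removed entries,
# apparently used to evaluate imputation quality), plus labels y_*.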
# Train
x_train = data["x_train_miss"].astype(np.float32)
x_train_full = data["x_train_full"].astype(np.float32)
m_train = data["m_train_miss"].astype(bool)
m_train_artificial = data["m_train_artificial"].astype(bool)
y_train = data["y_train"]
# Val
x_val = data["x_val_miss"].astype(np.float32)
x_val_full = data["x_val_full"].astype(np.float32)
m_val = data["m_val_miss"].astype(bool)
m_val_artificial = data["m_val_artificial"].astype(bool)
y_val = data["y_val"]
# Test
x_test = data["x_test_miss"].astype(np.float32)
x_test_full = data["x_test_full"].astype(np.float32)
m_test = data["m_test_miss"].astype(bool)
m_test_artificial = data["m_test_artificial"].astype(bool)
y_test = data["y_test"]
# ===== Scaler ===== #
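# The heterogeneous scaler is fitted on the training split together with its observation
# mask, presumably so that missing entries are excluded from the per-feature statistics.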
scaler = HeterogeneousScaler(types_list)
scaler.fit(x_train, m_train)
# ===== Datasets ===== #
data_train = dset.HeterDataset(x_train, m_train, x_train_full, m_train_artificial, types_list=types_list)
data_valid = dset.HeterDataset(x_val, m_val, x_val_full, m_val_artificial, types_list=types_list)
data_test = dset.HeterDataset(x_test, m_test, x_test_full, m_test_artificial, types_list=types_list)
# ===== DataLoaders ===== #
train_loader = torch.utils.data.DataLoader(data_train, batch_size=64, shuffle=True,
collate_fn=dset.standard_collate)
valid_loader = torch.utils.data.DataLoader(data_valid, batch_size=64, shuffle=False,
collate_fn=dset.standard_collate)
test_loader = torch.utils.data.DataLoader(data_test, batch_size=64, shuffle=False,
collate_fn=dset.standard_collate)
# ============= MODEL ============= #
from models.trainers import Trainer
from models.shivae import ShiVAE
# Shi-VAE
model = ShiVAE(h_dim=args.h_dim, z_dim=args.z_dim, s_dim=args.K, types_list=types_list,
n_layers=1,
learn_std=args.learn_std)
optimizer = torch.optim.Adam(model.parameters(), lr=args.l_rate)
total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Trainable params: {}'.format(total_params))
# ============= TRAIN ============= #
if args.train == 1 or args.train == -1:
trainer = Trainer(model, optimizer, args, scaler=scaler)
# Train from pretrained model
if (args.restore == 1 and os.path.isfile(args.ckpt_file)):
print('Model loaded at {}'.format(args.ckpt_file))
trainer.load_checkpoint(model_dict)
print('Training points: {}'.format(len(train_loader.dataset)))
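    # Note that the test loader is what gets passed as the evaluation loader during training.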
trainer.train(train_loader, test_loader)
# ============= RESULTS ============= #
if args.train == 0 or args.train == -1:
from lib.result import Result
result_dir = os.path.dirname(args.ckpt_file)
print('Save images in: {}'.format(result_dir))
# Load pretrained model
model_dict = torch.load(args.best_ckpt_file)
model.load_state_dict(model_dict['state_dict'])
# Create test loader
test_loader = torch.utils.data.DataLoader(data_test, batch_size=64, shuffle=False,
collate_fn=dset.standard_collate)
# ================== #
# Reconstruction and generation
# ================== #
result = Result(test_loader, scaler, model, result_dir, args)
model_name = "ShiVAE"
result.avg_error(model_name=model_name)
result.reconstruction(types_list=types_list)
result.generation(args.result_imgs, types_list=types_list)
# ===== Save args ===== #
args_path = os.path.join(args.result_dir, args.model_name + '.json')
save_args(args, args_path)
|
the-stack_106_31722 | # -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.remoteobject.transport.message_dispatcher\
import MessageDispatcher
from org.o3project.odenos.remoteobject.transport.remote_message_transport\
import RemoteMessageTransport
from org.o3project.odenos.remoteobject.object_property import ObjectProperty
import unittest
from mock import Mock, MagicMock, patch
from contextlib import nested
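# Unit tests for RemoteMessageTransport: its AtomicInteger and SynchronousQueue helpers
# and the request/response bookkeeping. Note that addRequet (sic) is the actual method
# name exposed by the class under test.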
class RemoteMessageTransportTest(unittest.TestCase):
Dispatcher = MagicMock()
value01 = ""
value02 = ""
result01 = ""
result02 = ""
def setUp(self):
self.target = RemoteMessageTransport(
"RemoteMessageTransport",
self.Dispatcher)
def tearDown(self):
self.target = None
def test_constructor(self):
self.assertEqual(self.target.object_id, "RemoteMessageTransport")
self.assertEqual(self.target.seqnoGenerator.counter, 0)
self.assertEqual(self.target.responseMap, {})
def test_AtomicInteger_constructor(self):
with patch("threading.RLock") as Mock_RLock:
Mock_RLock.return_value = "Mock_RLock"
self.target = self.target.AtomicInteger(5)
self.assertEqual(self.target.counter, 5)
self.assertEqual(self.target.lock, "Mock_RLock")
def test_AtomicInteger_increase(self):
self.target = self.target.AtomicInteger(6)
self.target.increase()
self.assertEqual(self.target.counter, 7)
def test_SynchronousQueue_constructor(self):
with nested(patch("threading.RLock"),
patch("Queue.Queue")) as (Mock_RLock,
Mock_Queue):
Mock_Queue.return_value = "Mock_Queue"
Mock_RLock.return_value = "Mock_RLock"
self.target = self.target.SynchronousQueue()
self.assertEqual(self.target.q, "Mock_Queue")
self.assertEqual(self.target.put_lock, "Mock_RLock")
def test_SynchronousQueue_get(self):
self.target = self.target.SynchronousQueue()
self.value01 = ObjectProperty("object_type", "object_id")
with self.target.put_lock:
self.target.q.put(self.value01, block=True)
self.result = self.target.get()
self.assertEqual(self.result, self.value01)
def test_SynchronousQueue_put(self):
self.target = self.target.SynchronousQueue()
with patch("Queue.Queue.join") as Mock_Queue_join:
self.value01 = ObjectProperty("object_type", "object_id")
self.result = self.target.put(self.value01)
self.assertEqual(self.target.get(), self.value01)
def test_send_request_message(self):
with patch("org.o3project.odenos.remoteobject.transport." +
"remote_message_transport.RemoteMessageTransport." +
"SynchronousQueue.get") as q_get:
self.value01 = ObjectProperty("object_type", "object_id")
q_get.return_value = "get_item"
self.target.dispatcher.get_source_dispatcher_id = Mock(
return_value="dispatcher_id")
self.target.dispatcher.monitor_enabled = Mock(
return_value=False)
self.result01 = self.target.send_request_message(self.value01)
self.assertEqual(self.result01, "get_item")
def test_send_request_message_response_None(self):
with patch("org.o3project.odenos.remoteobject.transport." +
"remote_message_transport.RemoteMessageTransport." +
"SynchronousQueue.get") as q_get:
self.value01 = ObjectProperty("object_type", "object_id")
q_get.return_value = None
self.target.dispatcher.get_source_dispatcher_id = Mock(
return_value="dispatcher_id")
self.target.dispatcher.monitor_enabled = Mock(
return_value=False)
try:
self.result = self.target.send_request_message(self.value01)
except:
pass
def test_addRequest_success(self):
self.value01 = ObjectProperty("object_type", "object_id")
self.target.dispatcher.get_source_dispatcher_id = Mock(
return_value="dispatcher_id")
self.target.dispatcher.monitor_enabled = Mock(return_value=False)
self.result01 = self.target.addRequet(self.value01)
self.assertEqual(len(self.target.responseMap), 1)
self.assertEqual(self.target.responseMap[1], self.result01)
def test_addRequest_success2(self):
self.value01 = ObjectProperty("object_type", "object_id")
self.target.dispatcher.get_source_dispatcher_id = Mock(
return_value="dispatcher_id")
self.target.dispatcher.monitor_enabled = Mock(return_value=True)
self.result01 = self.target.addRequet(self.value01, "object_id")
self.assertEqual(len(self.target.responseMap), 1)
self.assertEqual(self.target.responseMap[1], self.result01)
def test_addRequest_error(self):
with nested(
patch("org.o3project.odenos.remoteobject.transport."
"message_dispatcher.MessageDispatcher."
"pushPublishQueue"),
patch("logging.exception")) as (Mock_pushPublishQueue,
logging_exception):
self.value01 = ObjectProperty("object_type", "object_id")
self.target.dispatcher.get_source_dispatcher_id = Mock(
return_value="dispatcher_id")
self.target.dispatcher.monitor_enabled = Mock(
return_value=False)
self.target.dispatcher.get_source_dispatcher_id.side_effect =\
Exception()
try:
self.result01 = self.target.addRequet(self.value01)
except:
self.assertEqual(logging_exception.call_count, 1)
self.assertEqual(len(self.target.responseMap), 0)
def test_addRequest_error2(self):
with nested(
patch("logging.warn")) as (logging_warn):
self.value01 = ObjectProperty("object_type", "object_id")
self.target.dispatcher.get_source_dispatcher_id = Mock(
return_value="dispatcher_id")
self.target.dispatcher.monitor_enabled = Mock(return_value=True)
self.result01 = self.target.addRequet(self.value01)
self.assertEqual(len(self.target.responseMap), 1)
self.assertEqual(self.target.responseMap[1], self.result01)
def test_signalResponse(self):
with patch("org.o3project.odenos.remoteobject.transport." +
"remote_message_transport.RemoteMessageTransport." +
"SynchronousQueue.put") as q_put:
self.value01 = ObjectProperty("object_type01", "object_id01")
self.value02 = ObjectProperty("object_type02", "object_id02")
self.target.dispatcher.get_source_dispatcher_id = Mock(
return_value="dispatcher_id")
self.target.dispatcher.monitor_enabled = Mock(
return_value=False)
self.result01 = self.target.addRequet(self.value01)
self.result02 = self.target.addRequet(self.value02)
self.target.signalResponse(1, self.value01)
self.assertEqual(len(self.target.responseMap), 1)
self.assertEqual(self.target.responseMap[2], self.result02)
def test_close(self):
self.target.close()
if __name__ == "__main__":
unittest.main()
|
the-stack_106_31723 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from flsim.common.pytest_helper import assertEqual, assertAlmostEqual
from flsim.utils.timing.training_duration_distribution import (
PerUserUniformDurationDistribution,
PerUserUniformDurationDistributionConfig,
PerUserHalfNormalDurationDistribution,
PerUserHalfNormalDurationDistributionConfig,
DurationDistributionFromListConfig,
DurationDistributionFromList,
DurationInfo,
)
from flsim.utils.timing.training_time_estimator import (
get_training_time,
AsyncTrainingTimeEstimator,
SyncTrainingTimeEstimator,
)
from omegaconf import OmegaConf
class TestTrainingTimeEstimator:
def test_time_from_list(self) -> None:
"""
Test training time from list
Assuming UPR = 2
Sync would be the sum of slowest user between rounds
round 1
user_1: duration = 4
user_2: duration = 3
round 2
user_3: duration = 2
user_4: duration = 1
total = 4 + 2 = 6
Async would be the
user_1: duration = 4, start_time = 1
user_2: duration = 3, start_time = 1
user_3: duration = 2, start_time = 2
user_4: duration = 1, start_time = 3
users training @ time 1: user 1, user 2
users training @ time 3: user 2, user 3
users training @ time 4: user 3, user 4
users training @ time 5: user 4 finishes training
"""
training_events = [
DurationInfo(duration=4),
DurationInfo(duration=3),
DurationInfo(duration=2),
DurationInfo(duration=1),
]
async_start_times = [1, 1, 2, 3]
sync_training_dist = DurationDistributionFromList(
**OmegaConf.structured(
DurationDistributionFromListConfig(training_events=training_events)
)
)
async_training_dist = DurationDistributionFromList(
**OmegaConf.structured(
DurationDistributionFromListConfig(training_events=training_events)
)
)
num_users = len(training_events)
epochs = 1
users_per_round = 2
sync_estimator = SyncTrainingTimeEstimator(
total_users=len(training_events),
users_per_round=users_per_round,
epochs=epochs,
training_dist=sync_training_dist,
)
async_estimator = AsyncTrainingTimeEstimator(
total_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
training_dist=async_training_dist,
start_times=async_start_times,
)
async_time = async_estimator.training_time()
sync_time = sync_estimator.training_time()
assertEqual(sync_time, 6)
assertEqual(async_time, 5)
def test_uniform_training_time(self) -> None:
"""
Test uniform training time
Sync and Async should have the same training time if
UPR = 1 and duration_min close to duration_mean
"""
torch.manual_seed(0)
num_users = 1000
epochs = 1
users_per_round = 1
duration_mean = 1.00
duration_min = 0.99999
training_dist = PerUserUniformDurationDistribution(
**OmegaConf.structured(
PerUserUniformDurationDistributionConfig(
training_duration_mean=duration_mean,
training_duration_min=duration_min,
)
)
)
sync_time, async_time = get_training_time(
num_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
training_dist=training_dist,
)
assertAlmostEqual(sync_time, async_time, delta=1e-3)
def test_per_user_half_normal(self) -> None:
"""
Test half normal training time
Sync and Async should have the following training time
sync_training_time = async_training_time = num_users * duration_min
        if UPR = 1 and duration_std is close to 0
"""
torch.manual_seed(0)
num_users = 1000
epochs = 1
users_per_round = 1
duration_std = 1e-6
duration_min = 1.0
training_dist = PerUserHalfNormalDurationDistribution(
**OmegaConf.structured(
PerUserHalfNormalDurationDistributionConfig(
training_duration_sd=duration_std,
training_duration_min=duration_min,
)
)
)
sync_time, async_time = get_training_time(
num_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
training_dist=training_dist,
)
assertAlmostEqual(sync_time, async_time, delta=1e-3)
assertAlmostEqual(sync_time, num_users * duration_min, delta=1e-3)
assertAlmostEqual(async_time, num_users * duration_min, delta=1e-3)
the-stack_106_31725
"""OPP WS2812 wing."""
import logging
from mpf.core.platform_batch_light_system import PlatformBatchLight
from mpf.platforms.opp.opp_rs232_intf import OppRs232Intf
class OPPNeopixelCard:
"""OPP Neopixel/WS2812 card."""
__slots__ = ["log", "chain_serial", "platform", "addr", "card_num", "num_pixels", "num_color_entries",
"color_table_dict"]
def __init__(self, chain_serial, addr, neo_card_dict, platform):
"""Initialise OPP Neopixel/WS2812 card."""
self.log = logging.getLogger('OPPNeopixel')
self.chain_serial = chain_serial
self.addr = addr
self.platform = platform
self.card_num = str(addr - ord(OppRs232Intf.CARD_ID_GEN2_CARD))
self.num_pixels = 0
self.num_color_entries = 0
self.color_table_dict = dict()
neo_card_dict[chain_serial + '-' + self.card_num] = self
self.log.debug("Creating OPP Neopixel card at hardware address: 0x%02x", addr)
class OPPLightChannel(PlatformBatchLight):
"""A channel of a WS2812 LED."""
__slots__ = ["chain_serial", "addr", "pixel_num"]
# pylint: disable-msg=too-many-arguments
def __init__(self, chain_serial, addr, pixel_num, light_system):
"""Initialise led channel."""
super().__init__("{}-{}-{}".format(chain_serial, addr, pixel_num), light_system)
self.pixel_num = pixel_num
self.addr = addr
self.chain_serial = chain_serial
def get_max_fade_ms(self):
"""Return largest number which fits two bytes."""
return 65535
def get_board_name(self):
"""Return OPP chain and addr."""
return "OPP LED {} on Chain {} Board {}".format(self.pixel_num, self.chain_serial, self.addr)
the-stack_106_31726
from dataclasses import replace
from typing import Any, Iterator, List
from unittest.mock import patch
import pytest
import black
from tests.util import (
DEFAULT_MODE,
PY36_VERSIONS,
THIS_DIR,
assert_format,
dump_to_stderr,
read_data,
)
SIMPLE_CASES: List[str] = [
"attribute_access_on_number_literals",
"beginning_backslash",
"bracketmatch",
"class_blank_parentheses",
"class_methods_new_line",
"collections",
"comments",
"comments2",
"comments3",
"comments4",
"comments5",
"comments6",
"comments_non_breaking_space",
"comment_after_escaped_newline",
"composition",
"composition_no_trailing_comma",
"docstring",
"empty_lines",
"expression",
"fmtonoff",
"fmtonoff2",
"fmtonoff3",
"fmtonoff4",
"fmtskip",
"fmtskip2",
"fmtskip3",
"fmtskip4",
"fmtskip5",
"fmtskip6",
"fstring",
"function",
"function2",
"function_trailing_comma",
"import_spacing",
"power_op_spacing",
"remove_parens",
"slices",
"string_prefixes",
"torture",
"trailing_comma_optional_parens1",
"trailing_comma_optional_parens2",
"trailing_comma_optional_parens3",
"tricky_unicode_symbols",
"tupleassign",
]
PY310_CASES: List[str] = [
"starred_for_target",
"pattern_matching_simple",
"pattern_matching_complex",
"pattern_matching_extras",
"pattern_matching_style",
"pattern_matching_generic",
"parenthesized_context_managers",
]
PREVIEW_CASES: List[str] = [
# string processing
"cantfit",
"comments7",
"long_strings",
"long_strings__edge_case",
"long_strings__regression",
"percent_precedence",
]
SOURCES: List[str] = [
"src/black/__init__.py",
"src/black/__main__.py",
"src/black/brackets.py",
"src/black/cache.py",
"src/black/comments.py",
"src/black/concurrency.py",
"src/black/const.py",
"src/black/debug.py",
"src/black/files.py",
"src/black/linegen.py",
"src/black/lines.py",
"src/black/mode.py",
"src/black/nodes.py",
"src/black/numerics.py",
"src/black/output.py",
"src/black/parsing.py",
"src/black/report.py",
"src/black/rusty.py",
"src/black/strings.py",
"src/black/trans.py",
"src/blackd/__init__.py",
"src/black_primer/cli.py",
"src/black_primer/lib.py",
"src/blib2to3/pygram.py",
"src/blib2to3/pytree.py",
"src/blib2to3/pgen2/conv.py",
"src/blib2to3/pgen2/driver.py",
"src/blib2to3/pgen2/grammar.py",
"src/blib2to3/pgen2/literals.py",
"src/blib2to3/pgen2/parse.py",
"src/blib2to3/pgen2/pgen.py",
"src/blib2to3/pgen2/tokenize.py",
"src/blib2to3/pgen2/token.py",
"setup.py",
"tests/test_black.py",
"tests/test_blackd.py",
"tests/test_format.py",
"tests/test_primer.py",
"tests/optional.py",
"tests/util.py",
"tests/conftest.py",
]
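# The files above are Black's own sources; test_source_is_formatted below checks that
# each one is already formatted with the default mode.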
@pytest.fixture(autouse=True)
def patch_dump_to_file(request: Any) -> Iterator[None]:
with patch("black.dump_to_file", dump_to_stderr):
yield
def check_file(filename: str, mode: black.Mode, *, data: bool = True) -> None:
source, expected = read_data(filename, data=data)
assert_format(source, expected, mode, fast=False)
@pytest.mark.parametrize("filename", SIMPLE_CASES)
def test_simple_format(filename: str) -> None:
check_file(filename, DEFAULT_MODE)
@pytest.mark.parametrize("filename", PREVIEW_CASES)
def test_preview_format(filename: str) -> None:
check_file(filename, black.Mode(preview=True))
@pytest.mark.parametrize("filename", SOURCES)
def test_source_is_formatted(filename: str) -> None:
path = THIS_DIR.parent / filename
check_file(str(path), DEFAULT_MODE, data=False)
# =============== #
# Complex cases
# ============= #
def test_empty() -> None:
source = expected = ""
assert_format(source, expected)
def test_pep_572() -> None:
source, expected = read_data("pep_572")
assert_format(source, expected, minimum_version=(3, 8))
def test_pep_572_remove_parens() -> None:
source, expected = read_data("pep_572_remove_parens")
assert_format(source, expected, minimum_version=(3, 8))
def test_pep_572_do_not_remove_parens() -> None:
source, expected = read_data("pep_572_do_not_remove_parens")
# the AST safety checks will fail, but that's expected, just make sure no
# parentheses are touched
assert_format(source, expected, fast=True)
@pytest.mark.parametrize("major, minor", [(3, 9), (3, 10)])
def test_pep_572_newer_syntax(major: int, minor: int) -> None:
source, expected = read_data(f"pep_572_py{major}{minor}")
assert_format(source, expected, minimum_version=(major, minor))
def test_pep_570() -> None:
source, expected = read_data("pep_570")
assert_format(source, expected, minimum_version=(3, 8))
@pytest.mark.parametrize("filename", PY310_CASES)
def test_python_310(filename: str) -> None:
source, expected = read_data(filename)
mode = black.Mode(target_versions={black.TargetVersion.PY310})
assert_format(source, expected, mode, minimum_version=(3, 10))
def test_python_310_without_target_version() -> None:
source, expected = read_data("pattern_matching_simple")
mode = black.Mode()
assert_format(source, expected, mode, minimum_version=(3, 10))
def test_patma_invalid() -> None:
source, expected = read_data("pattern_matching_invalid")
mode = black.Mode(target_versions={black.TargetVersion.PY310})
with pytest.raises(black.parsing.InvalidInput) as exc_info:
assert_format(source, expected, mode, minimum_version=(3, 10))
exc_info.match("Cannot parse: 10:11")
def test_python_2_hint() -> None:
with pytest.raises(black.parsing.InvalidInput) as exc_info:
assert_format("print 'daylily'", "print 'daylily'")
exc_info.match(black.parsing.PY2_HINT)
def test_docstring_no_string_normalization() -> None:
"""Like test_docstring but with string normalization off."""
source, expected = read_data("docstring_no_string_normalization")
mode = replace(DEFAULT_MODE, string_normalization=False)
assert_format(source, expected, mode)
def test_long_strings_flag_disabled() -> None:
"""Tests for turning off the string processing logic."""
source, expected = read_data("long_strings_flag_disabled")
mode = replace(DEFAULT_MODE, experimental_string_processing=False)
assert_format(source, expected, mode)
def test_numeric_literals() -> None:
source, expected = read_data("numeric_literals")
mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
assert_format(source, expected, mode)
def test_numeric_literals_ignoring_underscores() -> None:
source, expected = read_data("numeric_literals_skip_underscores")
mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
assert_format(source, expected, mode)
def test_stub() -> None:
mode = replace(DEFAULT_MODE, is_pyi=True)
source, expected = read_data("stub.pyi")
assert_format(source, expected, mode)
def test_python38() -> None:
source, expected = read_data("python38")
assert_format(source, expected, minimum_version=(3, 8))
def test_python39() -> None:
source, expected = read_data("python39")
assert_format(source, expected, minimum_version=(3, 9))
def test_power_op_newline() -> None:
# requires line_length=0
source, expected = read_data("power_op_newline")
assert_format(source, expected, mode=black.Mode(line_length=0))
the-stack_106_31728
from discord.ext import commands
import asyncio
from cogs.utils import twitconn
from cogs.utils import checks
from discord.errors import Forbidden, InvalidArgument
import json, os, twitutils, linkutils, discordutils
class Streams:
bot = None
def __init__(self, bot):
self.bot = bot
self.loop = None
self.destinations = None
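        # tweets.json maps stalked Twitter account ids to the Discord channel ids that
        # should receive their tweets ('destinations'), plus a channel id 'blacklist'.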
path = os.path.join(os.getcwd(), 'files', 'tweets.json')
with open(path) as f:
self.destinations = json.load(f)
stalk = self.get_stalks()
twitconn.init_stream(stalk)
@bot.event
async def on_ready():
print('Ready')
await self.stream()
@bot.event
async def on_resumed():
print('Resumed')
await self.reboot_stream()
async def stream(self):
if self.loop is None:
await self.start()
else:
await self.reboot_stream()
async def start(self):
print('Now stalking')
self.loop = asyncio.get_event_loop()
self.loop.create_task(self.tweet_retriever())
async def end(self):
print('Ending stalking')
self.stop_loop = True
self.loop.stop()
self.loop = None
async def tweet_retriever(self):
print('kek')
await self.bot.wait_until_ready()
self.stop_loop = False
while not self.stop_loop:
if not twitconn.poster.running:
print('Disconnected')
await self.reboot_stream()
print('Reconnected')
await asyncio.sleep(20)
continue
statuses = twitconn.stream_new_tweets()
while len(statuses) > 0:
fstatus = statuses.pop(0)
id = str(fstatus.user.id)
#status = twitconn.encode_status(fstatus)
status = discordutils.encode_status(fstatus)
targets = self.destinations['destinations']
try:
channels = targets[id]
except KeyError:
continue
if channels == None:
continue
for channel in channels:
if channel in self.destinations['blacklist']:
continue
try:
send = self.bot.get_channel(channel)
await self.bot.send_message(send, embed=status)
except Forbidden as e:
print(send.name)
print(e)
except InvalidArgument as e:
print(send)
print(e)
await asyncio.sleep(5)
@commands.command(hidden=True)
@checks.is_owner()
async def reboot(self):
print('command????')
await self.bot.say('Attempting to restart stream!')
await self.kill_stream()
await self.bot.say('Stream killed!')
await self.bot.say('Restarting stream...')
await asyncio.sleep(60)
await self.restart_stream()
await self.bot.say('Restart successful...?')
@commands.command(hidden=True)
@checks.is_owner()
async def kill(self):
await self.bot.say('Killing stream...')
await self.kill_stream()
await self.bot.say('Stream killed!')
@commands.command(hidden=True)
@checks.is_owner()
async def restart(self):
await self.bot.say('Restarting stream...')
await self.restart_stream()
await self.bot.say('Stream started!')
@commands.group(hidden=True, pass_context=True, invoke_without_command=True)
@checks.is_owner()
async def stalk(self, ctx, id):
channel = ctx.message.channel.id
user = twitutils.get_user(twitconn.api_twitter, id)
#user = twitconn.get_user(id)
if self.add_channel(user, channel):
await self.bot.say('Added user {} to channel {} stalk queue!'.format(user.screen_name, ctx.message.channel.name))
else:
await self.bot.say('Added user {} to channel {} unfollow queue!'.format(user.screen_name, ctx.message.channel.name))
@stalk.command(name='list', pass_context=True, hidden=True)
async def slist(self, ctx):
channel = ctx.message.channel.id
stalks = []
for key in self.get_stalks():
channels = self.destinations['destinations'][key]
if channel in channels:
stalks.append(key)
if len(stalks):
await self.bot.say('Stalked twitter accounts on this channel: ' + str(stalks))
else:
await self.bot.say('This channel is not stalking any twitter accounts.')
@commands.command(hidden=True, pass_context=True)
@checks.is_owner()
async def blacklist(self, ctx):
channel = ctx.message.channel.id
if not self.blacklist_channel(channel):
await self.bot.say("Now blacklisting this channel.")
else:
await self.bot.say("No longer blacklisting this channel.")
async def kill_stream(self):
print('killing stream')
twitconn.kill_stream()
async def restart_stream(self):
print('restarting stream')
twitconn.restart_stream(self.get_stalks())
async def reboot_stream(self):
print('rebooting stream')
twitconn.kill_stream()
twitconn.restart_stream(self.get_stalks())
def add_channel(self, user, channel_id):
twitter_id = user.id_str
try:
channels = self.destinations['destinations'][twitter_id]
if channel_id in channels:
channels.remove(channel_id)
self.update_json()
return False
channels.append(channel_id)
except KeyError:
channels = [channel_id, ]
self.destinations['destinations'][twitter_id] = channels
self.update_json()
return True
def blacklist_channel(self, channel_id):
try:
channels = self.destinations['blacklist']
if channel_id in channels:
channels.remove(channel_id)
self.update_json()
return True
channels.append(channel_id)
except KeyError:
channels = [channel_id, ]
self.destinations['blacklist'] = channels
self.update_json()
return False
def update_json(self):
path = os.path.join(os.getcwd(), 'files', 'tweets.json')
with open(path, 'w') as f:
f.seek(0) # <--- should reset file position to the beginning.
json.dump(self.destinations, f, indent=4)
def get_stalks(self):
return list(self.destinations['destinations'].keys())
def setup(bot):
    bot.add_cog(Streams(bot))
the-stack_106_31730
#!/usr/bin/env python3
# -*-coding:utf-8-*-
# @Time : 2017/11/1 ~ 2019/9/1
# @Author : Allen Woo
import sys
from signal import signal, SIGCHLD, SIG_IGN
from pymongo.errors import OperationFailure
from apps.configs.config import CONFIG
from apps.configs.db_config import DB_CONFIG
from apps.core.db.config_mdb import DatabaseConfig
from apps.core.utils.sys_tool import update_pylib, add_user as add_user_process
__author__ = 'all.woo'
"""
manage
"""
# Before the website has started, temporarily connect to the database and update the collections
print("\033[1;36m[OSROOM] Staring...\033[0m")
from apps.core.utils.update_sys_data import update_mdb_collections, init_datas, compatible_processing, \
update_mdbcolls_json_file
from apps.core.db.mongodb import PyMongo
print(" * Check or update the database collection")
database = DatabaseConfig()
mdbs = {}
for k, mdb_acc in DB_CONFIG["mongodb"].items():
mdbs[k] = PyMongo()
db_init = 2
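# The loop below runs twice: the first pass initialises the connections and updates the
# database collections, the second pass simply re-runs the connection setup.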
while db_init:
try:
for name, mdb in mdbs.items():
if name not in ["sys", "user", "web"]:
print(" *[Error] 由v1.x.x更新到v2.x.x需要请更新你的数据库配置文件apps/configs/db_config.py\n"
" 请参考同目录下的db_config_sample.py")
sys.exit()
mdb.init_app(config_prefix=name.upper(),
db_config=database.__dict__["{}_URI".format(name.upper())])
except OperationFailure as e:
print("\n[Mongodb] *{}".format(e))
print("Mongodb validation failure, the user name,"
" password mistake or database configuration errors.\n"
"Tip: to open database authentication configuration")
sys.exit(-1)
if db_init == 2:
update_mdb_collections(mdbs=mdbs)
db_init -= 1
# Update the configuration file
from apps.core.flask.update_config_file import update_config_file
print(" * Update and sync config.py")
r = update_config_file(mdbs=mdbs)
if not r:
print("[Error] Update profile error, check log sys_start.log")
sys.exit(-1)
del CONFIG["py_venv"]
compatible_processing(mdbs=mdbs)
init_datas(mdbs=mdbs)
for mdb in mdbs.values():
mdb.close()
# Start the website
from flask_script import Manager
from apps.app import app
from apps.core.flask.module_import import module_import
from apps.init_core_module import init_core_module
from apps.configs.sys_config import MODULES
from apps.sys_startup_info import start_info
start_info()
init_core_module(app)
module_import(MODULES)
manager = Manager(app)
if "--debug" not in sys.argv and "-D" not in sys.argv:
print(" * Signal:(SIGCHLD, SIG_IGN).Prevent child processes from becoming [Defunct processes]."
"(Do not need to comment out)")
signal(SIGCHLD, SIG_IGN)
@manager.command
def add_user():
update_mdb_collections(mdbs=mdbs)
init_datas(mdbs=mdbs)
add_user_process(mdbs=mdbs)
@manager.command
def dbcoll_to_file():
"""
    Update the mdb collections into the JSON file
:return:
"""
update_mdbcolls_json_file(mdbs=mdbs)
if __name__ == '__main__':
"""
    Start the website with Flask's built-in server
"""
print(" * Use the Web service that comes with Flask")
if "--debug" not in sys.argv and "-D" not in sys.argv:
        # Update Python third-party libraries
print(" * Check or update Python third-party libraries")
update_pylib()
else:
print(" * Using --debug, the system will not check Python dependencies")
manager.run()
the-stack_106_31731
"""A wrapper for engaging with the THOR environment."""
import copy
import json
import os
import random
from .offline_controller_with_small_rotation import OfflineControllerWithSmallRotation
class Environment:
""" Abstraction of the ai2thor enviroment. """
def __init__(
self,
use_offline_controller,
grid_size=0.25,
fov=100.0,
offline_data_dir='/tmp/data_dhm/AI2thor_Dataset/Scene_Data',
detection_feature_file_name='det_feature_60_categories.hdf5',
images_file_name='resnet18_featuremap.hdf5',
visible_object_map_file_name='visible_object_map.json',
local_executable_path=None,
optimal_action_file_name=None,
):
self.offline_data_dir = offline_data_dir
self.use_offline_controller = use_offline_controller
self.images_file_name = images_file_name
self.controller = OfflineControllerWithSmallRotation(
grid_size=grid_size,
fov=fov,
offline_data_dir=offline_data_dir,
detection_feature_file_name=detection_feature_file_name,
images_file_name=images_file_name,
metadata_file_name=visible_object_map_file_name,
visualize=False,
local_executable_path=local_executable_path,
optimal_action_file_name=optimal_action_file_name,
)
self.grid_size = grid_size
self._reachable_points = None
self.start_state = None
self.last_action = None
self.fov = fov
@property
def scene_name(self):
return self.controller.last_event.metadata["sceneName"]
@property
def current_frame(self):
return self.controller.last_event.frame
@property
def current_detection_feature(self):
return self.controller.get_detection_feature()
@property
def current_cls_masks(self):
return self.controller.get_cls_masks()
@property
def current_depth(self):
return self.controller.get_depth()
@property
def last_event(self):
return self.controller.last_event
@property
def last_action_success(self):
if self.use_offline_controller:
return self.controller.last_action_success
return self.controller.last_event.metadata["lastActionSuccess"]
def object_is_visible(self, objId):
if not self.use_offline_controller:
objects = self.last_event.metadata["objects"]
visible_objects = [o["objectId"] for o in objects if o["visible"]]
return objId in visible_objects
return self.controller.object_is_visible(objId)
def start(self, scene_name):
""" Begin the scene. """
self.controller.start()
self.reset(scene_name=scene_name)
def reset(self, scene_name):
""" Reset the scene. """
self.controller.reset(scene_name)
self.controller.step(
dict(action="Initialize", gridSize=self.grid_size, fieldOfView=self.fov)
)
def all_objects(self):
if not self.use_offline_controller:
objects = self.controller.last_event.metadata["objects"]
return [o["objectId"] for o in objects]
return self.controller.all_objects()
def step(self, action_dict):
return self.controller.step(action_dict)
def teleport_agent_to(self, x, y, z, rotation, horizon):
""" Teleport the agent to (x,y,z) with given rotation and horizon. """
self.controller.step(dict(action="Teleport", x=x, y=y, z=z))
self.controller.step(dict(action="Rotate", rotation=rotation))
self.controller.step(dict(action="Look", horizon=horizon))
def random_reachable_state(self, seed=None):
""" Get a random reachable state. """
if seed is not None:
random.seed(seed)
xyz = random.choice(self.reachable_points)
rotation = random.choice([0, 90, 180, 270])
horizon = random.choice([0, 30, 330])
state = copy.copy(xyz)
state["rotation"] = rotation
state["horizon"] = horizon
return state
def randomize_agent_location(self, seed=None):
""" Put agent in a random reachable state. """
if not self.use_offline_controller:
state = self.random_reachable_state(seed=seed)
self.teleport_agent_to(**state)
self.start_state = copy.deepcopy(state)
return
self.controller.randomize_state()
self.start_state = copy.deepcopy(self.controller.state)
def back_to_start(self):
if self.start_state is None:
self.reset(self.scene_name)
return
if not self.use_offline_controller:
self.teleport_agent_to(**self.start_state)
else:
self.controller.back_to_start(self.start_state)
@property
def reachable_points(self):
""" Use the JSON file to get the reachable points. """
if self._reachable_points is not None:
return self._reachable_points
points_path = os.path.join(self.offline_data_dir, self.scene_name, "grid.json")
if not os.path.exists(points_path):
raise IOError("Path {0} does not exist".format(points_path))
self._reachable_points = json.load(open(points_path))
return self._reachable_points
the-stack_106_31733
from tensorflow.contrib.training import HParams
# Default hyperparameters
hparams = HParams(
# Comma-separated list of cleaners to run on text prior to training and eval. For non-English
# text, you may want to use "basic_cleaners" or "transliteration_cleaners".
cleaners="english_cleaners",
# If you only have 1 GPU or want to use only one GPU, please set num_gpus=0 and specify the
# GPU idx on run. example:
    # example 1 GPU of index 2 (train on "/gpu2" only): CUDA_VISIBLE_DEVICES=2 python train.py
# --model="Tacotron" --hparams="tacotron_gpu_start_idx=2"
# If you want to train on multiple GPUs, simply specify the number of GPUs available,
# and the idx of the first GPU to use. example:
# example 4 GPUs starting from index 0 (train on "/gpu0"->"/gpu3"): python train.py
# --model="Tacotron" --hparams="tacotron_num_gpus=4, tacotron_gpu_start_idx=0"
# The hparams arguments can be directly modified on this hparams.py file instead of being
# specified on run if preferred!
# If one wants to train both Tacotron and WaveNet in parallel (provided WaveNet will be
# trained on True mel spectrograms), one needs to specify different GPU idxes.
# example Tacotron+WaveNet on a machine with 4 or plus GPUs. Two GPUs for each model:
# CUDA_VISIBLE_DEVICES=0,1 python train.py --model="Tacotron"
# --hparams="tacotron_gpu_start_idx=0, tacotron_num_gpus=2"
# Cuda_VISIBLE_DEVICES=2,3 python train.py --model="WaveNet"
# --hparams="wavenet_gpu_start_idx=2; wavenet_num_gpus=2"
# IMPORTANT NOTE: If using N GPUs, please multiply the tacotron_batch_size by N below in the
# hparams! (tacotron_batch_size = 32 * N)
# Never use lower batch size than 32 on a single GPU!
# Same applies for Wavenet: wavenet_batch_size = 8 * N (wavenet_batch_size can be smaller than
# 8 if GPU is having OOM, minimum 2)
# Please also apply the synthesis batch size modification likewise. (if N GPUs are used for
# synthesis, minimal batch size must be N, minimum of 1 sample per GPU)
# We did not add an automatic multi-GPU batch size computation to avoid confusion in the
# user"s mind and to provide more control to the user for
# resources related decisions.
# Acknowledgement:
# Many thanks to @MlWoo for his awesome work on multi-GPU Tacotron which showed to work a
# little faster than the original
# pipeline for a single GPU as well. Great work!
# Hardware setup: Default supposes user has only one GPU: "/gpu:0" (Tacotron only for now!
# WaveNet does not support multi GPU yet, WIP)
# Synthesis also uses the following hardware parameters for multi-GPU parallel synthesis.
tacotron_gpu_start_idx=0, # idx of the first GPU to be used for Tacotron training.
tacotron_num_gpus=1, # Determines the number of gpus in use for Tacotron training.
split_on_cpu=True,
# Determines whether to split data on CPU or on first GPU. This is automatically True when
# more than 1 GPU is used.
###########################################################################################################################################
# Audio
# Audio parameters are the most important parameters to tune when using this work on your
# personal data. Below are the beginner steps to adapt
# this work to your personal data:
# 1- Determine my data sample rate: First you need to determine your audio sample_rate (how
# many samples are in a second of audio). This can be done using sox: "sox --i <filename>"
# (For this small tuto, I will consider 24kHz (24000 Hz), and defaults are 22050Hz,
# so there are plenty of examples to refer to)
# 2- set sample_rate parameter to your data correct sample rate
# 3- Fix win_size and and hop_size accordingly: (Supposing you will follow our advice: 50ms
# window_size, and 12.5ms frame_shift(hop_size))
# a- win_size = 0.05 * sample_rate. In the tuto example, 0.05 * 24000 = 1200
# b- hop_size = 0.25 * win_size. Also equal to 0.0125 * sample_rate. In the tuto
# example, 0.25 * 1200 = 0.0125 * 24000 = 300 (Can set frame_shift_ms=12.5 instead)
# 4- Fix n_fft, num_freq and upsample_scales parameters accordingly.
# a- n_fft can be either equal to win_size or the first power of 2 that comes after
# win_size. I usually recommend using the latter
# to be more consistent with signal processing friends. No big difference to be seen
# however. For the tuto example: n_fft = 2048 = 2**11
# b- num_freq = (n_fft / 2) + 1. For the tuto example: num_freq = 2048 / 2 + 1 = 1024 +
# 1 = 1025.
# c- For WaveNet, upsample_scales products must be equal to hop_size. For the tuto
# example: upsample_scales=[15, 20] where 15 * 20 = 300
# it is also possible to use upsample_scales=[3, 4, 5, 5] instead. One must only
# keep in mind that upsample_kernel_size[0] = 2*upsample_scales[0]
# so the training segments should be long enough (2.8~3x upsample_scales[0] *
# hop_size or longer) so that the first kernel size can see the middle
# of the samples efficiently. The length of WaveNet training segments is under the
# parameter "max_time_steps".
# 5- Finally comes the silence trimming. This very much data dependent, so I suggest trying
# preprocessing (or part of it, ctrl-C to stop), then use the
# .ipynb provided in the repo to listen to some inverted mel/linear spectrograms. That
# will first give you some idea about your above parameters, and
# it will also give you an idea about trimming. If silences persist, try reducing
# trim_top_db slowly. If samples are trimmed mid words, try increasing it.
# 6- If audio quality is too metallic or fragmented (or if linear spectrogram plots are
# showing black silent regions on top), then restart from step 2.
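    # Rough worked example with the 16 kHz defaults used below: win_size = 0.05 * 16000 = 800,
    # hop_size = 0.25 * 800 = 200 (12.5 ms), n_fft = 800 here (1024 would be the next power of
    # two), and num_freq = n_fft / 2 + 1 = 401 for the linear spectrograms.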
num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality
# network
rescale=True, # Whether to rescale audio prior to preprocessing
rescaling_max=0.9, # Rescaling value
# Whether to clip silence in Audio (at beginning and end of audio only, not the middle)
# train samples of lengths between 3sec and 14sec are more than enough to make a model capable
# of good parallelization.
clip_mels_length=True,
# For cases of OOM (Not really recommended, only use if facing unsolvable OOM errors,
# also consider clipping your samples to smaller chunks)
max_mel_frames=900,
# Only relevant when clip_mels_length = True, please only use after trying output_per_steps=3
# and still getting OOM errors.
# Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
# It"s preferred to set True to use with https://github.com/r9y9/wavenet_vocoder
    # Does not work if n_fft is not multiple of hop_size!!
use_lws=False,
# Only used to set as True if using WaveNet, no difference in performance is observed in
# either cases.
silence_threshold=2, # silence threshold used for sound trimming for wavenet preprocessing
# Mel spectrogram
n_fft=800, # Extra window size is filled with 0 paddings to match this parameter
hop_size=200, # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate)
win_size=800, # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)
sample_rate=16000, # 16000Hz (corresponding to librispeech) (sox --i <filename>)
frame_shift_ms=None, # Can replace hop_size parameter. (Recommended: 12.5)
# M-AILABS (and other datasets) trim params (these parameters are usually correct for any
# data, but definitely must be tuned for specific speakers)
trim_fft_size=512,
trim_hop_size=128,
trim_top_db=23,
# Mel and Linear spectrograms normalization/scaling and clipping
signal_normalization=True,
# Whether to normalize mel spectrograms to some predefined range (following below parameters)
allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True
symmetric_mels=True,
# Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2,
# faster and cleaner convergence)
max_abs_value=4.,
# max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not
# be too big to avoid gradient explosion,
# not too small for fast convergence)
normalize_for_wavenet=True,
# whether to rescale to [0, 1] for wavenet. (better audio quality)
clip_for_wavenet=True,
# whether to clip [-max, max] before training/synthesizing with wavenet (better audio quality)
# Contribution by @begeekmyfriend
# Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude
# levels. Also allows for better G&L phase reconstruction)
preemphasize=True, # whether to apply filter
preemphasis=0.97, # filter coefficient.
# Limits
min_level_db=-100,
ref_level_db=20,
fmin=55,
# Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To
# test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])
fmax=7600, # To be increased/reduced depending on data.
# Griffin Lim
power=1.5,
# Only used in G&L inversion, usually values between 1.2 and 1.5 are a good choice.
griffin_lim_iters=60,
# Number of G&L iterations, typically 30 is enough but we use 60 to ensure convergence.
###########################################################################################################################################
# Tacotron
outputs_per_step=2, # Was 1
# number of frames to generate at each decoding step (increase to speed up computation and
# allows for higher batch size, decreases G&L audio quality)
stop_at_any=True,
# Determines whether the decoder should stop when predicting <stop> to any frame or to all of
# them (True works pretty well)
embedding_dim=512, # dimension of embedding space (these are NOT the speaker embeddings)
# Encoder parameters
enc_conv_num_layers=3, # number of encoder convolutional layers
enc_conv_kernel_size=(5,), # size of encoder convolution filters for each layer
enc_conv_channels=512, # number of encoder convolutions filters for each layer
encoder_lstm_units=256, # number of lstm units for each direction (forward and backward)
# Attention mechanism
smoothing=False, # Whether to smooth the attention normalization function
attention_dim=128, # dimension of attention space
attention_filters=32, # number of attention convolution filters
attention_kernel=(31,), # kernel size of attention convolution
cumulative_weights=True,
# Whether to cumulate (sum) all previous attention weights or simply feed previous weights (
# Recommended: True)
# Decoder
prenet_layers=[256, 256], # number of layers and number of units of prenet
decoder_layers=2, # number of decoder lstm layers
decoder_lstm_units=1024, # number of decoder lstm units on each layer
max_iters=2000,
# Max decoder steps during inference (Just for safety from infinite loop cases)
# Residual postnet
postnet_num_layers=5, # number of postnet convolutional layers
postnet_kernel_size=(5,), # size of postnet convolution filters for each layer
postnet_channels=512, # number of postnet convolution filters for each layer
# CBHG mel->linear postnet
cbhg_kernels=8,
# All kernel sizes from 1 to cbhg_kernels will be used in the convolution bank of CBHG to act
# as "K-grams"
cbhg_conv_channels=128, # Channels of the convolution bank
cbhg_pool_size=2, # pooling size of the CBHG
cbhg_projection=256,
# projection channels of the CBHG (1st projection, 2nd is automatically set to num_mels)
cbhg_projection_kernel_size=3, # kernel_size of the CBHG projections
cbhg_highwaynet_layers=4, # Number of HighwayNet layers
cbhg_highway_units=128, # Number of units used in HighwayNet fully connected layers
cbhg_rnn_units=128,
# Number of GRU units used in bidirectional RNN of CBHG block. CBHG output is 2x rnn_units in
# shape
# Loss params
mask_encoder=True,
# whether to mask encoder padding while computing attention. Set to True for better prosody
# but slower convergence.
mask_decoder=False,
# Whether to use loss mask for padded sequences (if False, <stop_token> loss function will not
# be weighted, else recommended pos_weight = 20)
cross_entropy_pos_weight=20,
# Use class weights to reduce the stop token classes imbalance (by adding more penalty on
# False Negatives (FN)) (1 = disabled)
predict_linear=False,
# Whether to add a post-processing network to the Tacotron to predict linear spectrograms (
# True mode Not tested!!)
###########################################################################################################################################
# Tacotron Training
# Reproduction seeds
tacotron_random_seed=5339,
# Determines initial graph and operations (i.e: model) random state for reproducibility
tacotron_data_random_state=1234, # random state for train test split repeatability
# performance parameters
tacotron_swap_with_cpu=False,
# Whether to use cpu as support to gpu for decoder computation (Not recommended: may cause
# major slowdowns! Only use when critical!)
# train/test split ratios, mini-batches sizes
tacotron_batch_size=36, # number of training samples on each training steps (was 32)
# Tacotron Batch synthesis supports ~16x the training batch size (no gradients during
# testing).
# Training Tacotron with unmasked paddings makes it aware of them, which makes synthesis times
# different from training. We thus recommend masking the encoder.
tacotron_synthesis_batch_size=128,
# DO NOT MAKE THIS BIGGER THAN 1 IF YOU DIDN"T TRAIN TACOTRON WITH "mask_encoder=True"!!
tacotron_test_size=0.05,
# % of data to keep as test data, if None, tacotron_test_batches must be not None. (5% is
# enough to have a good idea about overfit)
tacotron_test_batches=None, # number of test batches.
# Learning rate schedule
tacotron_decay_learning_rate=True,
# boolean, determines if the learning rate will follow an exponential decay
tacotron_start_decay=50000, # Step at which learning decay starts
tacotron_decay_steps=50000, # Determines the learning rate decay slope (UNDER TEST)
tacotron_decay_rate=0.5, # learning rate decay rate (UNDER TEST)
tacotron_initial_learning_rate=1e-3, # starting learning rate
tacotron_final_learning_rate=1e-5, # minimal learning rate
# Optimization parameters
tacotron_adam_beta1=0.9, # AdamOptimizer beta1 parameter
tacotron_adam_beta2=0.999, # AdamOptimizer beta2 parameter
tacotron_adam_epsilon=1e-6, # AdamOptimizer Epsilon parameter
# Regularization parameters
tacotron_reg_weight=1e-7, # regularization weight (for L2 regularization)
tacotron_scale_regularization=False,
# Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is
# high and biasing the model)
tacotron_zoneout_rate=0.1, # zoneout rate for all LSTM cells in the network
tacotron_dropout_rate=0.5, # dropout rate for all convolutional layers + prenet
tacotron_clip_gradients=True, # whether to clip gradients
# Evaluation parameters
natural_eval=False,
# Whether to use 100% natural eval (to evaluate Curriculum Learning performance) or with same
# teacher-forcing ratio as in training (just for overfit)
# Decoder RNN learning can take be done in one of two ways:
# Teacher Forcing: vanilla teacher forcing (usually with ratio = 1). mode="constant"
# Curriculum Learning Scheme: From Teacher-Forcing to sampling from previous outputs is
# function of global step. (teacher forcing ratio decay) mode="scheduled"
# The second approach is inspired by:
# Bengio et al. 2015: Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks.
# Can be found under: https://arxiv.org/pdf/1506.03099.pdf
tacotron_teacher_forcing_mode="constant",
# Can be ("constant" or "scheduled"). "scheduled" mode applies a cosine teacher forcing ratio
# decay. (Preference: scheduled)
tacotron_teacher_forcing_ratio=1.,
# Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder
# inputs, Only relevant if mode="constant"
tacotron_teacher_forcing_init_ratio=1.,
# initial teacher forcing ratio. Relevant if mode="scheduled"
tacotron_teacher_forcing_final_ratio=0.,
# final teacher forcing ratio. Relevant if mode="scheduled"
tacotron_teacher_forcing_start_decay=10000,
# starting point of teacher forcing ratio decay. Relevant if mode="scheduled"
tacotron_teacher_forcing_decay_steps=280000,
# Determines the teacher forcing ratio decay slope. Relevant if mode="scheduled"
tacotron_teacher_forcing_decay_alpha=0.,
# teacher forcing ratio decay rate. Relevant if mode="scheduled"
###########################################################################################################################################
# Tacotron-2 integration parameters
train_with_GTA=False,
# Whether to use GTA mels to train WaveNet instead of ground truth mels.
###########################################################################################################################################
# Eval sentences (if no eval text file was specified during synthesis, these sentences are
# used for eval)
sentences=[
# From July 8, 2017 New York Times:
"Scientists at the CERN laboratory say they have discovered a new particle.",
"There\"s a way to measure the acute emotional intelligence that has never gone out of "
"style.",
"President Trump met with other leaders at the Group of 20 conference.",
"The Senate\"s bill to repeal and replace the Affordable Care Act is now imperiled.",
# From Google"s Tacotron example page:
"Generative adversarial network or variational auto-encoder.",
"Basilar membrane and otolaryngology are not auto-correlations.",
"He has read the whole thing.",
"He reads books.",
"He thought it was time to present the present.",
"Thisss isrealy awhsome.",
"Punctuation sensitivity, is working.",
"Punctuation sensitivity is working.",
"Peter Piper picked a peck of pickled peppers. How many pickled peppers did Peter Piper pick?",
"She sells sea-shells on the sea-shore. The shells she sells are sea-shells I'm sure.",
"Tajima Airport serves Toyooka.",
# From The web (random long utterance)
"Sequence to sequence models have enjoyed great success in a variety of tasks such as machine translation, speech recognition, and text summarization.\
This project covers a sequence to sequence model trained to predict a speech representation from an input sequence of characters. We show that\
the adopted architecture is able to perform this task with wild success.",
"Thank you so much for your support!",
],
### SV2TTS ###
speaker_embedding_size=256,
silence_min_duration_split=0.4, # Duration in seconds of a silence for an utterance to be split
utterance_min_duration=1.6, # Duration in seconds below which utterances are discarded
)
def hparams_debug_string():
values = hparams.values()
hp = [" %s: %s" % (name, values[name]) for name in sorted(values) if name != "sentences"]
return "Hyperparameters:\n" + "\n".join(hp)
the-stack_106_31735
import os
import sys
try:
from setuptools import setup
except ImportError:
sys.exit('ERROR: setuptools is required.\n')
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
# try:
# from pip.req import parse_requirements
# except ImportError:
# sys.exit('ERROR: pip is required.\n')
if os.environ.get('READTHEDOCS', None):
# Set empty install_requires to get install to work on readthedocs
install_requires = []
else:
if sys.version_info[0] > 2:
req_file = 'requirements.txt'
else:
req_file = 'requirements2.txt'
try:
reqs = parse_requirements(req_file, session=False)
except TypeError:
reqs = parse_requirements(req_file)
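    # Older pip returns objects exposing the parsed requirement as r.req; newer pip
    # (ParsedRequirement) exposes the string as r.requirement instead, hence the fallback below.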
try:
install_requires = [str(r.req) for r in reqs]
except AttributeError:
install_requires = [str(r.requirement) for r in reqs]
# read version
exec(open('abutils/version.py').read())
config = {
'description': 'Utilities for analysis of antibody NGS data',
'author': 'Bryan Briney',
'url': 'https://www.github.com/briney/abutils',
'author_email': '[email protected]',
'version': __version__,
'install_requires': install_requires,
'packages': ['abutils'],
'scripts': ['bin/batch_cellranger'],
'name': 'abutils',
'include_package_data': True,
'classifiers': ['License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Bio-Informatics']
}
setup(**config)
|
the-stack_106_31736 | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.core import serializers
from .models import Event
from .serializers import EventSerializer
from django.db.models import Q
import datetime
def get_events(request):
startDate = request.GET.get('start')
endDate = request.GET.get('end')
start=datetime.date.today()
end=datetime.date.today()
chamber = request.GET.get('chamber')
committee = request.GET.get('committee')
type = request.GET.get('type')
if startDate is not None:
start = datetime.datetime.strptime(startDate, '%Y-%m-%dT%H:%M:%S%z')
if endDate is not None:
end = datetime.datetime.strptime(endDate, '%Y-%m-%dT%H:%M:%S%z')
new_end = end + datetime.timedelta(days=1)
q = Q()
if start:
q &= Q(start__gte=start)
if end:
q &= Q(end__lte=new_end)
if chamber and chamber != "all":
q &= Q(chamber=chamber)
if committee and committee != "all":
q &= Q(committee=committee)
if type and type != "all":
q &= Q(type=type)
events = Event.objects.filter(q).order_by('start', 'startTime')
serializer = EventSerializer(events, many=True)
return JsonResponse(serializer.data, safe = False)
def get_committees(request):
committees = Event.objects.order_by('committee').values_list('committee', flat=True).distinct()
return JsonResponse(list(committees), safe = False)
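# Illustrative request for get_events (the URL path and dates below are hypothetical,
# not taken from this project's urls.py):
#   GET /events/?start=2021-01-01T00:00:00+0000&end=2021-01-07T00:00:00+0000&chamber=senate
# Date filters default to today when omitted; chamber/committee/type are ignored when
# omitted or set to "all", and the matching events are returned as JSON.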
|
the-stack_106_31737 | import wx
import wx.grid
import wx.lib.gizmos as gizmos
import CustomGridRenderer as cgr
import wx.propgrid as wxpg
import wx.lib.scrolledpanel as scrolled
import sqlite3
import time
import os
import typing
from typing import List
from typing import Union
from typing import Tuple
from typing import Dict
from typing import NewType
import Utilities as util
import KinsectsTab as k
wxTreeListItem = NewType('wxTreeListItem', None)
kKinsect = NewType('Kinsect', None)
class KinsectsTab:
def __init__(self, root, mainNotebook):
self.root = root
self.mainNotebook = mainNotebook
self.init = True
self.currentlySelectedKinsectID = 1
self.testIcon = wx.Bitmap("images/unknown.png", wx.BITMAP_TYPE_ANY)
self.kinsectDetail = {
1: ["images/unknown.png", "Rarity"],
2: ["images/weapon-detail-24/attacktype.png", "Attack Type"],
3: ["images/weapon-detail-24/dusteffect.png", "Dust Effect"],
4: ["images/weapon-detail-24/power.png", "Power"],
5: ["images/weapon-detail-24/speed.png", "Speed"],
6: ["images/weapon-detail-24/heal.png", "Heal"],
}
self.rarityColors = {
1: "#C2BFBF",
2: "#F3F3F3",
3: "#aac44b",
4: "#57ac4c",
5: "#75b8c2",
6: "#6764d7",
7: "#895edc",
8: "#c47c5e",
9: "#cb7793",
10: "#4fd1f5",
11: "#f5d569",
12: "#d5edfa",
}
self.initKinsectTab()
def initKinsectTab(self):
self.kinsectPanel = wx.Panel(self.mainNotebook)
self.mainNotebook.AddPage(self.kinsectPanel, "Kinsects")
self.kinsectSizer = wx.BoxSizer(wx.HORIZONTAL)
self.kinsectTreeSizer = wx.BoxSizer(wx.VERTICAL)
self.kinsectDetailedSizer = wx.BoxSizer(wx.VERTICAL)
kinsectImage = wx.Bitmap("images/kinsects/Culldrone I.jpg", wx.BITMAP_TYPE_ANY)
self.kinsectImageLabel = wx.StaticBitmap(self.kinsectPanel, bitmap=kinsectImage, size=(230, 230))
self.kinsectImageLabel.SetBackgroundColour((0, 0, 0))
self.kinsectDetailsNotebook = wx.Notebook(self.kinsectPanel)
self.kinsectDetailPanel = wx.Panel(self.kinsectDetailsNotebook)
self.kinsectDetailSizer = wx.BoxSizer(wx.VERTICAL)
self.kinsectDetailsNotebook.AddPage(self.kinsectDetailPanel, "Detail")
self.kinsectDetailPanel.SetSizer(self.kinsectDetailSizer)
self.kinsectDetailedImagesSizer = wx.BoxSizer(wx.HORIZONTAL)
self.kinsectDetailedImagesSizer.Add(self.kinsectImageLabel, 1, wx.ALIGN_CENTER)
self.kinsectDetailedSizer.Add(self.kinsectDetailedImagesSizer, 1, wx.EXPAND)
self.kinsectDetailedSizer.Add(self.kinsectDetailsNotebook, 3, wx.EXPAND)
self.kinsectSizer.Add(self.kinsectTreeSizer, 0, wx.EXPAND)
self.kinsectSizer.Add(self.kinsectDetailedSizer, 1, wx.EXPAND)
self.kinsectPanel.SetSizer(self.kinsectSizer)
self.initSearch()
self.initKinsectTree()
self.initKinsectDetailTab()
def initSearch(self):
self.search = wx.TextCtrl(self.kinsectPanel)
self.search.SetHint(" search by name")
self.search.Bind(wx.EVT_TEXT, self.onSearchTextEnter)
self.kinsectTreeSizer.Add(697, 0, 0)
self.kinsectTreeSizer.Add(self.search, 0, wx.ALIGN_CENTER_VERTICAL)
def onSearchTextEnter(self, event):
self.loadKinsectTree()
def initKinsectTree(self):
self.kinsectTree = cgr.HeaderBitmapGrid(self.kinsectPanel)
self.kinsectTree.EnableEditing(False)
self.kinsectTree.EnableDragRowSize(False)
self.kinsectTree.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.onKinsectSelection)
self.kinsectTreeSizer.Add(self.kinsectTree, 1, wx.EXPAND)
kinsectTreeColumns = {
"Name": [472, None],
"Attack Type": [45, wx.Bitmap("images/weapon-detail-24/attacktype.png")],
"Dust Effect": [57, wx.Bitmap("images/weapon-detail-24/dusteffect.png")],
"Power": [35, wx.Bitmap("images/weapon-detail-24/power.png")],
"Speed": [35, wx.Bitmap("images/weapon-detail-24/speed.png")],
"Heal": [35, wx.Bitmap("images/weapon-detail-24/heal.png")],
"id": [0, None],
}
self.kinsectTree.CreateGrid(1, len(kinsectTreeColumns))
self.kinsectTree.SetDefaultRowSize(27, resizeExistingRows=True)
self.kinsectTree.HideRowLabels()
self.kinsectTree.SetDefaultCellAlignment(wx.ALIGN_CENTER, wx.ALIGN_CENTER)
for col, (k, v) in enumerate(kinsectTreeColumns.items()):
if v[1] == None:
self.kinsectTree.SetColLabelValue(col, k)
else:
self.kinsectTree.SetColLabelRenderer(col, cgr.HeaderBitmapColLabelRenderer(v[1], ""))
self.kinsectTree.SetColSize(col, v[0])
self.loadKinsectTree()
def loadKinsectTree(self):
self.init = True
try:
self.kinsectTree.DeleteRows(0, self.kinsectTree.GetNumberRows())
except:
pass
searchText = self.search.GetValue().replace("'", "''")
if len(searchText) == 0 or searchText == " ":
sql = """
SELECT k.*, kt.name
FROM kinsect k
JOIN kinsect_text kt USING (id)
WHERE kt.lang_id = :langId
ORDER BY k.id ASC
"""
else:
sql = f"""
SELECT k.*, kt.name
FROM kinsect k
JOIN kinsect_text kt USING (id)
WHERE kt.lang_id = :langId
AND kt.name LIKE '%{searchText}%'
ORDER BY k.id ASC
"""
conn = sqlite3.connect("mhw.db")
data = conn.execute(sql, ("en", ))
data = data.fetchall()
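        # self.offset maps the tree indentation depth to the horizontal pixel offset
        # passed as imageOffset to ImageTextCellRenderer in populateKinsectTree,
        # apparently to keep the rarity icon aligned with the indented kinsect name.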
self.offset = {
0: 0,
1: 223,
2: 198,
3: 176,
4: 151,
5: 127,
6: 103,
7: 78,
8: 55,
9: 30,
10: 0,
11: -25,
12: -50,
13: -75,
}
kinsectNodes = {}
kinsects = []
for row in data:
kinsects.append(k.Kinsect(row))
for kin in kinsects:
if len(searchText) != 0:
self.populateKinsectTree(1, kin, None)
else:
if kin.previousID == None:
self.populateKinsectTree(1, kin, kinsectNodes)
else:
self.populateKinsectTree(kinsectNodes[kin.previousID], kin, kinsectNodes)
self.init = False
def populateKinsectTree(self, indent:int, kin: Tuple[str], kinsectNodes: Dict[int, int]) -> None:
self.kinsectTree.AppendRows()
row = self.kinsectTree.GetNumberRows() - 1
padding = " " * indent
img = wx.Bitmap(f"images/kinsects/{kin.attackType.lower()}-rarity-24/{kin.rarity}.png")
self.kinsectTree.SetCellRenderer(row, 0, cgr.ImageTextCellRenderer(
img, f"{padding}{kin.name}", hAlign=wx.ALIGN_LEFT, imageOffset=self.offset[indent]))
self.kinsectTree.SetCellValue(row, 0, str(kin.name))
self.kinsectTree.SetCellValue(row, 1, str(kin.attackType.capitalize()))
self.kinsectTree.SetCellValue(row, 2, str(kin.dustEffect.capitalize()))
self.kinsectTree.SetCellValue(row, 3, f"Lv {str(kin.power).capitalize()}")
self.kinsectTree.SetCellValue(row, 4, f"Lv {str(kin.speed).capitalize()}")
self.kinsectTree.SetCellValue(row, 5, f"Lv {str(kin.heal).capitalize()}")
self.kinsectTree.SetCellValue(row, 6, str(kin.id))
if len(self.search.GetValue()) == 0:
kinsectNodes[kin.id] = indent + 1
def initKinsectDetailTab(self):
self.kinsectDetailList = cgr.HeaderBitmapGrid(self.kinsectDetailPanel)
self.kinsectDetailList.Bind(wx.EVT_SIZE, self.onSize)
self.kinsectDetailList.EnableEditing(False)
self.kinsectDetailList.EnableDragRowSize(False)
self.kinsectDetailSizer.Add(self.kinsectDetailList, 1, wx.EXPAND)
self.kinsectDetailList.CreateGrid(len(self.kinsectDetail) + 1, 2)
self.kinsectDetailList.SetDefaultRowSize(24, resizeExistingRows=True)
self.kinsectDetailList.SetColSize(0, 302)
self.kinsectDetailList.SetColSize(1, 155 - 20)
self.kinsectDetailList.SetDefaultCellAlignment(wx.ALIGN_CENTER, wx.ALIGN_CENTER)
self.kinsectDetailList.SetColLabelSize(2)
self.kinsectDetailList.SetRowLabelSize(1)
self.kinsectMaterialList = wx.ListCtrl(self.kinsectDetailPanel, style=wx.LC_REPORT
| wx.LC_VRULES
| wx.LC_HRULES
)
self.il = wx.ImageList(24, 24)
self.test = self.il.Add(self.testIcon)
self.kinsectMaterialList.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
self.kinsectDetailSizer.Add(self.kinsectMaterialList, 1, wx.EXPAND)
self.loadKinsectDetails()
def loadKinsectDetails(self):
self.root.Freeze()
self.kinsectDetailList.DeleteRows(0, self.kinsectDetailList.GetNumberRows())
self.kinsectDetailList.AppendRows(len(self.kinsectDetail) + 1)
self.kinsectMaterialList.ClearAll()
self.il.RemoveAll()
sql = """
SELECT k.*, kt.name
FROM kinsect k
JOIN kinsect_text kt USING (id)
WHERE kt.lang_id = :langId
AND k.id = :kinId
ORDER BY k.id ASC
"""
conn = sqlite3.connect("mhw.db")
data = conn.execute(sql, ("en", self.currentlySelectedKinsectID))
data = data.fetchone()
kin = k.Kinsect(data)
kinsectDetail = {
0: str(kin.name),
1: str(kin.rarity),
2: str(kin.attackType),
3: str(kin.dustEffect),
4: f"Lv {kin.power}",
5: f"Lv {kin.speed}",
6: f"Lv {kin.heal}",
}
imageOffset = 55
self.kinsectImageLabel.SetBitmap(wx.Bitmap(f"images/kinsects/{kin.name}.jpg"))
rarityIcon = wx.Bitmap(f"images/kinsects/{kin.attackType.lower()}-rarity-24/{kin.rarity}.png")
self.kinsectDetailList.SetCellValue(0, 0, "Name")
self.kinsectDetailList.SetCellValue(0, 1, kin.name)
for num in range(1, len(kinsectDetail)):
if num == 1:
self.kinsectDetailList.SetCellRenderer(num, 0,
cgr.ImageTextCellRenderer(
rarityIcon,
self.kinsectDetail[num][1],
imageOffset=imageOffset,
))
self.kinsectDetailList.SetCellBackgroundColour(num, 1, util.hexToRGB(self.rarityColors[kin.rarity]))
else:
self.kinsectDetailList.SetCellRenderer(num, 0,
cgr.ImageTextCellRenderer(
wx.Bitmap(self.kinsectDetail[num][0]),
self.kinsectDetail[num][1],
imageOffset=imageOffset
))
if kinsectDetail[num] == None:
self.kinsectDetailList.SetCellValue(num, 1, "-")
else:
self.kinsectDetailList.SetCellValue(num, 1, str(kinsectDetail[num]))
self.loadKinsectMaterials()
def loadKinsectMaterials(self):
info = wx.ListItem()
info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
info.Image = -1
info.Align = wx.LIST_FORMAT_LEFT
info.Text = "Req. Materials"
self.kinsectMaterialList.InsertColumn(0, info)
self.kinsectMaterialList.SetColumnWidth(0, 480)
info = wx.ListItem()
info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
info.Image = -1
info.Align = wx.LIST_FORMAT_CENTER
info.Text = ""
self.kinsectMaterialList.InsertColumn(1, info)
self.kinsectMaterialList.SetColumnWidth(1, 200)
sql = """
SELECT k.id, kr.item_id, kr.quantity, kt.name,
i.category, i.icon_name, i.icon_color, it.name
FROM kinsect k
JOIN kinsect_recipe kr
ON k.id = kr.kinsect_id
JOIN item i
ON kr.item_id = i.id
JOIN item_text it
ON kr.item_id = it.id
JOIN kinsect_text kt USING (id)
WHERE kt.lang_id = :langId
AND it.lang_id = :langId
AND k.id = :kinId
ORDER BY i.id
"""
conn = sqlite3.connect("mhw.db")
data = conn.execute(sql, ("en", self.currentlySelectedKinsectID))
data = data.fetchall()
materials = []
for row in data:
materials.append(k.KinsectMaterial(row))
for mat in materials:
img = self.il.Add(wx.Bitmap(f"images/items-24/{mat.iconName}{mat.iconColor}.png"))
index = self.kinsectMaterialList.InsertItem(self.kinsectMaterialList.GetItemCount(), mat.name, img)
self.kinsectMaterialList.SetItem(index, 1, f"{mat.quantity}")
width, height = self.kinsectPanel.GetSize()
self.kinsectPanel.SetSize(width + 1, height + 1)
self.kinsectPanel.SetSize(width, height)
self.root.Thaw()
def onKinsectSelection(self, event):
"""
When a specific kinsect is selected in the tree, the detail view gets populated with the information from the database.
"""
if not self.init:
self.currentlySelectedKinsectID = self.kinsectTree.GetCellValue(event.GetRow(), 6)
if self.currentlySelectedKinsectID != "":
self.loadKinsectDetails()
def onSize(self, event):
"""
When the application window is resized some columns's width gets readjusted.
"""
try:
self.kinsectDetailList.SetColSize(0, self.kinsectDetailPanel.GetSize()[0] * 0.66)
self.kinsectDetailList.SetColSize(1, self.kinsectDetailPanel.GetSize()[0] * 0.34 - 20)
self.kinsectMaterialList.SetColumnWidth(0, self.kinsectDetailPanel.GetSize()[0] * 0.66)
self.kinsectMaterialList.SetColumnWidth(1, self.kinsectDetailPanel.GetSize()[0] * 0.34 - 40)
except:
pass |
the-stack_106_31740 | import os
import pickle
import sqlite3
import subprocess
import tempfile
import warnings
from collections import defaultdict
from contextlib import closing
from typing import Any, Dict, List
import prefect
__all__ = ["AirflowTask", "AirflowTriggerDAG"]
def custom_query(db: str, query: str, *params: str) -> List:
with closing(sqlite3.connect(db)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(query, params)
return cursor.fetchall()
class AirflowTask(prefect.tasks.shell.ShellTask):
"""
Task wrapper for executing individual Airflow tasks.
Successful execution of this task requires a separate conda environment in which `airflow` is installed.
Any XComs this task pushes will be converted to return values for this task.
Unless certain CLI flags are provided (e.g., `-A`), execution of this task will respect Airflow trigger rules.
Args:
- task_id (string): the Airflow `task_id` to execute at runtime
- dag_id (string): the Airflow `dag_id` containing the given `task_id`
- airflow_env (str, optional): the name of the conda environment in which `airflow` is installed;
defaults to `"airflow"`
- cli_flags (List[str], optional): a list of CLI flags to provide to `airflow run` at runtime;
see [the airflow docs](https://airflow.apache.org/cli.html#run) for options. This can be used to ignore Airflow trigger rules
by providing `cli_flags=['-A']`
- env (dict, optional): dictionary of environment variables to use for
the subprocess (e.g., `AIRFLOW__CORE__DAGS_FOLDER`)
- execution_date (str, optional): the execution date for this task run; can also be provided to the run method;
if not provided here or to `run()`, the value of `today` in context will be used
- db_conn (str, optional): the location of the airflow database; currently only SQLite DBs are supported;
defaults to `~/airflow/airflow.db`; used for pulling XComs and inspecting task states
- **kwargs: additional keyword arguments to pass to the Task constructor
Example:
```python
from prefect import Flow
from prefect.tasks.airflow import AirflowTask
# compare with https://github.com/apache/airflow/blob/master/airflow/example_dags/example_xcom.py
puller = AirflowTask(
task_id="puller",
dag_id="example_xcom",
)
push = AirflowTask(
task_id="push",
dag_id="example_xcom",
)
push_by_returning = AirflowTask(
task_id="push_by_returning",
dag_id="example_xcom",
)
with Flow(name="example_xcom") as flow:
res = puller(upstream_tasks=[push, push_by_returning])
flow_state = flow.run()
# XComs auto-convert to return values
assert flow_state.result[push].result == [1, 2, 3]
assert flow_state.result[push_by_returning].result == {"a": "b"}
```
"""
def __init__(
self,
task_id: str,
dag_id: str,
cli_flags: List[str] = None,
airflow_env: str = "airflow",
env: dict = None,
execution_date: str = None,
db_conn: str = None,
**kwargs: Any
):
if cli_flags is None:
cli_flags = []
cmd = "airflow run " + " ".join(cli_flags) + " {0} {1} {2}"
self.db_conn = db_conn or os.path.expanduser("~/airflow/airflow.db")
self.dag_id = dag_id
self.task_id = task_id
self.execution_date = execution_date
kwargs.setdefault("name", task_id)
super().__init__(
command=cmd,
env=env,
helper_script="source deactivate && source activate {}".format(airflow_env),
**kwargs
)
def _state_conversion(self, query: List) -> None:
if query:
status = query[0][0]
if status == "skipped":
raise prefect.engine.signals.SKIP(
"Airflow task state marked as 'skipped' in airflow db"
)
elif status != "success":
raise prefect.engine.signals.FAIL(
"Airflow task state marked as {} in airflow db".format(
status.rstrip()
)
)
def _pre_check(self, execution_date: str) -> None:
check_query = "select state from task_instance where task_id=? and dag_id=? and execution_date like ?"
status = custom_query(
self.db_conn,
check_query,
self.task_id,
self.dag_id,
"%{}%".format(execution_date),
)
self._state_conversion(status)
def _post_check(self, execution_date: str) -> None:
check_query = "select state from task_instance where task_id=? and dag_id=? and execution_date like ?"
status = custom_query(
self.db_conn,
check_query,
self.task_id,
self.dag_id,
"%{}%".format(execution_date),
)
if not status:
raise prefect.engine.signals.SKIP(
"Airflow task state not present in airflow db, was skipped."
)
self._state_conversion(status)
def _pull_xcom(self, execution_date: str) -> Any:
check_query = "select value from xcom where task_id=? and dag_id=? and execution_date like ?"
data = custom_query(
self.db_conn,
check_query,
self.task_id,
self.dag_id,
"%{}%".format(execution_date),
)
if data:
return pickle.loads(data[0][0])
@prefect.utilities.tasks.defaults_from_attrs("execution_date")
def run(self, execution_date: str = None) -> Any:
"""
Executes `airflow run` for the provided `task_id`, `dag_id` and `execution_date`.
Args:
- execution_date (str, optional): the execution date for this task run;
if not provided here or at initialization, the value of `today` in context will be used
Raises:
- prefect.engine.signals.PrefectStateSignal: depending on the state of the task_instance in the Airflow DB
Returns:
- Any: any data this task pushes as an XCom
"""
if execution_date is None:
execution_date = prefect.context.get("today")
self._pre_check(execution_date)
self.command = self.command.format( # type: ignore
self.dag_id, self.task_id, execution_date
)
res = super().run()
if "Task is not able to be run" in res.decode():
raise prefect.engine.signals.SKIP("Airflow task was not run.")
self._post_check(execution_date)
data = self._pull_xcom(execution_date)
return data
class AirflowTriggerDAG(prefect.tasks.shell.ShellTask):
"""
Task wrapper for triggering an Airflow DAG run.
Successful execution of this task requires a separate conda environment in which `airflow` is installed.
Args:
- dag_id (string): the Airflow `dag_id` containing the given `task_id`
- airflow_env (str, optional): the name of the conda environment in which `airflow` is installed;
defaults to `"airflow"`
- execution_date (str, optional): the execution date for this task run; can also be provided to the run method;
if not provided here or to `run()`, the value of `today` in context will be used
- cli_flags (List[str], optional): a list of CLI flags to provide to `airflow trigger_dag` at runtime;
this can be used to provide `execution_date` via `["-e 1999-01-01"]`. For a complete list of available options,
see the [corresponding Airflow documentation](https://airflow.apache.org/cli.html#trigger_dag)
- env (dict, optional): dictionary of environment variables to use for
the subprocess (e.g., `AIRFLOW__CORE__DAGS_FOLDER`)
- **kwargs: additional keyword arguments to pass to the Task constructor
"""
def __init__(
self,
dag_id: str,
airflow_env: str = "airflow",
execution_date: str = None,
cli_flags: List[str] = None,
env: dict = None,
**kwargs
):
if cli_flags is None:
cli_flags = []
self.cli_flags = cli_flags
self.dag_id = dag_id
self.execution_date = execution_date
kwargs.setdefault("name", dag_id)
super().__init__(
env=env,
helper_script="source deactivate && source activate {}".format(airflow_env),
**kwargs
)
@prefect.utilities.tasks.defaults_from_attrs("execution_date")
def run(self, execution_date: str = None) -> Any:
"""
Executes `airflow trigger_dag` for the provided `dag_id` with the provided options.
Args:
- execution_date (str, optional): the execution date for this task run;
if not provided here or at initialization, the value of `today` in context will be used
Raises:
- prefect.engine.signals.PrefectStateSignal: depending on the state of the task_instance in the Airflow DB
Returns:
- Any: any data this task pushes as an XCom
"""
if execution_date is None:
execution_date = prefect.context.get("today")
cli_flags = self.cli_flags + ["-e {}".format(execution_date)]
cmd = "airflow trigger_dag " + " ".join(cli_flags) + " {0}".format(self.dag_id)
res = super().run(command=cmd)
return res
|
the-stack_106_31741 | from config import denoise_image_config as config
from pyimagesearch.denoising.helper import blur_and_threshold
from imutils import paths
import progressbar
import cv2
import random
train_paths = sorted(list(paths.list_images(config.TRAIN_PATH)))
cleaned_paths = sorted(list(paths.list_images(config.CLEANED_PATH)))
widgets = ["Creating Features: ", progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(train_paths), widgets=widgets).start()
image_paths = zip(train_paths, cleaned_paths)
csv = open(config.FEATURES_PATH, 'w')
for(i, (train_paths, cleaned_paths)) in enumerate(image_paths):
# read images
train_image = cv2.imread(train_paths)
clean_image = cv2.imread(cleaned_paths)
    # BGR to gray
train_image = cv2.cvtColor(train_image, cv2.COLOR_BGR2GRAY)
clean_image = cv2.cvtColor(clean_image, cv2.COLOR_BGR2GRAY)
# padding 2px each side for both train and clean images
train_image = cv2.copyMakeBorder(train_image, 2, 2, 2, 2, cv2.BORDER_REPLICATE)
    clean_image = cv2.copyMakeBorder(clean_image, 2, 2, 2, 2, cv2.BORDER_REPLICATE)
# blur the train image
train_image = blur_and_threshold(train_image)
# scale to px intensity between 0-1 from 0-255
clean_image = clean_image.astype('float') / 255.0
    # slide a 5x5 window through the image
for y in range(0, train_image.shape[0]):
for x in range(0, train_image.shape[1]):
# extract the region for both clean and train image
train_region = train_image[y:y + 5, x:x + 5]
clean_region = clean_image[y:y + 5, x:x + 5]
(rH, rW) = train_region.shape[:2]
# discard region which is not 5x5
if rW != 5 or rH != 5:
continue
# get features and target
features = train_region.flatten()
target = clean_region[2, 2]
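            # each CSV row is the clean patch's center pixel (the regression target)
            # followed by the 25 flattened pixels of the noisy 5x5 patch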
# only write some of feature/target combination to disk
if random.random() <= config.SAMPLE_PROB:
features = [str(x) for x in features]
row = [str(target)] + features
row = ', '.join(row)
csv.write('{}\n'.format(row))
pbar.update(i)
# housekeeping
pbar.finish()
csv.close()
|
the-stack_106_31742 |
from importlib.abc import ExecutionLoader
from thingsboard_gateway.storage.sqlite.database import Database
from time import time, sleep
from queue import Queue
from thingsboard_gateway.storage.sqlite.database_request import DatabaseRequest
from thingsboard_gateway.storage.sqlite.database_action_type import DatabaseActionType
#
# No need to import DatabaseResponse, responses come to this component to be deconstructed
#
from logging import getLogger
log = getLogger("storage")
class StorageHandler:
"""
HIGH level api for thingsboard_gateway main loop
"""
def __init__(self, config):
log.info("Sqlite Storage initializing...")
        # TODO:
        # Make an init function that checks whether the database file exists;
        # if it exists, load the connected devices from it
self.db = Database(config)
        # We need queues to stay atomic when multiple connectors/threads are
        # trying to write to or read from the database
log.info("Initializing read and process queues")
self.processQueue = Queue(-1)
self.readQueue = Queue(-1)
self.db.setReadQueue(self.readQueue)
self.db.setProcessQueue(self.processQueue)
# Create table if not exists for connected devices
self.db.create_connected_devices_table()
self.connected_devices = self.get_connected_devices()
log.info("Sqlite storage initialized!")
def get_connected_devices(self):
"""
        Utility function that only parses and stores connected device names in a list
"""
_type = DatabaseActionType.READ_CONNECTED_DEVICES
data = self
req = DatabaseRequest(_type, data)
self.processQueue.put(req)
self.db.process()
return self.connected_devices
def readAll(self, deviceName):
return self.db.readAll(deviceName)
def readFrom(self, deviceName, ts):
return self.db.readFrom(deviceName, ts)
def put(self, message):
try:
device_name = message.get("deviceName")
if device_name is not None and device_name not in self.connected_devices:
self.db.create_device_table(device_name)
_type = DatabaseActionType.WRITE_DATA_STORAGE
request = DatabaseRequest(_type, message)
log.info("Sending data to storage")
self.processQueue.put(request)
## Left for discussion ##
log.debug("data %s from device %s " % (str(self.connected_devices[device_name]), device_name))
self.connected_devices[device_name]["data_saved_index"] += 1
storageIndex = self.connected_devices[device_name]["data_saved_index"]
data = (device_name, storageIndex)
_type = DatabaseActionType.WRITE_STORAGE_INDEX
log.debug("Index request data: %s" % str(data))
index_request = DatabaseRequest(_type, data)
log.debug("Updating device storage index")
self.processQueue.put(index_request)
self.db.process() ## This call is neccessary
return True
except Exception as e:
log.exception(e)
def add_device(self, deviceName, connector, deviceType=None):
self.db.add_new_connecting_device(deviceName, connector, deviceType)
# Update connected devices list
self.connected_devices = self.get_connected_devices()
# Create device table
self.db.create_device_table(deviceName)
def del_device(self, device_name):
self.db.del_connected_device(device_name)
# Update connected devices list
self.connected_devices = self.get_connected_devices()
def closeDB(self):
self.db.closeDB() |
the-stack_106_31744 | import random
import numpy as np
from fedot.core.log import default_log
from fedot.core.repository.tasks import Task, TaskTypesEnum, TsForecastingParams
from fedot.api.api_utils.presets import OperationsPreset
class ApiParams:
def __init__(self):
self.default_forecast_length = 30
self.api_params = None
self.log = None
self.task = None
self.metric_to_compose = None
self.task_params = None
self.metric_name = None
def check_input_params(self, **input_params):
self.metric_to_compose = None
self.api_params['problem'] = input_params['problem']
self.log = default_log('FEDOT logger', verbose_level=input_params['verbose_level'])
if input_params['seed'] is not None:
np.random.seed(input_params['seed'])
random.seed(input_params['seed'])
if input_params['timeout'] is not None:
            self.api_params['timeout'] = input_params['timeout']
self.api_params['num_of_generations'] = 10000
if 'metric' in self.api_params:
self.api_params['composer_metric'] = self.api_params['metric']
del self.api_params['metric']
self.metric_to_compose = self.api_params['composer_metric']
if input_params['problem'] == 'ts_forecasting' and input_params['task_params'] is None:
self.log.warn('The value of the forecast depth was set to {}.'.format(self.default_forecast_length))
self.task_params = TsForecastingParams(forecast_length=self.default_forecast_length)
if input_params['problem'] == 'clustering':
            raise ValueError('This type of task is not supported by the API yet')
def get_initial_params(self, **input_params):
if input_params['composer_params'] is None:
self.api_params = self.get_default_evo_params(problem=input_params['problem'])
else:
self.api_params = {**self.get_default_evo_params(problem=input_params['problem']),
**input_params['composer_params']}
self.check_input_params(**input_params)
self.task = self.get_task_params(input_params['problem'],
input_params['task_params'])
self.metric_name = self.get_default_metric(input_params['problem'])
return
def initialize_params(self, **input_params):
self.get_initial_params(**input_params)
preset_operations = OperationsPreset(task=self.task, preset_name=input_params['preset'])
self.api_params = preset_operations.composer_params_based_on_preset(composer_params=self.api_params)
param_dict = {
'task': self.task,
'logger': self.log,
'metric_name': self.metric_name,
'composer_metric': self.metric_to_compose
}
return {**param_dict, **self.api_params}
@staticmethod
def get_default_evo_params(problem: str):
""" Dictionary with default parameters for composer """
params = {'max_depth': 3,
'max_arity': 4,
'pop_size': 20,
'num_of_generations': 20,
'timeout': 2,
'with_tuning': False,
'preset': 'light_tun',
'genetic_scheme': None,
'history_folder': None}
if problem in ['classification', 'regression']:
params['cv_folds'] = 3
return params
@staticmethod
def get_default_metric(problem: str):
default_test_metric_dict = {
'regression': ['rmse', 'mae'],
'classification': ['roc_auc', 'f1'],
'multiclassification': 'f1',
'clustering': 'silhouette',
'ts_forecasting': ['rmse', 'mae']
}
return default_test_metric_dict[problem]
@staticmethod
def get_task_params(problem, task_params):
""" Return task parameters by machine learning task """
task_dict = {'regression': Task(TaskTypesEnum.regression, task_params=task_params),
'classification': Task(TaskTypesEnum.classification, task_params=task_params),
'clustering': Task(TaskTypesEnum.clustering, task_params=task_params),
'ts_forecasting': Task(TaskTypesEnum.ts_forecasting, task_params=task_params)
}
return task_dict[problem]
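# Minimal usage sketch (illustrative only; the argument values below are assumptions,
# not defaults taken from FEDOT itself):
# params = ApiParams()
# composer_args = params.initialize_params(problem='classification', preset='light_tun',
#                                           task_params=None, composer_params=None,
#                                           timeout=2, seed=42, verbose_level=1)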
|
the-stack_106_31745 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import pytest
from sagemaker.workflow.condition_step import ConditionStep
from sagemaker.workflow.conditions import ConditionEquals
from sagemaker.workflow.fail_step import FailStep
from sagemaker.workflow.functions import Join
from sagemaker.workflow.parameters import ParameterInteger
from sagemaker.workflow.pipeline import Pipeline
def test_fail_step():
fail_step = FailStep(
name="MyFailStep",
depends_on=["TestStep"],
error_message="Test error message",
)
fail_step.add_depends_on(["SecondTestStep"])
assert fail_step.to_request() == {
"Name": "MyFailStep",
"Type": "Fail",
"DependsOn": ["TestStep", "SecondTestStep"],
"Arguments": {"ErrorMessage": "Test error message"},
}
def test_fail_step_with_no_error_message():
fail_step = FailStep(
name="MyFailStep",
depends_on=["TestStep"],
)
fail_step.add_depends_on(["SecondTestStep"])
assert fail_step.to_request() == {
"Name": "MyFailStep",
"Type": "Fail",
"DependsOn": ["TestStep", "SecondTestStep"],
"Arguments": {"ErrorMessage": ""},
}
def test_fail_step_with_join_fn_in_error_message():
param = ParameterInteger(name="MyInt", default_value=2)
cond = ConditionEquals(left=param, right=1)
step_cond = ConditionStep(
name="CondStep",
conditions=[cond],
if_steps=[],
else_steps=[],
)
step_fail = FailStep(
name="FailStep",
error_message=Join(
on=": ", values=["Failed due to xxx == yyy returns", step_cond.properties.Outcome]
),
)
pipeline = Pipeline(
name="MyPipeline",
steps=[step_cond, step_fail],
parameters=[param],
)
_expected_dsl = [
{
"Name": "CondStep",
"Type": "Condition",
"Arguments": {
"Conditions": [
{"Type": "Equals", "LeftValue": {"Get": "Parameters.MyInt"}, "RightValue": 1}
],
"IfSteps": [],
"ElseSteps": [],
},
},
{
"Name": "FailStep",
"Type": "Fail",
"Arguments": {
"ErrorMessage": {
"Std:Join": {
"On": ": ",
"Values": [
"Failed due to xxx == yyy returns",
{"Get": "Steps.CondStep.Outcome"},
],
}
}
},
},
]
assert json.loads(pipeline.definition())["Steps"] == _expected_dsl
def test_fail_step_with_properties_ref():
fail_step = FailStep(
name="MyFailStep",
error_message="Test error message",
)
with pytest.raises(Exception) as error:
fail_step.properties()
assert (
str(error.value)
== "FailStep is a terminal step and the Properties object is not available for it."
)
|
the-stack_106_31746 | import datetime
from sqlalchemy import orm, types, Column, Table, ForeignKey, desc, or_
import ckan.model
import meta
import types as _types
import domain_object
__all__ = ['Activity', 'activity_table',
'ActivityDetail', 'activity_detail_table',
]
activity_table = Table(
'activity', meta.metadata,
Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
Column('timestamp', types.DateTime),
Column('user_id', types.UnicodeText),
Column('object_id', types.UnicodeText),
Column('revision_id', types.UnicodeText),
Column('activity_type', types.UnicodeText),
Column('data', _types.JsonDictType),
)
activity_detail_table = Table(
'activity_detail', meta.metadata,
Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
Column('activity_id', types.UnicodeText, ForeignKey('activity.id')),
Column('object_id', types.UnicodeText),
Column('object_type', types.UnicodeText),
Column('activity_type', types.UnicodeText),
Column('data', _types.JsonDictType),
)
class Activity(domain_object.DomainObject):
def __init__(self, user_id, object_id, revision_id, activity_type,
data=None):
self.id = _types.make_uuid()
self.timestamp = datetime.datetime.now()
self.user_id = user_id
self.object_id = object_id
self.revision_id = revision_id
self.activity_type = activity_type
if data is None:
self.data = {}
else:
self.data = data
meta.mapper(Activity, activity_table)
class ActivityDetail(domain_object.DomainObject):
def __init__(self, activity_id, object_id, object_type, activity_type,
data=None):
self.activity_id = activity_id
self.object_id = object_id
self.object_type = object_type
self.activity_type = activity_type
if data is None:
self.data = {}
else:
self.data = data
@classmethod
def by_activity_id(cls, activity_id):
return ckan.model.Session.query(cls) \
.filter_by(activity_id = activity_id).all()
meta.mapper(ActivityDetail, activity_detail_table, properties = {
'activity':orm.relation ( Activity, backref=orm.backref('activity_detail'))
})
def _activities_at_offset(q, limit, offset):
'''Return an SQLAlchemy query for all activities at an offset with a limit.
'''
import ckan.model as model
q = q.order_by(desc(model.Activity.timestamp))
if offset:
q = q.offset(offset)
if limit:
q = q.limit(limit)
return q.all()
def _activities_from_user_query(user_id):
'''Return an SQLAlchemy query for all activities from user_id.'''
import ckan.model as model
q = model.Session.query(model.Activity)
q = q.filter(model.Activity.user_id == user_id)
return q
def _activities_about_user_query(user_id):
'''Return an SQLAlchemy query for all activities about user_id.'''
import ckan.model as model
q = model.Session.query(model.Activity)
q = q.filter(model.Activity.object_id == user_id)
return q
def _user_activity_query(user_id):
'''Return an SQLAlchemy query for all activities from or about user_id.'''
q = _activities_from_user_query(user_id)
q = q.union(_activities_about_user_query(user_id))
return q
def user_activity_list(user_id, limit, offset):
'''Return user_id's public activity stream.
Return a list of all activities from or about the given user, i.e. where
the given user is the subject or object of the activity, e.g.:
"{USER} created the dataset {DATASET}"
"{OTHER_USER} started following {USER}"
etc.
'''
q = _user_activity_query(user_id)
return _activities_at_offset(q, limit, offset)
def _package_activity_query(package_id):
'''Return an SQLAlchemy query for all activities about package_id.
'''
import ckan.model as model
q = model.Session.query(model.Activity)
q = q.filter_by(object_id=package_id)
return q
def package_activity_list(package_id, limit, offset):
'''Return the given dataset (package)'s public activity stream.
Returns all activities about the given dataset, i.e. where the given
dataset is the object of the activity, e.g.:
"{USER} created the dataset {DATASET}"
"{USER} updated the dataset {DATASET}"
etc.
'''
q = _package_activity_query(package_id)
return _activities_at_offset(q, limit, offset)
def _group_activity_query(group_id):
'''Return an SQLAlchemy query for all activities about group_id.
Returns a query for all activities whose object is either the group itself
or one of the group's datasets.
'''
import ckan.model as model
group = model.Group.get(group_id)
if not group:
# Return a query with no results.
return model.Session.query(model.Activity).filter("0=1")
dataset_ids = [dataset.id for dataset in group.packages()]
q = model.Session.query(model.Activity)
if dataset_ids:
q = q.filter(or_(model.Activity.object_id == group_id,
model.Activity.object_id.in_(dataset_ids)))
else:
q = q.filter(model.Activity.object_id == group_id)
return q
def group_activity_list(group_id, limit, offset):
'''Return the given group's public activity stream.
Returns all activities where the given group or one of its datasets is the
object of the activity, e.g.:
"{USER} updated the group {GROUP}"
"{USER} updated the dataset {DATASET}"
etc.
'''
q = _group_activity_query(group_id)
return _activities_at_offset(q, limit, offset)
def _activites_from_users_followed_by_user_query(user_id):
'''Return a query for all activities from users that user_id follows.'''
import ckan.model as model
# Get a list of the users that the given user is following.
follower_objects = model.UserFollowingUser.followee_list(user_id)
if not follower_objects:
# Return a query with no results.
return model.Session.query(model.Activity).filter("0=1")
q = _user_activity_query(follower_objects[0].object_id)
q = q.union_all(*[_user_activity_query(follower.object_id)
for follower in follower_objects[1:]])
return q
def _activities_from_datasets_followed_by_user_query(user_id):
'''Return a query for all activities from datasets that user_id follows.'''
import ckan.model as model
# Get a list of the datasets that the user is following.
follower_objects = model.UserFollowingDataset.followee_list(user_id)
if not follower_objects:
# Return a query with no results.
return model.Session.query(model.Activity).filter("0=1")
q = _package_activity_query(follower_objects[0].object_id)
q = q.union_all(*[_package_activity_query(follower.object_id)
for follower in follower_objects[1:]])
return q
def _activities_from_groups_followed_by_user_query(user_id):
'''Return a query for all activities about groups the given user follows.
Return a query for all activities about the groups the given user follows,
or about any of the group's datasets. This is the union of
_group_activity_query(group_id) for each of the groups the user follows.
'''
import ckan.model as model
# Get a list of the group's that the user is following.
follower_objects = model.UserFollowingGroup.followee_list(user_id)
if not follower_objects:
# Return a query with no results.
return model.Session.query(model.Activity).filter("0=1")
q = _group_activity_query(follower_objects[0].object_id)
q = q.union_all(*[_group_activity_query(follower.object_id)
for follower in follower_objects[1:]])
return q
def _activities_from_everything_followed_by_user_query(user_id):
'''Return a query for all activities from everything user_id follows.'''
q = _activites_from_users_followed_by_user_query(user_id)
q = q.union(_activities_from_datasets_followed_by_user_query(user_id))
q = q.union(_activities_from_groups_followed_by_user_query(user_id))
return q
def activities_from_everything_followed_by_user(user_id, limit, offset):
'''Return activities from everything that the given user is following.
Returns all activities where the object of the activity is anything
(user, dataset, group...) that the given user is following.
'''
q = _activities_from_everything_followed_by_user_query(user_id)
return _activities_at_offset(q, limit, offset)
def _dashboard_activity_query(user_id):
'''Return an SQLAlchemy query for user_id's dashboard activity stream.'''
q = _user_activity_query(user_id)
q = q.union(_activities_from_everything_followed_by_user_query(user_id))
return q
def dashboard_activity_list(user_id, limit, offset):
'''Return the given user's dashboard activity stream.
Returns activities from the user's public activity stream, plus
activities from everything that the user is following.
This is the union of user_activity_list(user_id) and
activities_from_everything_followed_by_user(user_id).
'''
q = _dashboard_activity_query(user_id)
return _activities_at_offset(q, limit, offset)
def _changed_packages_activity_query():
    '''Return an SQLAlchemy query for all changed package activities.
Return a query for all activities with activity_type '*package', e.g.
'new_package', 'changed_package', 'deleted_package'.
'''
import ckan.model as model
q = model.Session.query(model.Activity)
q = q.filter(model.Activity.activity_type.endswith('package'))
return q
def recently_changed_packages_activity_list(limit, offset):
'''Return the site-wide stream of recently changed package activities.
This activity stream includes recent 'new package', 'changed package' and
'deleted package' activities for the whole site.
'''
q = _changed_packages_activity_query()
return _activities_at_offset(q, limit, offset)
|
the-stack_106_31749 | #!/usr/bin/env python3
from collections import Counter
from copy import deepcopy
from itertools import chain, repeat
import sys
def get_layout(filename):
# adding floors lets us not worry about literal edge cases
with open(filename, "r") as ifile:
# add floor around each edge
layout = [["."] + [c for c in l.strip()] + ["."] for l in ifile]
# add floor at top and bottom of seats
blank_row = ["."] * len(layout[0])
layout.insert(0, blank_row)
layout.append(blank_row)
return layout
def adjacent_seats(layout, r, c):
return (
layout[r - 1][c - 1 : c + 2]
+ [layout[r][c - 1]]
+ [layout[r][c + 1]]
+ layout[r + 1][c - 1 : c + 2]
)
def adjacent_occupied(layout, r, c):
return [s == "#" for s in adjacent_seats(layout, r, c)]
def adjacent_empty(layout, r, c):
return [s in (".", "L") for s in adjacent_seats(layout, r, c)]
def iterate_part_one(layout):
new_layout = deepcopy(layout)
for row_num, row in enumerate(layout):
for col_num, seat in enumerate(row):
if seat == "L" and all(adjacent_empty(layout, row_num, col_num)):
new_layout[row_num][col_num] = "#"
elif seat == "#" and sum(adjacent_occupied(layout, row_num, col_num)) >= 4:
new_layout[row_num][col_num] = "L"
return new_layout
def indexes(direction, r, c, nrows, ncols):
return {
"n": zip(range(r - 1, -1, -1), repeat(c)),
"s": zip(range(r + 1, nrows), repeat(c)),
"w": zip(repeat(r), range(c - 1, -1, -1)),
"e": zip(repeat(r), range(c + 1, ncols)),
"nw": zip(range(r - 1, -1, -1), range(c - 1, -1, -1)),
"ne": zip(range(r - 1, -1, -1), range(c + 1, ncols)),
"sw": zip(range(r + 1, nrows), range(c - 1, -1, -1)),
"se": zip(range(r + 1, nrows), range(c + 1, ncols)),
}.get(direction)
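# For example, indexes("nw", 3, 3, 10, 10) walks diagonally toward the top-left
# corner, yielding (2, 2), (1, 1), (0, 0).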
def first_visible_seats(layout, r, c):
directions = ["n", "s", "e", "w", "ne", "nw", "sw", "se"]
nrows = len(layout)
ncols = len(layout[r])
visible_seats = []
for direction in directions:
for i, j in indexes(direction, r, c, nrows, ncols):
if (seat := layout[i][j]) in ("L", "#"):
visible_seats.append(seat)
break
return Counter(visible_seats)
def no_visible_occupied(layout, r, c):
return first_visible_seats(layout, r, c)["#"] == 0
def gets_vacated(layout, r, c):
return first_visible_seats(layout, r, c)["#"] >= 5
def iterate_part_two(layout):
new_layout = deepcopy(layout)
for row_num, row in enumerate(layout):
for col_num, seat in enumerate(row):
if seat == "L" and no_visible_occupied(layout, row_num, col_num):
new_layout[row_num][col_num] = "#"
elif seat == "#" and gets_vacated(layout, row_num, col_num):
new_layout[row_num][col_num] = "L"
return new_layout
def occupied_seats(layout):
return sum(s == "#" for s in chain.from_iterable(layout))
def solve(layout, iterate_func):
while True:
n_occupied = occupied_seats(layout)
layout = iterate_func(layout)
if n_occupied == occupied_seats(layout):
return n_occupied
def main():
if len(sys.argv) != 2:
print("Usage: ./solve.py inputfile")
sys.exit(2)
layout = get_layout(sys.argv[1])
print(f"Part 1 answer: {solve(layout, iterate_part_one)}")
print(f"Part 2 answer: {solve(layout, iterate_part_two)}")
if __name__ == "__main__":
main()
|
the-stack_106_31750 | """:mod:`asuka.service` --- Service interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import re
from .build import BaseBuild
from .instance import Instance
from .logger import LoggerProviderMixin
__all__ = 'DomainService', 'Service'
class Service(LoggerProviderMixin):
"""The inteface of services.
:param build: the build object
:type build: :class:`~asuka.build.BaseBuild`
:param name: the service name
:type name: :class:`basestring`
:param config: the config mapping object
:type config: :class:`collections.Mapping`
:param required_apt_repositories: the set of APT repositories
the service uses
:param required_apt_packages: the set of APT packages
the service depends
:type required_apt_packages: :class:`collections.Set`
:param required_python_packages: the set of Python packages
the service depends.
elements have to be PyPI names
:type required_python_packages: :class:`collections.Set`
"""
#: (:class:`re.RegexObject`) The pattern of the valid service name.
NAME_PATTERN = re.compile('^[a-z_][a-z0-9_]{1,50}$')
#: (:class:`~asuka.build.Build`) The build object.
build = None
#: (:class:`~asuka.branch.Branch`) The branch object.
branch = None
#: (:class:`~asuka.commit.Commit`) The commit object.
commit = None
#: (:class:`~asuka.app.App`) The application object.
app = None
#: (:class:`str`) The service name e.g. ``'web'``.
name = None
#: (:class:`collections.Sequence`) The shell commands to be invoked
#: before services are installed.
pre_install = None
#: (:class:`collections.Sequence`) The shell commands to be invoked
#: after all services are installed.
post_install = None
#: (:class:`collections.Mapping`) The configuration dictionary.
config = None
def __init__(self, build, name, config={},
required_apt_repositories=frozenset(),
required_apt_packages=frozenset(),
required_python_packages=frozenset(),
pre_install=[],
post_install=[]):
if not isinstance(build, BaseBuild):
raise TypeError('build must be an instance of asuka.build.'
'BaseBuild, not ' + repr(build))
elif not isinstance(name, basestring):
raise TypeError('name must be a string, not ' + repr(name))
elif not self.NAME_PATTERN.search(name):
raise TypeError('invalid name: ' + repr(name))
self.build = build
self.app = build.app
self.branch = build.branch
self.commit = build.commit
self.name = str(name)
self.config = dict(config)
self._required_apt_repositories = frozenset(required_apt_repositories)
self._required_apt_packages = frozenset(required_apt_packages)
self._required_python_packages = frozenset(required_python_packages)
self.pre_install = list(pre_install)
self.post_install = list(post_install)
@property
def required_apt_repositories(self):
"""(:class:`collections.Set`) The set of APT repository source lines
to add. It takes source lines :program:`apt-add-repository`
can take e.g.::
frozenset([
'deb http://myserver/path/to/repo stable myrepo',
'http://myserver/path/to/repo myrepo',
'https://packages.medibuntu.org free non-free',
'http://extras.ubuntu.com/ubuntu ',
'ppa:user/repository'
])
"""
return self._required_apt_repositories
@property
def required_apt_packages(self):
"""(:class:`collections.Set`) The set of APT package names
to install e.g. ``frozenset(['python-dev', 'python-greenlet'])``.
"""
return self._required_apt_packages
@property
def required_python_packages(self):
"""(:class:`collections.Set`) The set of PyPI_ package names
to install e.g. ``frozenset(['Werkzeug', 'chardet'])``.
.. _PyPI: http://pypi.python.org/
"""
return self._required_python_packages
def install(self, instance):
"""Installs the service into the ``instance``.
:param instance: the instance to install the service
:type instance: :class:`asuka.instance.Instance`
:returns: values provided by the service. this values are shared
to the app via :file:`/etc/<app>/values.json` file.
it must be able to be serialized into JSON
"""
if not isinstance(instance, Instance):
raise TypeError('instance must be an asuka.instance.Instance '
'object, not ' + repr(instance))
elif instance.app is not self.app:
raise TypeError('{0!r} is not an instance for {1!r} but {0.app!r}'
''.format(instance, self.app))
app_name = instance.app.name
F = app_name, self.name
with instance:
# Make directories
instance.do([
'sudo', 'mkdir', '-p', '/etc/{0}/{1}'.format(*F),
'/var/lib/{0}/{1}'.format(*F), '/var/run/{0}'.format(*F)
])
instance.do([
'sudo', 'chown', '-R', '{0}:{0}'.format(*F),
'/etc/{0}'.format(*F), '/var/lib/{0}'.format(*F),
'/var/run/{0}'.format(*F)
])
def uninstall(self):
"""Uninstalls the service."""
def __repr__(self):
cls = type(self)
return '<{0.__module__}.{0.__name__} {1!r}>'.format(cls, self.name)
class DomainService(Service):
"""The service subtype mixin which provides domain routing."""
def route_domain(self, name, records):
"""Routes the service to the zone.
:param name: the full domain name to map
:type name: :class:`basestring`
:param records: the record changeset
:type records: :class:`boto.route53.record.ResourceRecordSets`
"""
def remove_domain(self, name, records):
"""Removes the records for the service from the zone.
:param name: the full domain name to deregister
:type name: :class:`basestring`
:param records: the record changeset
:type records: :class:`boto.route53.record.ResourceRecordSets`
"""
|
the-stack_106_31752 | # SPDX-FileCopyrightText: 2021 Dan Halbert, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2021 Adafruit Industries for Adafruit Industries LLC
#
# SPDX-License-Identifier: MIT
"""
`adafruit_ble_lywsd03mmc`
================================================================================
BLE Support for Xiaomi LYWSD03MMC Thermometer/Hygrometer
* Author(s): Adafruit Industries
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import struct
import _bleio
from adafruit_ble.services import Service
from adafruit_ble.uuid import VendorUUID
from adafruit_ble.characteristics import Characteristic, ComplexCharacteristic
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_LYWSD03MMC.git"
class _Readings(ComplexCharacteristic):
"""Notify-only characteristic of temperature/humidity"""
uuid = VendorUUID("ebe0ccc1-7a0a-4b0c-8a1a-6ff2997da3a6")
def __init__(self):
super().__init__(properties=Characteristic.NOTIFY)
def bind(self, service):
"""Bind to an LYWSD03MMCService."""
bound_characteristic = super().bind(service)
bound_characteristic.set_cccd(notify=True)
# Use a PacketBuffer that can store one packet to receive the data.
return _bleio.PacketBuffer(bound_characteristic, buffer_size=1)
class LYWSD03MMCService(Service):
"""Service for reading from an LYWSD03MMC sensor."""
def __init__(self, service=None):
super().__init__(service=service)
# Defer creating buffers until needed, since MTU is not known yet.
self._settings_result_buf = None
self._readings_buf = None
uuid = VendorUUID("ebe0ccb0-7a0a-4b0c-8a1a-6ff2997da3a6")
readings = _Readings()
@property
def temperature_humidity(self):
"""Return a tuple of (temperature, humidity)."""
if self._readings_buf is None:
self._readings_buf = bytearray(self.readings.incoming_packet_length)
data = self._readings_buf
length = self.readings.readinto(data)
if length > 0:
low_temp, high_temp, hum = struct.unpack_from("<BBB", data)
sign = high_temp & 0x80
temp = ((high_temp & 0x7F) << 8) | low_temp
if sign:
temp = temp - 32767
temp = temp / 100
return (temp, hum)
# No data.
return None
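# Rough usage sketch following the usual adafruit_ble connection pattern (variable
# names and the scan/connect steps are illustrative, not part of this module):
# from adafruit_ble import BLERadio
# ble = BLERadio()
# connection = ble.connect(some_advertisement)
# service = connection[LYWSD03MMCService]
# print(service.temperature_humidity)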
|
the-stack_106_31755 | # -*- encoding: utf-8 -*-
"""
Created by eniocc at 11/10/2020
"""
import os
import platform
class System:
@staticmethod
def detect_platform():
"""
        Detect the host platform; other methods adjust their behaviour based on the result.
        :return: plat: A string containing the platform name, e.g. 'Windows' or 'Linux'.
"""
plat = platform.system()
if plat == 0:
return 'System cannot be determined'
return plat
@staticmethod
def get_architecture_path(dll_folder):
"""
Method to detect the architecture of the machine
:param dll_folder: Folder that contains dll
        :return path: A string containing the path for the detected architecture
"""
path = "Nothing was decided about the architecture"
if platform.architecture()[0] == "64bit":
path = os.path.join(dll_folder, "x64")
System.check_path_environment(path)
elif platform.architecture()[0] == "32bit":
path = os.path.join(dll_folder, "x86")
System.check_path_environment(path)
else:
raise Exception("Make sure you are using the OpenDSS DLL and Python with the same bits")
return path
@staticmethod
def check_path_environment(str_path):
if str_path not in os.environ['PATH']:
os.environ['PATH'] = str_path + os.pathsep + os.environ['PATH']
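# Hypothetical usage (the DLL folder below is an assumption for illustration):
# print(System.detect_platform())
# print(System.get_architecture_path(r"C:\Program Files\OpenDSS"))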
|
the-stack_106_31756 | #!/usr/bin/env python3
from sys import argv
from re import search
from math import log2
import urllib3
import ssl
"""
Programmed with urllib3.
Global RIR IPv4 CIDR prefix extractor, by country.
It now searches for a particular CC in all RIRs:
RIPE NCC, APNIC, ARIN, LACNIC and AFRINIC
Usage: ./program.py countrycode (optional: file)
If a file isn't an argument, it prints prefixes to stdout.
PEP8 compliant
"Explicit is better than implicit."
— The Zen of Python
"""
""" Bypass SSL/TLS checks """
c_reqs = ssl.CERT_NONE
urllib3.disable_warnings()
h = urllib3.PoolManager(
cert_reqs=c_reqs)
RIRs = ("https://ftp.lacnic.net/pub/stats/ripencc/delegated-ripencc-latest",
"https://ftp.lacnic.net/pub/stats/apnic/delegated-apnic-latest",
"https://ftp.lacnic.net/pub/stats/arin/delegated-arin-extended-latest",
"https://ftp.lacnic.net/pub/stats/lacnic/delegated-lacnic-latest",
"https://ftp.lacnic.net/pub/stats/afrinic/delegated-afrinic-latest")
if len(argv) > 1:
for url in RIRs:
# reads content from URLs one by one
for prefix in h.request('GET', url).data.decode('utf-8').splitlines():
regex = search(str(argv[1]) + '.*ipv4', prefix)
if regex: # searches for cc and ipv4 strings
netaddr = prefix.split("|")[3] # net addr
bitmask = int(prefix.split("|")[4]) # bits used by net addr
cidrmask = int(32 - log2(bitmask)) # converts bits into CIDR
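                # e.g. an allocation of 256 addresses gives 32 - log2(256) = 24, i.e. a /24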
if len(argv) == 2:
print(f'{netaddr}/{cidrmask}') # prints to stdout
elif len(argv) == 3:
with open(f'{argv[2]}.txt', 'a') as file:
print(f'{netaddr}/{cidrmask}', file=file)
else:
print('Please provide at least a universal country code. (Optional: a\
filename descriptor to save the results.)\n\
Ex: ./program.py GB (print to stdout) OR ./program.py GB ipaddr-gb.txt \
(write to file "ipaddr-gb.txt" as an example)')
|
the-stack_106_31757 | from flask import make_response, abort, jsonify, send_from_directory, send_file
import sys
from io import StringIO
import os
import re
import urllib
import matplotlib.style
import matplotlib
matplotlib.use("Agg")
matplotlib.style.use('ggplot')
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
COLORS = ["#e6bc17", "#e67017", "#17e670", "#e6177e", "#1739e6", "#17b2e6", "#dd1a21", "#de8b5f", "#cce197", "#901F76"]
def plot(csv, delimiter, session, features, labels, problem):
replaced = urllib.parse.unquote(csv)
features = features.split(',')
labels = labels.split(',')
features= list(map(int, features))
labels = list(map(int, labels))
data = pd.read_csv(StringIO(replaced), sep=delimiter)
plt.figure()
if problem == 'classification':
colors = list(data.columns.values[labels])[0]
print(colors)
pairplot = sns.pairplot(data, vars=data[data.columns[features]], hue=colors, palette=sns.color_palette(COLORS))
else:
combined = features + labels
combined.sort()
print(combined)
pairplot = sns.pairplot(data[data.columns[combined]], palette=sns.color_palette(COLORS))
plt.savefig('./plots/plot'+ session + '.png')
plt.clf()
plt.cla()
plt.close()
image_path = './plots/plot'+ session + '.png'
print(os.path.isfile(image_path))
return send_file(image_path, mimetype='image/png')
def heatmap(csv, delimiter, session):
replaced = urllib.parse.unquote(csv)
data = pd.read_csv(StringIO(replaced), sep=delimiter)
figure_size = len(data.columns) / 2
if figure_size < 5:
figure_size = 5
plt.figure(figsize=(figure_size,figure_size))
cmap = sns.diverging_palette(352, 136, s=96, l=51, n=7)
sns.heatmap(data.corr(), annot=True, cmap=cmap, fmt='.1f', square=True)
plt.tight_layout()
plt.savefig('./plots/heatmap' + session + '.png')
plt.clf()
plt.cla()
plt.close()
image_path = './plots/heatmap' + session + '.png'
print(os.path.isfile(image_path))
return send_file(image_path, mimetype='image/png')
|
the-stack_106_31758 |
from __future__ import print_function
import os
import sys
import pickle
import subprocess
import nltk
import re
import time
import requests, json
from nltk.corpus import stopwords
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
from nltk.tokenize import PunktSentenceTokenizer
import requests,json
from sutime import SUTime
import os
import pickle
from argparse import ArgumentParser
from platform import system
from subprocess import Popen
from sys import argv
from sys import stderr
import shlex
from subprocess import Popen, PIPE,STDOUT
### SAMPLE COMMAND:
### python rule_based.py /home/raj/nlp_data/Partners_Train/docs /home/raj/Coreference-resolution/rule_based_concepts/clamp_out /home/raj/Downloads/python-sutime-master/jars
path1 =sys.argv[1] #path to the docs folder
path2=sys.argv[2] #path to the clamp output folder
path3=sys.argv[3] #path to the jars folder in SU-Time master folder
IS_WINDOWS = True if system() == 'Windows' else False
JAVA_BIN_PATH = 'java.exe' if IS_WINDOWS else 'java'
STANFORD_NER_FOLDER = 'stanford-ner'
#ner=[]
stopwords = stopwords.words('english')
st = StanfordNERTagger('/home/raj/Downloads/stanford-ner-2018-02-27/classifiers/english.all.3class.distsim.crf.ser.gz',
'/home/raj/Downloads/stanford-ner-2018-02-27/stanford-ner.jar',
encoding='utf-8')
person_list=['patient', 'the patient','doctor','the doctor','the boy','boy','the girl','girl','the man','man','the woman','woman','the lady','lady','person','the person','child','the child','infant','the infant']
# abbreviations (e.g. U.S.A.) | hyphenated words | currency and percentages | ellipsis | punctuation tokens
sentence_re = r'(?:(?:[A-Z])(?:\.[A-Z])+\.?)|(?:\w+(?:-\w+)*)|(?:\$?\d+(?:\.\d+)?%?)|(?:\.\.\.)|(?:[][.,;"\'?():-_`])'
lemmatizer = nltk.WordNetLemmatizer()
stemmer = nltk.stem.porter.PorterStemmer()
grammar = r"""
NBAR:
{<NN.*|JJ>*<NN.*>} # Nouns and Adjectives, terminated with Nouns
NP:
{<NBAR>}
{<NBAR><IN><NBAR>} # Above, connected with in/of/etc...
"""
chunker = nltk.RegexpParser(grammar)
def arg_parse():
arg_p = ArgumentParser('Stanford NER Python Wrapper')
arg_p.add_argument('-f', '--filename', type=str, default=None)
arg_p.add_argument('-v', '--verbose', action='store_true')
return arg_p
def debug_print(log, verbose):
if verbose:
print(log)
def process_entity_relations(entity_relations_str, verbose=True):
# format is ollie.
entity_relations = list()
for s in entity_relations_str:
entity_relations.append(s[s.find("(") + 1:s.find(")")].split(';'))
return entity_relations
def stanford_ner(filename, verbose=True, absolute_path=None):
out = 'out.txt'
command = ''
if absolute_path is not None:
command = 'cd {};'.format(absolute_path)
else:
filename = '../{}'.format(filename)
command += 'cd {}; {} -mx1g -cp "*:lib/*" edu.stanford.nlp.ie.NERClassifierCombiner ' \
'-ner.model classifiers/english.all.3class.distsim.crf.ser.gz ' \
'-outputFormat tabbedEntities -textFile {} > ../{}' \
.format(STANFORD_NER_FOLDER, JAVA_BIN_PATH, filename, out)
if verbose:
debug_print('Executing command = {}'.format(command), verbose)
java_process = Popen(command, stdout=stderr, shell=True)
else:
java_process = Popen(command, stdout=stderr, stderr=open(os.devnull, 'w'), shell=True)
java_process.wait()
assert not java_process.returncode, 'ERROR: Call to stanford_ner exited with a non-zero code status.'
if absolute_path is not None:
out = absolute_path + out
with open(out, 'r') as output_file:
results_str = output_file.readlines()
os.remove(out)
results = []
for res in results_str:
if len(res.strip()) > 0:
split_res = res.split('\t')
entity_name = split_res[0]
entity_type = split_res[1]
if len(entity_name) > 0 and len(entity_type) > 0:
results.append([entity_name.strip(), entity_type.strip()])
if verbose:
pickle.dump(results_str, open('out.pkl', 'wb'))
debug_print('wrote to out.pkl', verbose)
return results
def main(args):
ner=[]
#arg_p = arg_parse().parse_args(args[1:])
#filename = arg_p.filename
filename=args
#verbose = args.verbose
#debug_print(arg_p, verbose)
if filename is None:
print('please provide a text file containing your input. Program will exit.')
exit(1)
#if verbose:
#debug_print('filename = {}'.format(filename), verbose)
#entities = stanford_ner(filename, verbose)
entities = stanford_ner(filename)
#print('\n'.join([entity[0].ljust(20) + ',' + entity[1] for entity in entities]))
for entity in entities:
if(entity[1]=='PERSON'):
ner.append(entity[0])
return ner
def leaves(tree):
"""Finds NP (nounphrase) leaf nodes of a chunk tree."""
for subtree in tree.subtrees(filter = lambda t: t.label()=='NP'):
yield subtree.leaves()
def normalise(word):
"""Normalises words to lowercase and stems and lemmatizes it."""
word = word.lower()
# word = stemmer.stem_word(word) #if we consider stemmer then results comes with stemmed word, but in this case word will not match with comment
word = lemmatizer.lemmatize(word)
return word
def acceptable_word(word):
"""Checks conditions for acceptable word: length, stopword. We can increase the length if we want to consider large phrase"""
accepted = bool(2 <= len(word) <= 40
and word.lower() not in stopwords)
return accepted
def get_terms(tree):
for leaf in leaves(tree):
term = [ normalise(w) for w,t in leaf if acceptable_word(w) ]
yield term
def get_noun_phrases(line):
toks = nltk.regexp_tokenize(line, sentence_re)
postoks = nltk.tag.pos_tag(toks)
#print postoks
tree = chunker.parse(postoks)
terms = get_terms(tree)
noun_list=[]
for term in terms:
for word in term:
noun_list.append(word)
return noun_list
subprocess.call(['/home/raj/end_to_end/ClampCmd_1.4.0/run_ner_pipeline.sh',path1,'clamp_out'])
all_files = os.listdir(path2)
all_files = [file for file in all_files if file.endswith('txt')]
sem_string = "semantic"
noun_pos=['NNP','NNPS','NN', 'NNS']
for i in os.listdir(path2):
if i.endswith("txt"):
file = open("clamp_out/" + str(i))
fp=open("concepts/"+i+".con","w+")
clamp_concept=[]
for line in file:
if sem_string in line:
line_list = line.split("\t")
semantic = line_list[3].split("=")
ne = line_list[5].split("=")
fp.write(semantic[1]+','+ne[1])
clamp_concept.append(ne[1])
fp.write('\n')
fp.close()
all_files=os.listdir(path1)
all_files = [file for file in all_files if file.endswith('txt')]
for i in os.listdir(path1):
if i.endswith("txt"):
print (str(i) + " is being processed")
file=open(path1+"/"+i)
fp=open("concepts/"+i+".con","a+")
num = 1
for line in file:
noun_list=get_noun_phrases(line)
for n in noun_list:
if n in person_list:
#words = n.split()
line_list = line.split()
#print (words)
print (line_list)
if (n not in line_list):
                        regex = re.compile(r"\b" + n + r"\b", re.IGNORECASE)
                        print (regex)
                        if any(ll.lower() == n for ll in line_list):
                            if (regex.search(line)):
line_list = line.lower().split()
start = line_list.index(n)
end = line_list.index(n)
else:
continue
else:
start = line_list.index(n)
end = line_list.index(n)
fp.write(n+"|"+str(num)+":" +str(start) + " "+str(num) +":" +str(end) +"|person"+"\n")
num += 1
#url = "https://api.genderize.io/?name="+n
#response = requests.get(url)
#data=json.loads(response.text)
#person.append(name)
#gender=data['gender']
#if n in person_list or gender !='null':
# fp.write('person,'+n+'\n')
fp.write('\n')
fp.close()
file.close()
'''
for i in os.listdir(path1):
file=open(path1+"/"+i)
fp=open("concepts/"+i+".con","a+")
for line in file:
tokens = nltk.tokenize.word_tokenize(line)
tags = st.tag(tokens)
for tag in tags:
if tag[1]=='PERSON':
fp.write('person,'+tag[0])
'''
pronoun_pos=['PRP','PRP$']
for i in os.listdir(path1):
if i.endswith("txt"):
file=open(path1+"/"+i)
fp=open("concepts/"+i+".con","a+")
print(path1+"/"+i)
ner=main(path1+"/"+i)
#omp_cmd="python main.py -f %s -v" %(i)
#subprocess.call(["python main.py","-f",i,"-v"])
#xmlResult = Popen(shlex.split(omp_cmd), stdout=PIPE, stderr=STDOUT)
#with (open("ner.pkl", "rb")) as p:
# ner=pickle.load(p)
num = 1
for line in file:
for n in ner:
line_list = line.split()
#print (line_list)
if n in line_list:
print ("*"*40)
pp = n.split()
print (pp)
if (len(pp) == 1):
indices = [i for i, x in enumerate(line_list) if x == str(pp[0])]
for ind in indices:
fp.write(n+"|"+str(num)+":" +str(ind) + " "+str(num) +":" +str(ind) +"|person"+"\n")
else:
start = line_list.index(str(pp[0]))
end = line_list.index(str(pp[-1]))
fp.write(n+"|"+str(num)+":" +str(start) + " "+str(num) +":" +str(end) +"|person"+"\n")
num += 1
fp.close()
file.close()
for i in os.listdir(path1):
if i.endswith("txt"):
file=open(path1+"/"+i)
fp=open("concepts/"+i+".con","a+")
pronoun_list=[]
person_list=[]
num = 1
for line in file:
line=re.sub(r'[^\w]', ' ', line)
tokenized = nltk.word_tokenize(line)
tagged = nltk.pos_tag(tokenized)
tsize = len(tagged)
for j in range(0,tsize):
if (tagged[j][1] == 'PRP$'):
pronoun_list.append(tagged[j][0])
if (tagged[j][1] == 'PRP'):
person_list.append(tagged[j][0])
for p in pronoun_list:
line_list = line.split()
#print (line_list)
#if (p not in line_list):
#regex = re.compile(r"\b"+n+"\b",re.IGNORECASE)
#print (regex)
#if(p in ll.lower() for ll in line_list):
#if (regex.search(str(n))):
#line_list = line.lower().split()
print (line_list)
print (p)
if (p in line_list):
print ("*"*40)
pp = p.split()
print (pp)
indices = [i for i, x in enumerate(line_list) if x == str(pp[0])]
start = line_list.index(str(pp[0]))
end = line_list.index(str(pp[-1]))
'''
else:
start = line_list.index(p)
end = line_list.index(p)
'''
for ind in indices:
fp.write(p+"|"+str(num)+":" +str(ind) + " "+str(num) +":" +str(ind) +"|pronoun"+"\n")
#fp.write('pronoun,'+p+'\n')
for p in person_list:
line_list = line.split()
#print (line_list)
#if (p not in line_list):
#regex = re.compile(r"\b"+n+"\b",re.IGNORECASE)
#print (regex)
#if(p in ll.lower() for ll in line_list):
#if (regex.search(str(n))):
#print (line_list)
#line_list = line.lower().split()
print (line_list)
print (p)
if p in line_list:
print ("*"*40)
pp = p.split()
print (pp)
if (len(pp) == 1):
indices = [i for i, x in enumerate(line_list) if x == str(pp[0])]
for ind in indices:
fp.write(p+"|"+str(num)+":" +str(ind) + " "+str(num) +":" +str(ind) +"|person"+"\n")
else:
start = line_list.index(str(pp[0]))
end = line_list.index(str(pp[-1]))
fp.write(p+"|"+str(num)+":" +str(start) + " "+str(num) +":" +str(end) +"|person"+"\n")
'''
else:
start = line_list.index(p)
end = line_list.index(p)
'''
#fp.write('person,'+p+'\n')
num += 1
fp.close()
file.close()
'''
jar_files = os.path.join(os.path.dirname(path3), 'jars')
sutime = SUTime(jars=jar_files, mark_time_ranges=True)
if __name__ == '__main__':
for i in os.listdir(path1):
if i.endswith("txt"):
file=open(path1+"/"+i)
fp=open("concepts/"+i+".con","a+")
num = 1
for line in file:
data=json.dumps(sutime.parse(line))
list_data=json.loads(data)
for l in list_data:
line_list = line.split()
#print (line_list)
#if (l not in line_list):
#regex = re.compile(r"\b"+n+"\b",re.IGNORECASE)
#print (regex)
#if(l in ll.lower() for ll in line_list):
#if (regex.search(str(n))):
#line_list = line.lower().split()
print (line_list)
if (l["text"] in line_list):
print ("*"*40)
ll = l["text"].split()
print (ll[0])
print (ll[-1])
start = line_list.index(str(ll[0]))
end = line_list.index(str(ll[-1]))
fp.write(l["text"]+"|"+str(num)+":" +str(start) + " "+str(num) +":" +str(end) +"|temporal"+"\n")
#fp.write('temporal,'+l["text"]+'\n')
num += 1
fp.close()
file.close()
'''
'''
for i in os.listdir(path1):
file=open(path1+"/"+i)
fp=open("concepts/"+i+".con","a+")
for line in file:
tokens = nltk.tokenize.word_tokenize(line)
tags = st.tag(tokens)
for tag in tags:
if tag[1]=='PERSON':
fp.write('person'+tag[0]+'\n')
'''
|
the-stack_106_31759 | """ Utilities for parsing datastore entities. """
import datastore_server
from dbconstants import JOURNAL_SCHEMA
from dbconstants import JOURNAL_TABLE
from dbconstants import KEY_DELIMITER
from dbconstants import KIND_SEPARATOR
from google.appengine.datastore import entity_pb
def get_root_key_from_entity_key(key):
""" Extracts the root key from an entity key. We
  remove any excess children from the row key to get to
  the root key.
  Args:
    key: A string representing a row key.
Returns:
The root key extracted from the row key.
"""
tokens = key.split(KIND_SEPARATOR)
return tokens[0] + KIND_SEPARATOR
def get_prefix_from_entity_key(entity_key):
""" Extracts the prefix from a key to the entity table.
Args:
entity_key: A str representing a row key to the entity table.
Returns:
A str representing the app prefix (app_id and namespace).
"""
tokens = entity_key.split(KEY_DELIMITER)
return tokens[0] + KEY_DELIMITER + tokens[1]
def get_kind_from_entity_key(entity_key):
""" Extracts the kind from a key to the entity table.
Args:
entity_key: A str representing a row key to the entity table.
Returns:
A str representing the kind.
"""
tokens = entity_key.split(KEY_DELIMITER)
return tokens[2].split(":")[0]
def fetch_journal_entry(db_access, key):
""" Fetches the given key from the journal.
Args:
db_access: A datastore accessor.
    key: A str, the key to fetch.
Returns:
The entity fetched from the datastore, or None if it was deleted.
"""
result = db_access.batch_get_entity(JOURNAL_TABLE, [key],
JOURNAL_SCHEMA)
if len(result.keys()) == 0:
return None
if JOURNAL_SCHEMA[0] in result.keys()[0]:
ent_string = result[0][JOURNAL_SCHEMA[0]]
if ent_string == datastore_server.TOMBSTONE:
return None
return entity_pb.EntityProto().ParseFromString(ent_string)
else:
return None
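# Illustrative sketch of the key layout these helpers expect (assuming
# KEY_DELIMITER and KIND_SEPARATOR are single-character separators, as in the
# rest of this codebase):
#
#   row_key = "myapp" + KEY_DELIMITER + "ns" + KEY_DELIMITER + "Person:42" + KIND_SEPARATOR
#   get_prefix_from_entity_key(row_key)  -> "myapp" + KEY_DELIMITER + "ns"
#   get_kind_from_entity_key(row_key)    -> "Person"
#   get_root_key_from_entity_key(row_key + "Child:1" + KIND_SEPARATOR)
#     -> everything up to and including the first KIND_SEPARATOR, i.e. row_key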
|
the-stack_106_31761 | """
stale_sensors.py - Detects devices that haven't checked into
CrowdStrike for a specified period of time.
- jshcodes@CrowdStrike, 09.01.21
"""
from datetime import datetime, timedelta, timezone
from argparse import RawTextHelpFormatter
import argparse
from tabulate import tabulate
try:
from falconpy import Hosts
except ImportError as no_falconpy:
raise SystemExit(
"CrowdStrike FalconPy must be installed in order to use this application.\n"
"Please execute `python3 -m pip install crowdstrike-falconpy` and try again."
) from no_falconpy
def parse_command_line() -> object:
"""
Parses command-line arguments and returns them back as an object.
"""
header = """
_______ ___ ___ _______ _______ _______ ______
| _ | Y | _ | _ | _ | _ \\
|. 1___|. | | 1___| 1___|. 1___|. | \\
|. |___|. | |____ |____ |. __)_|. | \\
|: 1 |: 1 |: 1 |: 1 |: 1 |: 1 /
|::.. . |::.. . |::.. . |::.. . |::.. . |::.. . /
`-------`-------`-------`-------`-------`------'
CrowdStrike Unattended Stale Sensor Environment Detector
"""
parser = argparse.ArgumentParser(
description=header,
formatter_class=RawTextHelpFormatter
)
parser.add_argument(
'-k',
'--client_id',
help='CrowdStrike Falcon API key ID',
required=True
)
parser.add_argument(
'-s',
'--client_secret',
help='CrowdStrike Falcon API key secret',
required=True
)
parser.add_argument(
'-b',
'--base_url',
help='CrowdStrike API region (us1, us2, eu1, usgov1)',
required=False
)
parser.add_argument(
'-d',
'--days',
help='Number of days since a host was seen before it is considered stale',
required=False
)
parser.add_argument(
'-r',
'--reverse',
help='Reverse sort (defaults to ASC)',
required=False,
action="store_true"
)
parser.add_argument(
'-x',
'--remove',
help='Remove hosts identified as stale',
required=False,
action='store_true'
)
return parser.parse_args()
def connect_api(key: str, secret: str, base_url: str) -> object:
"""
Connects to the API and returns an instance of the Hosts Service Class.
"""
return Hosts(client_id=key, client_secret=secret, base_url=base_url)
def get_host_details(id_list: list) -> list:
"""
    Retrieves a list containing device information based upon the ID list provided.
"""
return falcon.get_device_details(ids=id_list)["body"]["resources"]
def get_hosts(date_filter: str) -> list:
"""
Retrieves a list of hosts IDs that match the last_seen date filter.
"""
return falcon.query_devices_by_filter_scroll(
limit=5000,
filter=f"last_seen:<='{date_filter}Z'"
)["body"]["resources"]
def get_sort_key(sorting) -> list:
"""
Sorting method for table display.
Column 4 = Stale Period
Column 0 = Hostname
"""
return (sorting[4], sorting[0])
def calc_stale_date(num_days: int) -> str:
"""
Calculates the "stale" datetime based upon the number of days
provided by the user.
"""
today = datetime.strptime(str(datetime.now(timezone.utc)), "%Y-%m-%d %H:%M:%S.%f%z")
return str(today - timedelta(days=num_days)).replace(" ", "T")[:-6]
def parse_host_detail(detail: dict, found: list):
"""
Parses the returned host detail and adds it to the stale list.
"""
now = datetime.strptime(str(datetime.now(timezone.utc)), "%Y-%m-%d %H:%M:%S.%f%z")
then = datetime.strptime(detail["last_seen"], "%Y-%m-%dT%H:%M:%S%z")
distance = (now - then).days
found.append([
detail.get("hostname", "Unknown"),
detail.get("device_id", "Unknown"),
detail.get("local_ip", "Unknown"),
detail["last_seen"],
f"{distance} days"
])
return found
def hide_hosts(id_list: list) -> dict:
"""
Hides hosts identified as stale.
"""
return falcon.perform_action(action_name="hide_host", body={"ids": id_list})
# Parse our command line
args = parse_command_line()
# Default SORT to ASC if not present
if not args.reverse:
SORT = False
else:
SORT = bool(args.reverse)
if not args.base_url:
BASE = "us1"
else:
BASE = args.base_url
# Credentials
api_client_id = args.client_id
api_client_secret = args.client_secret
if not api_client_id and not api_client_secret:
raise SystemExit("Invalid API credentials provided.")
# Set our stale date to 120 days if not present
if not args.days:
STALE_DAYS = 120
else:
try:
STALE_DAYS = int(args.days)
except ValueError as bad_day_value:
raise SystemExit("Invalid value specified for days. Integer required.") from bad_day_value
# Do not hide hosts if it is not requested
if not args.remove:
HIDE = False
else:
HIDE = bool(args.remove)
# Calculate our stale date filter
STALE_DATE = calc_stale_date(STALE_DAYS)
# Connect to the API
falcon = connect_api(api_client_id, api_client_secret, BASE)
# List to hold our identified hosts
stale = []
# For each stale host identified
try:
for host in get_host_details(get_hosts(STALE_DATE)):
# Retrieve host detail
stale = parse_host_detail(host, stale)
except KeyError:
raise SystemExit("Unable to communicate with CrowdStrike API, check credentials and try again.")
# If we produced stale host results
if stale:
# Display only
if not HIDE:
headers = ["Hostname", "Device ID", "Local IP", "Last Seen", "Stale Period"]
print(f"\n{tabulate(sorted(stale, key=get_sort_key, reverse=SORT), headers)}")
else:
# Remove the hosts
host_list = [x[1] for x in stale]
remove_result = hide_hosts(host_list)["body"]["resources"]
for deleted in remove_result:
print(f"Removed host {deleted['id']}")
else:
print("No stale hosts identified for the range specified.")
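# Example invocation (illustrative placeholder values):
#   python3 stale_sensors.py -k $FALCON_CLIENT_ID -s $FALCON_CLIENT_SECRET -d 90
# Add -x to hide the identified hosts instead of only listing them, and -r to
# reverse the sort order of the printed table.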
|
the-stack_106_31762 | """
Details about crypto currencies from CoinMarketCap.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.coinmarketcap/
"""
import logging
from datetime import timedelta
import json
from urllib.error import HTTPError
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_CURRENCY
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['coinmarketcap==2.0.1']
_LOGGER = logging.getLogger(__name__)
ATTR_24H_VOLUME_USD = '24h_volume_usd'
ATTR_AVAILABLE_SUPPLY = 'available_supply'
ATTR_MARKET_CAP = 'market_cap_usd'
ATTR_NAME = 'name'
ATTR_PERCENT_CHANGE_24H = 'percent_change_24h'
ATTR_PERCENT_CHANGE_7D = 'percent_change_7d'
ATTR_PRICE = 'price_usd'
ATTR_SYMBOL = 'symbol'
ATTR_TOTAL_SUPPLY = 'total_supply'
CONF_ATTRIBUTION = "Data provided by CoinMarketCap"
DEFAULT_CURRENCY = 'bitcoin'
ICON = 'mdi:currency-usd'
SCAN_INTERVAL = timedelta(minutes=15)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_CURRENCY, default=DEFAULT_CURRENCY): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the CoinMarketCap sensor."""
currency = config.get(CONF_CURRENCY)
try:
CoinMarketCapData(currency).update()
except HTTPError:
_LOGGER.warning("Currency %s is not available. Using bitcoin",
currency)
currency = DEFAULT_CURRENCY
add_devices([CoinMarketCapSensor(CoinMarketCapData(currency))], True)
class CoinMarketCapSensor(Entity):
"""Representation of a CoinMarketCap sensor."""
def __init__(self, data):
"""Initialize the sensor."""
self.data = data
self._ticker = None
self._unit_of_measurement = 'USD'
@property
def name(self):
"""Return the name of the sensor."""
return self._ticker.get('name')
@property
def state(self):
"""Return the state of the sensor."""
return round(float(self._ticker.get('price_usd')), 2)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_24H_VOLUME_USD: self._ticker.get('24h_volume_usd'),
ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
ATTR_AVAILABLE_SUPPLY: self._ticker.get('available_supply'),
ATTR_MARKET_CAP: self._ticker.get('market_cap_usd'),
ATTR_PERCENT_CHANGE_24H: self._ticker.get('percent_change_24h'),
ATTR_PERCENT_CHANGE_7D: self._ticker.get('percent_change_7d'),
ATTR_SYMBOL: self._ticker.get('symbol'),
ATTR_TOTAL_SUPPLY: self._ticker.get('total_supply'),
}
def update(self):
"""Get the latest data and updates the states."""
self.data.update()
self._ticker = json.loads(
self.data.ticker.decode('utf-8').strip('\n '))[0]
class CoinMarketCapData(object):
"""Get the latest data and update the states."""
def __init__(self, currency):
"""Initialize the data object."""
self.currency = currency
self.ticker = None
    def update(self):
        """Get the latest data from coinmarketcap.com."""
from coinmarketcap import Market
self.ticker = Market().ticker(self.currency)
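# Example configuration.yaml entry (illustrative):
#
# sensor:
#   - platform: coinmarketcap
#     currency: ethereum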
|
the-stack_106_31765 | import json
import re
import scrapy
from locations.items import GeojsonPointItem
class WilcoFarmSpider(scrapy.Spider):
name = "wilcofarm"
allowed_domains = ["www.farmstore.com"]
start_urls = (
'https://www.farmstore.com/locations/',
)
def parse(self, response):
pattern = r"(var markers=\[)(.*?)(\]\;)"
data = re.search(pattern, response.body_as_unicode(), re.MULTILINE).group(2)
data = json.loads('[' + data + ']')
for item in data:
properties = {
'ref': item['storeId'],
'name': item['storeName'],
'addr_full': item['storeStreet'],
'city': item['storeCity'],
'state': item['storeState'],
'postcode': item['storeZip'],
'lat': item['storeLat'],
'lon': item['storeLng'],
'phone': item['storePhone'],
'opening_hours': item['storeHours']
}
yield GeojsonPointItem(**properties)
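# Example run (illustrative; assumes the usual Scrapy project layout around this
# spider), writing one GeojsonPointItem per store as JSON lines:
#   scrapy crawl wilcofarm -o wilcofarm.jl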
|
the-stack_106_31768 | from hls4ml.converters.keras_to_hls import parse_default_keras_layer
from hls4ml.converters.keras_to_hls import keras_handler
from hls4ml.converters.keras.core import TernaryQuantizer
@keras_handler('GarNet', 'GarNetStack')
def parse_garnet_layer(keras_layer, input_names, input_shapes, data_reader, config):
assert(keras_layer['class_name'] in ['GarNet', 'GarNetStack'])
if not keras_layer['config']['simplified']:
raise Exception('HLS GarNet is compatible only with keras GarNet with simplified=True')
if keras_layer['config']['output_activation'] not in [None, 'linear']:
raise Exception('HLS GarNet cannot have nonlinear output activation')
layer = parse_default_keras_layer(keras_layer, input_names)
layer['input_format'] = keras_layer['config']['input_format']
if layer['input_format'] != 'xn':
raise NotImplementedError('HLS GarNet currently only implements signed inputs (input_format="xn")')
layer['n_vertices'] = input_shapes[0][1]
layer['collapse'] = keras_layer['config']['collapse']
layer['mean_by_nvert'] = keras_layer['config']['mean_by_nvert']
if keras_layer['config']['quantize_transforms']:
layer['quantizer'] = TernaryQuantizer()
layer['n_aggregators'] = keras_layer['config']['n_aggregators']
layer['n_out_features'] = keras_layer['config']['n_filters'] # number of output features
layer['n_propagate'] = keras_layer['config']['n_propagate'] # number of latent features
if layer['class_name'] == 'GarNet':
layer['n_in_features'] = input_shapes[0][2]
n_out_features = layer['n_out_features']
elif layer['class_name'] == 'GarNetStack':
layer['n_sublayers'] = keras_layer['config']['n_sublayers']
layer['n_in_features'] = [input_shapes[0][2]]
for il in range(1, layer['n_sublayers']):
layer['n_in_features'].append(layer['n_out_features'][il - 1])
n_out_features = layer['n_out_features'][-1]
if layer['collapse'] in ['mean', 'sum', 'max']:
output_shape = [input_shapes[0][0], n_out_features]
else:
output_shape = input_shapes[0][:2] + [n_out_features]
return layer, output_shape
|
the-stack_106_31769 |
from flask import Flask
from redis import Redis
app = Flask(__name__)
redis = Redis(host="redis")
counter_key = "haha_counter"
@app.route("/")
def index():
    # Increase the counter by 1 and get the new count.
count = redis.incr(counter_key)
return "The total number of visit to this page: {0}".format(count)
if __name__=="__main__":
app.run(host="0.0.0.0",debug=True)
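# Quick check once the app and a Redis container reachable as "redis" are up
# (assuming Flask's default port 5000):
#   curl http://localhost:5000/
#   -> "The total number of visit to this page: 1"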
|
the-stack_106_31770 | from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
val = pd.read_csv('val.csv')
#val = pd.read_csv('train.csv')
true_labels = val["Id"].values
embeddings = pd.read_pickle('trained/embeddings.pkl')
labels = embeddings['Id'].values.astype('int')
embeddings = embeddings.drop(['Id'], axis=1).values
whales = np.load('trained/raw_predictions.npy')
KNN = KNeighborsClassifier(n_neighbors=5, metric='sqeuclidean', weights='distance', algorithm='brute')
KNN.fit(embeddings, labels)
pred = KNN.predict(whales)
# dists, neighbours = KNN.kneighbors(whales, n_neighbors=5)
# neighbours_labels = labels[neighbours.flat].reshape(neighbours.shape)
# pred = neighbours_labels[:, 0].flatten()
mapping = np.load('../data/meta/idx_to_whales_mapping.npy').item()
pred_labels = [mapping[x][0] for x in pred]
print('true labels: \n', true_labels)
print('pred labels: \n', pred_labels)
acc = sum(true_labels == pred_labels) / len(true_labels)
print('accuracy: ', acc)
|
the-stack_106_31771 | import copy
import logging
from typing import List, Union, Optional
from pathlib import Path
import shutil
import tempfile
import tarfile
import zipfile
import warnings
import functools
from datetime import datetime
from datetime import time as datetime_time
from geopandas import GeoDataFrame
import shapely
from shapely.geometry import Point, Polygon
from geojson import Feature, FeatureCollection
from geojson import Polygon as geojson_Polygon
import requests
from tqdm import tqdm
def get_logger(
name: str,
level=logging.INFO,
verbose: bool = False,
):
"""
Use level=logging.CRITICAL to disable temporarily.
"""
logger = logging.getLogger(name) # pylint: disable=redefined-outer-name
logger.setLevel(level)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(level)
if verbose:
        log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
else:
# hide logger module & level, truncate log messages > 2000 characters (e.g. huge geometries)
log_format = "%(asctime)s - %(message).2000s"
formatter = logging.Formatter(log_format)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
return logger
logger = get_logger(__name__)
def deprecation(
function_name: str,
replacement_name: str,
version: str = "0.13.0",
extra_message: str = "",
):
"""
Decorator for custom deprecation warnings.
Args:
function_name: Name of the to be deprecated function.
replacement_name: Name of the replacement function.
version: The package version in which the deprecation will happen.
extra_message: Optional message after default deprecation warning.
"""
def actual_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
message = (
f"`{function_name}` will be deprecated in version {version}, "
f"use `{replacement_name}` instead! {extra_message}"
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return wrapper
return actual_decorator
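# Minimal usage sketch for the decorator above (hypothetical function names):
#
# @deprecation("old_helper", "new_helper", version="0.13.0")
# def old_helper(*args, **kwargs):
#     return new_helper(*args, **kwargs)
#
# Calling old_helper() then emits a DeprecationWarning pointing users at new_helper.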
def download_results_from_gcs(
download_url: str, output_directory: Union[str, Path]
) -> List[str]:
"""
General download function for results of job and jobtask from cloud storage
provider.
Args:
download_url: The signed gcs url to download.
output_directory: The file output directory, defaults to the current working
directory.
"""
output_directory = Path(output_directory)
# Download
compressed_file = tempfile.mktemp()
with open(compressed_file, "wb") as dst_tgz:
try:
r = requests.get(download_url, stream=True)
r.raise_for_status()
for chunk in tqdm(r.iter_content(chunk_size=1024)):
if chunk: # filter out keep-alive new chunks
dst_tgz.write(chunk)
except requests.exceptions.HTTPError as err:
logger.debug(f"Connection error, please try again! {err}")
raise requests.exceptions.HTTPError(
f"Connection error, please try again! {err}"
)
if tarfile.is_tarfile(compressed_file):
unpack = tarfile.open
elif zipfile.is_zipfile(compressed_file):
unpack = zipfile.ZipFile # type: ignore
else:
raise ValueError("Downloaded file is not a TAR or ZIP archive.")
with unpack(compressed_file) as f:
f.extractall(path=output_directory)
output_folder_path = output_directory / "output"
out_filepaths = []
if output_folder_path.exists():
for src_path in output_folder_path.glob("**/*"):
dst_path = output_directory / src_path.relative_to(output_folder_path)
shutil.move(str(src_path), str(dst_path))
if dst_path.is_dir():
out_filepaths += [str(x) for x in dst_path.glob("**/*")]
elif dst_path.is_file():
out_filepaths.append(str(dst_path))
output_folder_path.rmdir()
else:
out_filepaths += [str(x) for x in output_directory.glob("**/*")]
logger.info(
f"Download successful of {len(out_filepaths)} files to output_directory "
f"'{output_directory}': {[Path(p).name for p in out_filepaths]}"
)
return out_filepaths
def download_results_from_gcs_without_unpacking(
download_url: str, output_directory: Union[str, Path]
) -> List[str]:
"""
General download function for results of job and jobtask from cloud storage
provider.
Args:
download_url: The signed gcs url to download.
output_directory: The file output directory, defaults to the current working
directory.
"""
output_directory = Path(output_directory)
# Download
out_filepaths: List[str] = []
out_fp = Path().joinpath(output_directory, "output.tgz")
with open(out_fp, "wb") as dst:
try:
r = requests.get(download_url, stream=True)
r.raise_for_status()
for chunk in tqdm(r.iter_content(chunk_size=1024)):
if chunk: # filter out keep-alive new chunks
dst.write(chunk)
out_filepaths.append(str(out_fp))
except requests.exceptions.HTTPError as err:
logger.debug(f"Connection error, please try again! {err}")
raise requests.exceptions.HTTPError(
f"Connection error, please try again! {err}"
)
logger.info(
f"Download successful of {len(out_filepaths)} files to output_directory"
f" '{output_directory}': {[Path(p).name for p in out_filepaths]}"
)
return out_filepaths
def format_time_period(
start_date: Optional[Union[str, datetime]], end_date: Optional[Union[str, datetime]]
):
"""
Formats a time period string from start date and end date.
Args:
start_date: Query period starting day as iso-format string or datetime object,
e.g. "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM:SS".
end_date: Query period ending day as iso-format or datetime object,
e.g. "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM:SS".
Returns:
Time period string in the format "2014-01-01T00:00:00Z/2016-12-31T10:11:12Z"
"""
if start_date is None or end_date is None:
raise ValueError(
"When using dates, both start_date and end_date need to be provided."
)
# Start and end date can be any combination of str ("YYYY-MM-DD" or "YYYY-MM-DDTHH:MM:SS")
# or datetime objects.
if not isinstance(start_date, datetime):
start_dt: datetime = datetime.fromisoformat(start_date)
else:
start_dt = start_date
if not isinstance(end_date, datetime):
end_dt: datetime = datetime.fromisoformat(end_date)
try:
# For "YYYY-MM-DD" string the default datetime conversion sets to
# start of day, but image archive query requires end of day.
datetime.strptime(end_date, "%Y-%m-%d") # format validation
end_dt = datetime.combine(end_dt.date(), datetime_time(23, 59, 59, 999999))
except ValueError:
pass
else:
end_dt = end_date
if start_dt > end_dt:
raise ValueError("The start_date can not be later than the end_date!")
formatting = "%Y-%m-%dT%H:%M:%S"
time_period = f"{start_dt.strftime(formatting)}Z/{end_dt.strftime(formatting)}Z"
return time_period
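# Illustrative example: date-only strings are expanded to full days, so
#   format_time_period("2021-01-01", "2021-01-31")
# returns "2021-01-01T00:00:00Z/2021-01-31T23:59:59Z".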
def any_vector_to_fc(
vector: Union[
dict,
Feature,
FeatureCollection,
list,
GeoDataFrame,
Polygon,
Point,
],
as_dataframe: bool = False,
) -> Union[dict, GeoDataFrame]:
"""
Gets a uniform feature collection dictionary (with fc and f bboxes) from any input vector type.
Args:
vector: One of dict, FeatureCollection, Feature, list of bounds coordinates,
GeoDataFrame, shapely.geometry.Polygon, shapely.geometry.Point.
All assume EPSG 4326 and Polygons!
as_dataframe: GeoDataFrame output with as_dataframe=True.
"""
if not isinstance(
vector,
(
dict,
FeatureCollection,
Feature,
geojson_Polygon,
list,
GeoDataFrame,
Polygon,
Point,
),
):
raise ValueError(
"The provided geometry muste be a FeatureCollection, Feature, dict, geopandas "
"Dataframe, shapely Polygon, shapely Point or a list of 4 bounds coordinates."
)
## Transform all possible input geometries to a uniform feature collection.
vector = copy.deepcopy(vector) # otherwise changes input geometry.
if isinstance(vector, (dict, FeatureCollection, Feature)):
try:
if vector["type"] == "FeatureCollection":
df = GeoDataFrame.from_features(vector, crs=4326)
elif vector["type"] == "Feature":
# TODO: Handle point features?
df = GeoDataFrame.from_features(FeatureCollection([vector]), crs=4326)
elif vector["type"] == "Polygon": # Geojson geometry
df = GeoDataFrame.from_features(
FeatureCollection([Feature(geometry=vector)]), crs=4326
)
except KeyError as e:
raise ValueError(
"Provided geometry dictionary has to include a featurecollection or feature."
) from e
else:
if isinstance(vector, list):
if len(vector) == 4:
box_poly = shapely.geometry.box(*vector)
df = GeoDataFrame({"geometry": [box_poly]}, crs=4326)
else:
raise ValueError("The list requires 4 bounds coordinates.")
elif isinstance(vector, Polygon):
df = GeoDataFrame({"geometry": [vector]}, crs=4326)
elif isinstance(vector, Point):
df = GeoDataFrame(
{"geometry": [vector.buffer(0.00001)]}, crs=4326
) # Around 1m buffer # TODO: Find better solution than small buffer?
elif isinstance(vector, GeoDataFrame):
df = vector
try:
if df.crs.to_string() != "EPSG:4326":
df = df.to_crs(epsg=4326)
except AttributeError as e:
raise AttributeError("GeoDataFrame requires a crs.") from e
if as_dataframe:
return df
else:
fc = df.__geo_interface__
return fc
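# Illustrative sketch: a plain bounds list is normalised to a single-feature
# FeatureCollection dict (EPSG:4326 is assumed throughout these helpers):
#   fc = any_vector_to_fc([13.35, 52.45, 13.45, 52.55])
#   fc["type"]          -> "FeatureCollection"
#   len(fc["features"]) -> 1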
def fc_to_query_geometry(
fc: Union[dict, FeatureCollection], geometry_operation: str
) -> Union[List, dict]:
"""
From a feature collection with a single feature, depending on the geometry_operation,
returns the feature as a list of bounds coordinates or a geojson Polygon (as dict).
Args:
fc: feature collection
geometry_operation: One of "bbox", "intersects", "contains".
Returns:
The feature as a list of bounds coordinates or a geojson Polygon (as dict)
"""
try:
if fc["type"] != "FeatureCollection":
raise ValueError("Geometry argument only supports Feature Collections!")
except (KeyError, TypeError) as e:
raise ValueError("Geometry argument only supports Feature Collections!") from e
geometry_error = "The provided geometry {}, UP42 only accepts single geometries."
if len(fc["features"]) != 1:
logger.info(geometry_error.format("contains multiple geometries"))
raise ValueError(geometry_error.format("contains multiple geometries"))
feature = fc["features"][0]
if feature["geometry"]["type"] == "MultiPolygon":
logger.info(geometry_error.format("is a MultiPolygon"))
raise ValueError(geometry_error.format("is a MultiPolygon"))
if geometry_operation == "bbox":
try:
query_geometry = list(feature["bbox"])
except KeyError:
query_geometry = list(shapely.geometry.shape(feature["geometry"]).bounds)
elif geometry_operation in ["intersects", "contains"]:
query_geometry = feature["geometry"]
else:
raise ValueError(
"geometry_operation needs to be one of bbox, intersects or contains!",
)
return query_geometry
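# Continuing the illustrative bbox example above (fc built by any_vector_to_fc):
#   fc_to_query_geometry(fc, geometry_operation="bbox")       -> [13.35, 52.45, 13.45, 52.55]
#   fc_to_query_geometry(fc, geometry_operation="intersects") -> the GeoJSON Polygon geometry dict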
def filter_jobs_on_mode(
jobs_json: List[dict], test_jobs: bool = True, real_jobs: bool = True
) -> List[dict]:
"""
Filter jobs according to selected mode.
Args:
jobs_json: List of jobs as returned by /jobs endpoint.
test_jobs: If returning test jobs or test queries.
real_jobs: If returning real jobs.
Returns:
List of filtered jobs.
Raises:
ValueError: When no modes are selected to filter jobs with.
"""
selected_modes = []
if test_jobs:
selected_modes.append("DRY_RUN")
if real_jobs:
selected_modes.append("DEFAULT")
if not selected_modes:
raise ValueError("At least one of test_jobs and real_jobs must be True.")
jobs_json = [job for job in jobs_json if job["mode"] in selected_modes]
logger.info(f"Returning {selected_modes} jobs.")
return jobs_json
|
the-stack_106_31775 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides test suites that can be run to test fused convolutions.
Each of the two test suites in this module, FusedConv2DBiasActivationTest and
FusedConvInt8Tests, should be "instantiated" by declaring a class which inherits
from the FusedConv test and a class that provides the standard test.TestCase
API.
See e.g. fused_conv2d_bias_activation_op_test.py in this folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.contrib.fused_conv.python.ops import fused_conv2d_bias_activation_op
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
Relative to inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384], [
4, 8, 8, 2048
], [4, 8, 8, 448], [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 2048], [
4, 8, 8, 1760
], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 17, 17, 192], [
4, 17, 17, 192
], [4, 17, 17, 1248], [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224], [
4, 17, 17, 192
], [4, 17, 17, 192], [4, 17, 17, 1216], [4, 17, 17, 1216], [4, 17, 17, 224], [
4, 17, 17, 192
], [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152], [4, 17, 17, 192], [
4, 17, 17, 160
], [4, 17, 17, 1152], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
[4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128], [
4, 17, 17, 768
], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 768],
[4, 17, 17, 768], [4, 35, 35, 96], [4, 35, 35, 288], [
4, 35, 35, 64
], [4, 35, 35, 288], [4, 35, 35, 256], [4, 35, 35, 48], [
4, 35, 35, 256
], [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192], [
4, 35, 35, 192
], [4, 73, 73, 64], [4, 73, 73, 64], [4, 147, 147, 24]]
filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384], [
1, 1, 2048, 192
], [3, 3, 448, 384], [1, 1, 2048, 320], [1, 1, 2048, 448], [1, 1, 2048, 384],
[1, 1, 1760, 384], [1, 1, 1760, 192], [1, 1, 1760, 448], [
1, 1, 1760, 320
], [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192], [
3, 3, 128, 320
], [1, 1, 1248, 128], [1, 3, 224, 224], [3, 1, 192, 256], [
1, 3, 192, 256
], [1, 1, 1216, 192], [1, 1, 1216, 96], [3, 1, 224, 224], [
3, 3, 192, 224
], [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128], [
3, 1, 192, 192
], [3, 3, 160, 192], [1, 1, 1152, 160], [1, 1, 1024, 128], [
1, 3, 128, 192
], [1, 1, 1024, 160], [3, 1, 128, 192], [1, 1, 1024, 256], [
3, 1, 128, 128
], [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128], [
1, 1, 768, 128
], [1, 1, 768, 320], [3, 3, 96, 96], [3, 3, 288, 384], [
3, 3, 64, 96
], [1, 1, 288, 64], [1, 1, 256, 64], [5, 5, 48, 64],
[1, 1, 256, 48], [3, 3, 96, 96], [1, 1, 192, 32], [
1, 1, 192, 64
], [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64,
64], [1, 1, 24, 64]]
out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 192], [
4, 8, 8, 384
], [4, 8, 8, 320], [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384], [
4, 8, 8, 192
], [4, 8, 8, 448], [4, 8, 8, 320], [4, 8, 8, 192], [4, 17, 17, 192], [
4, 17, 17, 192
], [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224], [4, 17, 17, 256], [
4, 17, 17, 256
], [4, 17, 17, 192], [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224], [
4, 17, 17, 192
], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 192], [
4, 17, 17, 160
], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 192], [
4, 17, 17, 256
], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128], [
4, 17, 17, 128
], [4, 17, 17, 320], [4, 17, 17, 96], [4, 17, 17, 384], [4, 35, 35, 96], [
4, 35, 35, 64
], [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48], [4, 35, 35, 96],
[4, 35, 35, 32], [4, 35, 35, 64], [4, 35, 35, 48],
[4, 71, 71, 192], [4, 73, 73, 64], [4, 147, 147, 64]]
strides = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1
]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID
]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def _GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NCHW", True), ("NHWC", True)]
return test_configs
def _IotaNdF32Constant(dim_sizes):
def MakeList(dims):
if len(dims) == 1:
return [float(1 + f) for f in range(dims[0])]
return [MakeList(dims[1:]) for _ in range(dims[0])]
return constant_op.constant(MakeList(dim_sizes), dtype=dtypes.float32)
def _GetInceptionFwdTest(input_size,
filter_size,
stride,
padding,
gpu_only=True):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionFwd %s",
(input_size, filter_size, stride, padding))
return
tf_logging.info("Testing InceptionFwd %s",
(input_size, filter_size, stride, padding))
self.CompareFwdValues(input_size, filter_size, [stride, stride], padding)
return Test
class FusedConv2DBiasActivationTest(object):
@contextlib.contextmanager
def test_scope(self): # pylint: disable=invalid-name
"""Can be overridden in base classes to provide a test scope."""
yield
def _DtypesToTest(self, use_gpu):
return [dtypes.float32]
def _FilterFormatsToTest(self, use_gpu):
return ["HWIO", "OIHW"]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, bias,
strides, padding, activation_mode, data_format,
filter_format, dtype):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
bias: 1-D bias tensor of length output_depth.
strides: Stride: [col_stride, row_stride]
padding: Padding type.
activation_mode: Activation mode.
data_format: Format of the data tensors.
filter_format: Filter format to use for the fused convolution.
dtype: Data type for inputs and outputs.
Returns:
Symbolic tensor value and reference value that can be used to
execute the computation and verify the results.
"""
input_size = np.prod(tensor_in_sizes)
filter_size = np.prod(filter_in_sizes)
bias_size = filter_in_sizes[-1] # equals to output depth
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, input_size + 1)]
x2 = [f * 1.0 for f in range(1, filter_size + 1)]
# This is to guarantee that there are always negative values after
# bias add so that we can test whether relu works correctly.
x3 = bias
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
fused_t2 = t2
if filter_format == "OIHW":
fused_t2 = _HwioToOihw(t2)
t3 = constant_op.constant(x3, shape=[bias_size], dtype=dtype)
strides = [1] + strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
t1,
fused_t2,
t3,
strides=strides,
padding=padding,
data_format=data_format,
filter_format=filter_format,
activation_mode=activation_mode)
ref_conv_output = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
ref_bias_output = nn_ops.bias_add(
ref_conv_output, t3, data_format=data_format)
ref_output = nn_ops.relu(ref_bias_output)
if data_format == "NCHW":
output = test_util.NCHWToNHWC(output)
ref_output = test_util.NCHWToNHWC(ref_output)
return output, ref_output
def CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
x3 = np.random.rand(*[filter_in_sizes[-1]]).astype(np.float32)
def _SetupVal(data_format, use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
t3 = constant_op.constant(x3, shape=[filter_in_sizes[-1]])
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
t1,
t2,
t3,
strides=strides,
padding=padding,
data_format=data_format,
activation_mode="Relu")
if data_format == "NCHW":
output = test_util.NCHWToNHWC(output)
return output
with self.session() as sess, self.test_scope():
tensors = []
for (data_format, use_gpu) in _GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
values = sess.run(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, bias, strides,
padding):
with self.session() as sess, self.test_scope():
tensors = []
ref_tensors = []
for (data_format, use_gpu) in _GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
for filter_format in self._FilterFormatsToTest(use_gpu):
result, expected = self._SetupValuesForDevice(
tensor_in_sizes, filter_in_sizes, bias, strides, padding,
"Relu", data_format, filter_format, dtype)
tensors.append(result)
ref_tensors.append(expected)
values = sess.run(tensors)
ref_values = sess.run(ref_tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
ref_value = ref_values[i]
tf_logging.info("expected = %s", ref_value)
tf_logging.info("actual = %s", value)
tol = 1e-5
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(
np.ravel(ref_value), np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
def testConv2D1x1Filter(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D1x1Filter test.")
return
# expected_output = [
# 0.0, 0.0, 0.0, 21.0, 0.0, 0.0, 57.0, 0.0, 0.0, 93.0, 41.0, 0.0, 129.0,
# 86.0, 43.0, 165.0, 131.0, 97.0
# ]
medians = [-45.0, -130.0, -215.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
bias=medians,
strides=[1, 1],
padding="VALID")
def testConv2DEmpty(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DEmpty test.")
return
# expected_output = []
self._VerifyValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
bias=[0.0, 0.0, 0.0],
strides=[1, 1],
padding="VALID")
def testConv2D2x2Filter(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2Filter test.")
return
# expected_output = [0.0, 0.0, 0.0, 401.0, 533.0, 665.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
bias=[-2500.0, -2500.0, -2500.0],
strides=[1, 1],
padding="VALID")
def testConv2D1x2Filter(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D1x2Filter test.")
return
# expected_output = [
# 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 190.0, 265.0, 340.0, 343.0, 436.0, 529.0
# ]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
bias=[-500.0, -500.0, -500.0],
strides=[1, 1],
padding="VALID")
def testConv2D2x2FilterStride2(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2FilterStride2 test.")
return
# expected_output = [0.0, 67.0, 163.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
bias=[-2300.0, -2300.0, -2300.0],
strides=[2, 2],
padding="VALID")
def testConv2D2x2FilterStride2Same(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2FilterStride2Same test.")
return
# expected_output = [0.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
bias=[-2300.0, -1000.0, -1000.0],
strides=[2, 2],
padding="SAME")
def testConv2D2x2FilterStride1x2(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2FilterStride1x2 test.")
return
# expected_output = [0.0, 0.0, 8.0, 28.0, 48.0, 68.0]
self._VerifyValues(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-90.0],
strides=[1, 2],
padding="VALID")
def testConv2DKernelSmallerThanStrideValid(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DKernelSmallerThanStrideValid test.")
return
# expected_output = [0, 0, 175, 205]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-100.0],
strides=[3, 3],
padding="VALID")
def testConv2DKernelSmallerThanStrideSame(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DKernelSmallerThanStrideSame test.")
return
# expected = [0, 0, 2, 4]
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
bias=[-5.0],
strides=[2, 2],
padding="SAME")
# expected = [0, 0, 4, 6]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
bias=[-5.0],
strides=[2, 2],
padding="SAME")
# expected = [4, 0, 1, 0]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-40.0],
strides=[3, 3],
padding="SAME")
def testConv2DKernelSizeMatchesInputSize(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DKernelSizeMatchesInputSize test.")
return
# expected = [0, 5]
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
bias=[-50.0, -55.0],
strides=[1, 1],
padding="VALID")
# expected = [0, 2, 282, 322]
self._VerifyValues(
tensor_in_sizes=[1, 8, 8, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-200.0],
strides=[4, 4],
padding="SAME")
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
# Incorrect filter shape.
with self.assertRaises(ValueError):
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
# Depth mismatch.
with self.assertRaises(ValueError):
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 2]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
def testOpEdgeCases(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping OpEdgeCases tests.")
return
with self.session() as sess, self.test_scope():
# Illegal strides.
with self.assertRaisesRegexp(
errors_impl.UnimplementedError,
".*strides.*in the batch and depth dimensions"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1]),
strides=[2, 1, 1, 1],
padding="SAME",
activation_mode="Relu"))
with self.assertRaisesRegexp(
errors_impl.UnimplementedError,
".*strides.*in the batch and depth dimensions"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1]),
strides=[1, 1, 1, 2],
padding="SAME",
activation_mode="Relu"))
# Illegal activation mode.
with self.assertRaisesRegexp(ValueError,
"Op passed string 'Tanh' not in:"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1]),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Tanh"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([32, 20, 20, 3]),
_IotaNdF32Constant([20, 21, 3, 2]),
_IotaNdF32Constant([2]),
strides=[1, 1, 1, 1],
padding="VALID",
activation_mode="Relu"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([32, 20, 20, 3]),
_IotaNdF32Constant([21, 20, 3, 2]),
_IotaNdF32Constant([2]),
strides=[1, 1, 1, 1],
padding="VALID",
activation_mode="Relu"))
# Add InceptionFwd tests to FusedConv2DBiasActivationTest.
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(_GetShrunkInceptionShapes()):
setattr(FusedConv2DBiasActivationTest, "testInceptionFwd_" + str(index),
_GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))
# TODO(b/35359731)
# Fwd, BckInput, and BackFilter to test that for certain input parameter
# set, winograd nonfused algorithm will be excluded from conv autotune. If
# in such case, winograd nonfused algorithm is added as one option of the
# conv autotune, and cuDNN version is smaller than 7, the following tests
# will fail.
ishape = [1, 400, 400, 1]
fshape = [1, 1, 1, 256]
oshape = [1, 400, 400, 256]
setattr(FusedConv2DBiasActivationTest, "testInceptionFwd_No_Winograd_Nonfused",
_GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True))
def _CalculateConvolvedOutputDim(input_dim, filter_dim, stride, padding_type):
"""Calculates the size of an output dimension of a strided convolution.
Given the sizes of the corresponding dimension of the input and filter shapes,
and the stride and padding_types, calculates the size of the output dimension.
This function can be called separately for each input dimension.
Args:
input_dim: An `int` specifying the size of the input dimension.
filter_dim: An `int` specifying the size of the filter dimension.
stride: An `int` specifying the step size of the convolution along the
input dimension.
padding_type: either 'VALID' or 'SAME'.
Returns:
The size of the output dimension.
"""
if padding_type == "VALID":
return (input_dim - filter_dim + stride) // stride
else: # padding_type == 'SAME'
return (input_dim + stride - 1) // stride
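# Example: with input_dim=8, filter_dim=6 and stride=2 (cf. the first entry of
# _GetFusedConvInt8TestParams below), "VALID" gives (8 - 6 + 2) // 2 = 2 and
# "SAME" gives (8 + 2 - 1) // 2 = 4.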
def _GetFusedConvInt8TestParams():
"""Returns test parameters shared by all Int8 FusedConv tests."""
_test_params = [
{
"batch_size": 1,
"input_channels": 4,
"output_channels": 4,
"input_height": 8,
"input_width": 8,
"filter_height": 6,
"filter_width": 6,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 1,
"input_channels": 4,
"output_channels": 4,
"input_height": 6,
"input_width": 6,
"filter_height": 6,
"filter_width": 6,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 2,
"input_channels": 8,
"output_channels": 16,
"input_height": 8,
"input_width": 8,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "VALID"
},
{
"batch_size": 2,
"input_channels": 8,
"output_channels": 16,
"input_height": 8,
"input_width": 8,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 2,
"input_channels": 8,
"output_channels": 16,
"input_height": 8,
"input_width": 8,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "VALID"
},
{
"batch_size": 2,
"input_channels": 16,
"output_channels": 16,
"input_height": 9,
"input_width": 9,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.001,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 3,
"input_channels": 8,
"output_channels": 8,
"input_height": 9,
"input_width": 9,
"filter_height": 5,
"filter_width": 5,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.001,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 3,
"input_channels": 8,
"output_channels": 8,
"input_height": 9,
"input_width": 9,
"filter_height": 7,
"filter_width": 1,
"vertical_stride": 2,
"horizontal_stride": 1,
"conv_input_scale": 0.002,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 3,
"input_channels": 8,
"output_channels": 8,
"input_height": 9,
"input_width": 9,
"filter_height": 1,
"filter_width": 7,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.002,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
]
return _test_params
def _Int8Roundtrip(fn, tensor):
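  # Bitcast qint8 -> int8 so that `fn` (plain transpose/reshape ops) can act on
  # the raw bytes, then bitcast the transformed result back to qint8.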
return array_ops.bitcast(
fn(array_ops.bitcast(tensor, dtypes.int8)), dtypes.qint8)
def _NchwVectCToNchw(in_tensor):
# [N, C / 4, H, W, 4] => [N, C / 4, 4, H, W] == [N, C, H, W]
t = array_ops.transpose(in_tensor, [0, 1, 4, 2, 3])
n = in_tensor.shape.dims[0].value
c = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
h = in_tensor.shape.dims[2].value
w = in_tensor.shape.dims[3].value
return array_ops.reshape(t, [n, c, h, w])
def _NchwVectCToNhwc(in_tensor):
# [N, C / 4, H, W, 4] => [N, H, W, C / 4, 4] == [N, H, W, C]
t = array_ops.transpose(in_tensor, [0, 2, 3, 1, 4])
n = in_tensor.shape.dims[0].value
h = in_tensor.shape.dims[2].value
w = in_tensor.shape.dims[3].value
c = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
return array_ops.reshape(t, [n, h, w, c])
def _OihwVectIToHwio(in_tensor):
# [O, I / 4, H, W, 4] => [O, I / 4, 4, H, W] == [O, I, H, W]
t = array_ops.transpose(in_tensor, [2, 3, 1, 4, 0])
o = in_tensor.shape.dims[0].value
i = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
h = in_tensor.shape.dims[2].value
w = in_tensor.shape.dims[3].value
return array_ops.reshape(t, [h, w, i, o])
def _NchwToNchwVectC(in_tensor):
n, c, h, w = in_tensor.shape.as_list()
assert c % 4 == 0
t = array_ops.reshape(in_tensor, [n, c // 4, 4, h, w])
return array_ops.transpose(t, [0, 1, 3, 4, 2])
def _NhwcToNchwVectC(in_tensor):
  # [N, H, W, C] => [N, H, W, C / 4, 4] => [N, C / 4, H, W, 4]
n, h, w, c = in_tensor.shape.as_list()
assert c % 4 == 0
t = array_ops.reshape(in_tensor, [n, h, w, c // 4, 4])
return array_ops.transpose(t, [0, 3, 1, 2, 4])
def _HwioToOihw(in_tensor):
return array_ops.transpose(in_tensor, [3, 2, 0, 1])
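# Shape example for the layout helpers above: an NCHW_VECT_C tensor of shape
# [2, 4, 8, 8, 4] (N=2, C=16 stored as C/4=4 groups of 4) becomes [2, 16, 8, 8]
# via _NchwVectCToNchw and [2, 8, 8, 16] via _NchwVectCToNhwc.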
def _SimulateFusedConv2dBiasActivationInt8OnCpu(conv_input_scale, conv_input,
kernel, padding, strides,
side_input_scale, side_input,
biases, apply_relu):
"""Simulates the int8 fused 2-D convolution op using separate float ops.
The arguments and return values have the same format, meanings and
restrictions as the actual op.
Args:
conv_input_scale: A scalar 'float'.
conv_input: A `Tensor` of type `qint8` in NHWC layout.
kernel: A `Tensor` of type `qint8` in HWIO layout.
padding: A `string` from: `"SAME", "VALID"`.
strides: A list of `ints`.
side_input_scale: A scalar 'float'.
side_input: A `Tensor` of type `qint8` in NHWC layout.
biases: A `Tensor` of type `float32` in NHWC layout.
apply_relu: A boolean to specify whether to apply "Relu" activation function
that clips outputs to the range [0, 127], or "None" activation that clips
to the range [-128, 127].
Returns:
A `Tensor` of type `qint8` in NHWC layout.
"""
conv_result = nn_ops.conv2d(
math_ops.cast(conv_input, dtypes.float32),
math_ops.cast(kernel, dtypes.float32),
strides=strides,
padding=padding,
data_format="NHWC") * conv_input_scale
conv_and_side_inputs = conv_result + side_input_scale * math_ops.cast(
side_input, dtypes.float32)
output = nn_ops.bias_add(conv_and_side_inputs, biases, data_format="NHWC")
if apply_relu:
output = nn_ops.relu(output)
# In this case quantization is identical to clipping and casting.
result, _, _ = gen_array_ops.quantize_v2(output, -128, 127, dtypes.qint8)
return result
# FusedConv2DBiasActivation on CPU supports only NHWC/HWIO data format.
class FusedConvInt8CPUTests(object):
"""Verify quantization with CPU kernel."""
_test_params = _GetFusedConvInt8TestParams()
@contextlib.contextmanager
def test_scope(self): # pylint: disable=invalid-name
"""Can be overridden in base classes to provide a test scope."""
yield
def runTest(self, test_param, apply_relu):
"""Runs tests for dimensions configured in test_param."""
batch_size = test_param["batch_size"]
input_channels = test_param["input_channels"]
output_channels = test_param["output_channels"]
input_height = test_param["input_height"]
input_width = test_param["input_width"]
filter_height = test_param["filter_height"]
filter_width = test_param["filter_width"]
vertical_stride = test_param["vertical_stride"]
horizontal_stride = test_param["horizontal_stride"]
conv_input_scale = test_param["conv_input_scale"]
side_input_scale = test_param["side_input_scale"]
bias_scale = test_param["bias_scale"]
padding_type = test_param["padding_type"]
with self.session() as sess, self.test_scope():
conv_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[batch_size, input_height, input_width, input_channels],
minval=-0.0,
maxval=1.0,
dtype=dtypes.float32), -1.0, 1.0, dtypes.qint8)
kernel, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[filter_height, filter_width, input_channels, output_channels],
minval=-1.0,
maxval=1.0,
dtype=dtypes.float32), -1.0, 1.0, dtypes.qint8)
output_height = _CalculateConvolvedOutputDim(input_height, filter_height,
vertical_stride,
padding_type)
output_width = _CalculateConvolvedOutputDim(input_width, filter_width,
horizontal_stride,
padding_type)
tf_logging.info("output_height=%s, output_width=%s", output_height,
output_width)
side_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[batch_size, output_height, output_width, output_channels],
minval=0.0,
maxval=1.0,
dtype=dtypes.float32), -1.0, 1.0, dtypes.qint8)
biases = random_ops.random_uniform([output_channels],
minval=-10 * bias_scale,
maxval=20 * bias_scale,
dtype=dtypes.float32)
strides = [1, vertical_stride, horizontal_stride, 1]
actual = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
conv_input,
kernel,
biases,
strides=strides,
padding=padding_type,
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=(None if side_input_scale == 0.0 else side_input),
activation_mode="Relu" if apply_relu else "None",
data_format="NHWC",
filter_format="HWIO")
expected = _SimulateFusedConv2dBiasActivationInt8OnCpu(
conv_input_scale, conv_input, kernel, padding_type, strides,
side_input_scale, side_input, biases, apply_relu)
actual_y, expected_y = sess.run([actual, expected])
self.assertAllClose(actual_y, expected_y, rtol=0, atol=1)
def testFusedConvInt8(self):
for apply_relu in [True, False]:
for test_param in self._test_params:
self.runTest(test_param, apply_relu)
# Test that GPU and CPU kernels produce identical results for QInt8 data type.
class FusedConvInt8CorrespondenceTests(object):
"""Verify quantization with CPU kernel."""
_test_params = _GetFusedConvInt8TestParams()
@contextlib.contextmanager
def test_scope(self): # pylint: disable=invalid-name
"""Can be overridden in base classes to provide a test scope."""
yield
def runTest(self, test_param, apply_relu):
"""Runs tests for dimensions configured in test_param."""
batch_size = test_param["batch_size"]
input_channels = test_param["input_channels"]
output_channels = test_param["output_channels"]
input_height = test_param["input_height"]
input_width = test_param["input_width"]
filter_height = test_param["filter_height"]
filter_width = test_param["filter_width"]
vertical_stride = test_param["vertical_stride"]
horizontal_stride = test_param["horizontal_stride"]
conv_input_scale = test_param["conv_input_scale"]
side_input_scale = test_param["side_input_scale"]
bias_scale = test_param["bias_scale"]
padding_type = test_param["padding_type"]
with self.session() as sess, self.test_scope():
conv_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[batch_size, input_channels // 4, input_height, input_width, 4],
minval=-0.0,
maxval=1.0,
dtype=dtypes.float32), -1.0, 1.0, dtypes.qint8)
kernel, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform([
output_channels, input_channels // 4, filter_height, filter_width,
4
],
minval=-1.0,
maxval=1.0,
dtype=dtypes.float32), -1.0, 1.0,
dtypes.qint8)
output_height = _CalculateConvolvedOutputDim(input_height, filter_height,
vertical_stride,
padding_type)
output_width = _CalculateConvolvedOutputDim(input_width, filter_width,
horizontal_stride,
padding_type)
tf_logging.info("output_height=%s, output_width=%s", output_height,
output_width)
side_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform([
batch_size, output_channels // 4, output_height, output_width, 4
],
minval=0.0,
maxval=1.0,
dtype=dtypes.float32), -1.0, 1.0,
dtypes.qint8)
biases = random_ops.random_uniform([output_channels],
minval=-10 * bias_scale,
maxval=20 * bias_scale,
dtype=dtypes.float32)
with ops.device("/cpu:0"):
t = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_Int8Roundtrip(_NchwVectCToNhwc, conv_input),
_Int8Roundtrip(_OihwVectIToHwio, kernel),
biases,
strides=[1, vertical_stride, horizontal_stride, 1],
padding=padding_type,
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=(None if side_input_scale == 0.0 else _Int8Roundtrip(
_NchwVectCToNhwc, side_input)),
activation_mode="Relu" if apply_relu else "None",
data_format="NHWC",
filter_format="HWIO")
cpu_result = _Int8Roundtrip(_NhwcToNchwVectC, t)
with ops.device("/gpu:0"):
t = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
conv_input,
kernel,
biases,
strides=[1, 1, vertical_stride, horizontal_stride],
padding=padding_type,
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=(None if side_input_scale == 0.0 else side_input),
activation_mode="Relu" if apply_relu else "None",
data_format="NCHW_VECT_C",
filter_format="OIHW_VECT_I")
gpu_result = t
cpu_y, gpu_y = sess.run([cpu_result, gpu_result])
self.assertAllClose(cpu_y, gpu_y, rtol=0, atol=0)
def testFusedConvInt8(self):
if not test.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(6, 1)):
tf_logging.info("int8 test skipped because not run with --config=cuda or "
"no GPUs with compute capability >= 6.1 are available.")
return
for apply_relu in [True, False]:
for test_param in self._test_params:
self.runTest(test_param, apply_relu)
if __name__ == "__main__":
test.main()
|
the-stack_106_31777 | import codecs
import os
import re
from setuptools import find_packages, setup
def get_absolute_path(*args):
"""Transform relative pathnames into absolute pathnames."""
return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)
def get_contents(*args):
"""Get the contents of a file relative to the source distribution directory."""
with codecs.open(get_absolute_path(*args), "r", "UTF-8") as handle:
return handle.read()
def get_version(*args):
"""Extract the version number from a Python module."""
contents = get_contents(*args)
metadata = dict(re.findall("__([a-z]+)__ = ['\"]([^'\"]+)", contents))
return metadata["version"]
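# For example, a module containing the line `__version__ = "0.2.1"` (version
# number illustrative) yields metadata {"version": "0.2.1"}, so get_version()
# returns "0.2.1".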
setup(
name="ppln-mlflow",
version=get_version("ppln_mlflow", "__init__.py"),
author="Miras Amir",
author_email="[email protected]",
description="MLflow hook for ppln",
long_description_content_type="text/markdown",
url="https://github.com/ppln-team/ppln-mlflow",
packages=find_packages(),
install_requires=["mlflow>=1.4"],
setup_requires=["pytest-runner"],
python_requires=">=3.6.0",
)
|
the-stack_106_31778 | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A context for execution based on an embedded executor instance."""
import asyncio
import contextlib
from typing import Any
from typing import Callable
from typing import Optional
import tensorflow as tf
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.impl.compiler import compiler_pipeline
from tensorflow_federated.python.core.impl.context_stack import context_base
from tensorflow_federated.python.core.impl.executors import cardinalities_utils
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_factory
from tensorflow_federated.python.core.impl.executors import executor_value_base
from tensorflow_federated.python.core.impl.executors import ingestable_base
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import typed_object
def _unwrap(value):
if isinstance(value, tf.Tensor):
return value.numpy()
elif isinstance(value, structure.Struct):
return structure.Struct(
(k, _unwrap(v)) for k, v in structure.iter_elements(value))
else:
return value
class AsyncExecutionContextValue(typed_object.TypedObject):
"""Wrapper class for values produced by `ExecutionContext`."""
def __init__(self, value, type_spec):
py_typecheck.check_type(type_spec, computation_types.Type)
self._value = value
self._type_spec = type_spec
@property
def type_signature(self):
return self._type_spec
@property
def value(self):
return self._value
async def _ingest(executor, val, type_spec):
"""A coroutine that handles ingestion.
Args:
executor: An instance of `executor_base.Executor`.
val: The first argument to `context_base.Context.ingest()`.
type_spec: The second argument to `context_base.Context.ingest()`.
Returns:
The result of the ingestion.
Raises:
TypeError: If the `val` is not a value of type `type_spec`.
"""
if isinstance(val, executor_value_base.ExecutorValue):
return val
elif isinstance(val, ingestable_base.Ingestable):
val_type = val.type_signature
py_typecheck.check_type(val_type, computation_types.Type)
type_spec.check_assignable_from(val_type)
return await val.ingest(executor)
elif (isinstance(val, structure.Struct) and not type_spec.is_federated()):
type_spec.check_struct()
v_elem = structure.to_elements(val)
t_elem = structure.to_elements(type_spec)
if len(v_elem) != len(t_elem):
raise TypeError(
'Value {} does not match type {}: mismatching tuple length.'.format(
val, type_spec))
for ((vk, _), (tk, _)) in zip(v_elem, t_elem):
if vk not in [tk, None]:
raise TypeError(
'Value {} does not match type {}: mismatching tuple element '
'names {} vs. {}.'.format(val, type_spec, vk, tk))
ingested = []
for (_, v), (_, t) in zip(v_elem, t_elem):
ingested.append(_ingest(executor, v, t))
ingested = await asyncio.gather(*ingested)
return await executor.create_struct(
structure.Struct(
(name, val) for (name, _), val in zip(t_elem, ingested)))
else:
return await executor.create_value(val, type_spec)
async def _invoke(executor, comp, arg, result_type: computation_types.Type):
"""A coroutine that handles invocation.
Args:
executor: An instance of `executor_base.Executor`.
comp: The first argument to `context_base.Context.invoke()`.
arg: The optional second argument to `context_base.Context.invoke()`.
result_type: The type signature of the result. This is used to convert the
execution result into the proper container types.
Returns:
The result of the invocation.
"""
if arg is not None:
py_typecheck.check_type(arg, executor_value_base.ExecutorValue)
comp = await executor.create_value(comp)
result = await executor.create_call(comp, arg)
py_typecheck.check_type(result, executor_value_base.ExecutorValue)
result_val = _unwrap(await result.compute())
return type_conversions.type_to_py_container(result_val, result_type)
def _unwrap_execution_context_value(val):
"""Recursively removes wrapping from `val` under anonymous tuples."""
if isinstance(val, structure.Struct):
value_elements_iter = structure.iter_elements(val)
return structure.Struct((name, _unwrap_execution_context_value(elem))
for name, elem in value_elements_iter)
elif isinstance(val, AsyncExecutionContextValue):
return _unwrap_execution_context_value(val.value)
else:
return val
class AsyncExecutionContext(context_base.Context):
"""An asynchronous execution context backed by an `executor_base.Executor`.
This context's `ingest` and `invoke` methods return Python coroutine objects
which represent the actual work of ingestion and invocation in the backing
executor.
"""
def __init__(self,
executor_fn: executor_factory.ExecutorFactory,
compiler_fn: Optional[Callable[[computation_base.Computation],
Any]] = None):
"""Initializes an execution context.
Args:
executor_fn: Instance of `executor_factory.ExecutorFactory`.
compiler_fn: A Python function that will be used to compile a computation.
"""
py_typecheck.check_type(executor_fn, executor_factory.ExecutorFactory)
self._executor_factory = executor_fn
if compiler_fn is not None:
py_typecheck.check_callable(compiler_fn)
self._compiler_pipeline = compiler_pipeline.CompilerPipeline(compiler_fn)
else:
self._compiler_pipeline = None
async def ingest(self, val, type_spec):
return AsyncExecutionContextValue(val, type_spec)
async def invoke(self, comp, arg):
comp.type_signature.check_function()
# Save the type signature before compiling. Compilation currently loses
# container types, so we must remember them here so that they can be
# restored in the output.
result_type = comp.type_signature.result
if self._compiler_pipeline is not None:
with tracing.span('ExecutionContext', 'Compile', span=True):
comp = self._compiler_pipeline.compile(comp)
with tracing.span('ExecutionContext', 'Invoke', span=True):
@contextlib.contextmanager
def reset_factory_on_error(ex_factory, cardinalities):
try:
yield ex_factory.create_executor(cardinalities)
except Exception as e:
ex_factory.clean_up_executors()
raise e
if arg is not None:
py_typecheck.check_type(arg, AsyncExecutionContextValue)
unwrapped_arg = _unwrap_execution_context_value(arg)
cardinalities = cardinalities_utils.infer_cardinalities(
unwrapped_arg, arg.type_signature)
else:
cardinalities = {}
with reset_factory_on_error(self._executor_factory,
cardinalities) as executor:
py_typecheck.check_type(executor, executor_base.Executor)
if arg is not None:
arg = await tracing.wrap_coroutine_in_current_trace_context(
_ingest(executor, unwrapped_arg, arg.type_signature))
return await tracing.wrap_coroutine_in_current_trace_context(
_invoke(executor, comp, arg, result_type))
|
the-stack_106_31779 | import re
class TokexTokenizer(object):
"""
Base class for Tokex tokenizers. Uses re.findall & a collection of regular expressions to break up an
input string into a sequence of tokens.
Can be extended by subclassing this class & implementing a `tokenize` function or creating a custom
list of tokenizer_regexes/tweaking tokenize_newlines
    Using this class without subclassing will tokenize an input string by breaking up all occurrences of quoted
    text into their own separate tokens, all sequences of alphanumeric characters into their own separate tokens,
    and all consecutive non-alphanumeric, non-whitespace characters into their own tokens.
"""
tokenizer_regexes = (
r'"[^"]*"',
r"'[^']*'",
r"\b\w+\b",
r"[^a-zA-Z0-9_ \t\n\r\f\v]+"
)
def __init__(self, tokenizer_regexes=None, tokenize_newlines=False, ignore_empty_lines=False):
"""
Inputs: tokenizer_regexes - Can be passed to provide a custom list of tokenizer regexes to parse
an input string with.
tokenize_newlines - A boolean indicating whether newlines should be treated as tokens
ignore_empty_lines - A boolean indicating whether we should skip over empty lines or not.
Only has an effect if tokenize_newlines is passed and True
"""
self.tokenize_newlines = tokenize_newlines
self.ignore_empty_lines = ignore_empty_lines
if tokenizer_regexes:
self.tokenizer_regexes = tokenizer_regexes
if self.tokenize_newlines:
self.tokenizer_regexes = list(self.tokenizer_regexes) + [r"\n"]
def tokenize(self, input_string):
"""
        Function called by tokex to break an input string into the tokens it will process.
Inputs: input_string - A string, to break into tokens.
Outputs: A list of tokens from input_string.
"""
tokens = re.findall(
"(%s)" % "|".join(self.tokenizer_regexes),
input_string,
flags=re.MULTILINE
)
if self.tokenize_newlines and self.ignore_empty_lines:
for idx in reversed(range(len(tokens))):
if tokens[idx] == "\n" and ((idx > 0 and tokens[idx - 1] == "\n") or idx == 0):
tokens.pop(idx)
return tokens
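# Example with the default regexes (input string is illustrative):
#   TokexTokenizer().tokenize('say "hello world" 3 times!')
# returns ['say', '"hello world"', '3', 'times', '!']: quoted text stays whole,
# each alphanumeric run is one token, and the trailing punctuation is grouped
# by the final non-alphanumeric pattern.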
class NumericTokenizer(TokexTokenizer):
"""
    Tokenizer which keeps numeric values together as single tokens.
"""
tokenizer_regexes = (
r'"[^"]*"',
r"'[^']*'",
r"\S+",
)
|
the-stack_106_31780 | # -*- coding: utf-8 -*-
"""
hooks.pre_gen_project
~~~~~~~~~~~~~~~~~~~~~
Hooks to run before project generation.
:copyright: (c) 2016 by John P. Neumann.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
MODULE_REGEX = r'^[_a-zA-Z][_a-zA-Z0-9]+$'
module_name = '{{ cookiecutter.project_slug }}'
if not re.match(MODULE_REGEX, module_name):
    sys.stderr.write('ERROR: The project slug ({0}) is not a valid Python module name. Please do not use "-"; '
                     'use "_" instead.\n'.format(module_name))
sys.exit(1)
|
the-stack_106_31781 | from setuptools import setup
requirements = []
with open("requirements.txt", "r") as fh:
for line in fh:
requirements.append(line.strip())
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name = "welearn-bot-iiserkol",
description = "A command line client for WeLearn, in the IISER Kolkata domain",
long_description=long_description,
long_description_content_type="text/markdown",
author = "Parth Bibekar",
author_email = "[email protected]",
url = "https://github.com/ParthBibekar/Welearn-bot",
version = "1.0.0",
license = "MIT",
scripts = ["welearn_bot"],
install_requires = requirements
)
|
the-stack_106_31784 | import os
from virgil_trust_provisioner.core_utils import CRCCCITT
from virgil_trust_provisioner.data_types import TrustList
class FileKeyStorage:
def __init__(self, storage_path):
super(FileKeyStorage, self).__init__()
self.storage_path = storage_path
def __save_key_pair(self, file_name, key_pair):
file_prefix = CRCCCITT().calculate(bytes(key_pair.public_key))
file_path_public = os.path.join(self.storage_path, file_name + '_pub_' + str(file_prefix))
file_path_private = os.path.join(self.storage_path, file_name + '_priv_' + str(file_prefix))
open(file_path_public, 'wb').write(bytearray(key_pair.public_key))
open(file_path_private, 'wb').write(bytearray(key_pair.private_key))
def __save_trust_list(self, file_name, trust_list):
file_prefix = CRCCCITT().calculate(bytes(trust_list))
if not os.path.exists(self.storage_path):
os.makedirs(self.storage_path)
file_path = os.path.join(
self.storage_path,
file_name + '_' + str(file_prefix) + '.tl'
)
open(file_path, 'wb').write(bytes(trust_list))
def __save_blob(self, file_name, data):
file_path = os.path.join(self.storage_path, file_name)
open(file_path, 'wb').write(data)
def save(self, data, place):
"""
        Store data to a file under the storage path.
        Args:
            data: Data to store (a TrustList or raw bytes).
            place: Base file name to store the data under.
"""
if isinstance(data, TrustList):
self.__save_trust_list(place, data)
if isinstance(data, (bytes, bytearray)):
self.__save_blob(place, data)
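# Example usage (paths and names are illustrative):
#   storage = FileKeyStorage("/tmp/keys")
#   storage.save(trust_list, "TrustList")    # -> /tmp/keys/TrustList_<crc>.tl
#   storage.save(b"raw bytes", "blob_name")  # -> /tmp/keys/blob_name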
|
the-stack_106_31786 | # Perform the necessary imports
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
# from rlkit.envs.read_hdf5 import get_dataset, qlearning_dataset
import matplotlib.pyplot as plt
import numpy as np
import h5py
import torch
from uncertainty_modeling.rl_uncertainty.model import *
import gym
import d4rl
def get_diffs(x, model, batch_size=256):
model.eval()
with torch.no_grad():
batchified = x.split(batch_size)
stacked = []
for _x in batchified:
model.eval()
diffs = []
_x = _x.to(next(model.parameters()).device).float()
x_tilde = model(_x)
diffs.append((x_tilde - _x).cpu())
for layer in model.enc_layer_list:
_x = layer(_x)
x_tilde = layer(x_tilde)
diffs.append((x_tilde - _x).cpu())
stacked.append(diffs)
stacked = list(zip(*stacked))
diffs = [torch.cat(s, dim=0).numpy() for s in stacked]
return diffs
def get_keys(h5file):
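    # Recursively collect the name of every h5py.Dataset in the file.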
keys = []
def visitor(name, item):
if isinstance(item, h5py.Dataset):
keys.append(name)
h5file.visititems(visitor)
return keys
def get_dataset(h5path):
dataset_file = h5py.File(h5path, 'r')
data_dict = {k: dataset_file[k][:] for k in get_keys(dataset_file)}
dataset_file.close()
return data_dict
def qlearning_dataset(dataset):
"""
Returns datasets formatted for use by standard Q-learning algorithms,
with observations, actions, next_observations, rewards, and a terminal
flag.
Args:
dataset: dataset to pass in for processing.
Returns:
A dictionary containing keys:
observations: An N x dim_obs array of observations.
actions: An N x dim_action array of actions.
next_observations: An N x dim_obs array of next observations.
rewards: An N-dim float array of rewards.
terminals: An N-dim boolean array of "done" or episode termination flags.
"""
N = dataset['rewards'].shape[0]
obs_ = []
next_obs_ = []
action_ = []
reward_ = []
done_ = []
for i in range(N-1):
obs = dataset['observations'][i]
new_obs = dataset['observations'][i+1]
action = dataset['actions'][i]
reward = dataset['rewards'][i]
done_bool = bool(dataset['terminals'][i])
obs_.append(obs)
next_obs_.append(new_obs)
action_.append(action)
reward_.append(reward)
done_.append(done_bool)
return {
'observations': np.array(obs_),
'actions': np.array(action_),
'next_observations': np.array(next_obs_),
'rewards': np.array(reward_),
'terminals': np.array(done_),
}
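# Note: the loop above runs to N-1, so the final transition is dropped because
# the last observation has no recorded successor.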
file_path = '/home/user/Documents/Workspace-Changyeop/Workspace/AdvancedDL/AI602_Project/bear/online_buffer.hdf5' # offline_buffer_itr_140
file_path2 = '/home/user/Documents/Workspace-Changyeop/Workspace/AdvancedDL/AI602_Project/bear/offline_buffer_itr_140.hdf5' # offline_buffer_itr_140
# Read the saved replay buffer
data_dict = get_dataset(file_path)
data_dict2 = get_dataset(file_path2)
print(data_dict['observations'].shape)
print(data_dict['actions'].shape)
# feature_sub = np.hstack([data_dict['observations'][::10], data_dict['actions'][::10]]) #
# feature_sub2 = np.hstack([data_dict2['observations'][::10], data_dict2['actions'][::10]])
feature_sub = np.hstack([data_dict['observations'], data_dict['actions']]) #
feature_sub2 = np.hstack([data_dict2['observations'], data_dict2['actions']])
print(np.max(data_dict['observations']))
print(np.min(data_dict['observations']))
print(np.max(data_dict['actions']))
print(np.min(data_dict['actions']))
a = np.linspace(0, 1, 7)
b = np.linspace(0, 1, 7)
c = np.linspace(0, 1, 7)
d = np.linspace(0, 1, 7)
av, bv, cv, dv = np.meshgrid(a,b,c,d)
meshgrid_data = torch.from_numpy(np.stack([av,bv,cv,dv], axis=-1))
meshgrid_data = np.reshape(meshgrid_data, [-1, 4])
model = RaPP(4).cuda()
model.load_state_dict(torch.load("/home/user/Documents/Workspace-Changyeop/Workspace/AdvancedDL/AI602_Project/bear/uncertainty_modeling/rl_uncertainty/rapp/model/point-robot/model_1980.pt")) # if not handling ensemble
# id_dif = get_diffs(meshgrid_data, model)
id_dif = get_diffs(torch.from_numpy(feature_sub2), model)
id_difs = torch.cat([torch.from_numpy(i) for i in id_dif], dim=-1).numpy()
id_dif = (id_difs**2).mean(axis=1)
print(np.mean(id_dif), np.max(id_dif), np.min(id_dif))
#
#
# # feature = np.vstack([feature_sub, feature_sub2, meshgrid_data])
# feature = np.vstack([expert_data, medium_data])
#
# model = TSNE(learning_rate=10)
# transformed = model.fit_transform(feature)
#
#
# xs = transformed[:,0]
# ys = transformed[:,1]
#
# # plt.scatter(xs[1600:3200],ys[1600:3200],color="g")
# plt.scatter(xs[:2000],ys[:2000],color="g")
# import seaborn as sns
# cmap = sns.diverging_palette(240, 10, l=65, center="dark", as_cmap=True)
# vmin= np.min(id_dif)
# vmax= np.max(id_dif)
# sc = plt.scatter(xs[2000:],ys[2000:], c=id_dif,vmin=vmin, vmax=vmax, cmap=cmap)
# plt.colorbar(sc)
# plt.show()
|
the-stack_106_31790 | from json import dumps
from pathlib import Path
from os import getenv
path = Path(".")
directory = []
origin = getenv("origin", "https://noo.farfrom.world/")
for noo_file in path.glob("*/*.noofile.yml"):
directory.append(str(noo_file))
# Export as json
with open("index.json", "w+") as f:
f.write(dumps(directory))
# Export as html
# Yes this sucks, I didn't want to add the complexity of svelte.
html = ""
for noo_file in directory:
html += f"<a href=\"/{noo_file}\">{noo_file}</a><br>"
with open("index.html", "w+") as f:
f.write(html)
# Build registry
with open("registry.json", "w+") as f:
registry = {}
for noo_file in directory:
name = noo_file.replace(".noofile.yml", "")
url = origin + noo_file
registry[name] = url
f.write(dumps(registry))
|
the-stack_106_31793 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import signal
import sys
import traceback
import threading
from typing import Optional, TYPE_CHECKING, List
try:
import PyQt5
except Exception:
sys.exit("Error: Could not import PyQt5 on Linux systems, you may try 'sudo apt-get install python3-pyqt5'")
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import (QApplication, QSystemTrayIcon, QWidget, QMenu,
QMessageBox)
from PyQt5.QtCore import QObject, pyqtSignal, QTimer, Qt
import PyQt5.QtCore as QtCore
from electrum.i18n import _, set_language
from electrum.plugin import run_hook
from electrum.base_wizard import GoBack
from electrum.util import (UserCancelled, profiler, send_exception_to_crash_reporter,
WalletFileException, BitcoinException, get_new_wallet_name)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.wallet_db import WalletDB
from electrum.logging import Logger
from .installwizard import InstallWizard, WalletAlreadyOpenInMemory
from .util import get_default_language, read_QIcon, ColorScheme, custom_message_box, MessageBoxMixin
from .main_window import ElectrumWindow
from .network_dialog import NetworkDialog
from .stylesheet_patcher import patch_qt_stylesheet
from .lightning_dialog import LightningDialog
from .watchtower_dialog import WatchtowerDialog
from .exception_window import Exception_Hook
if TYPE_CHECKING:
from electrum.daemon import Daemon
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toString())
return True
return False
class QElectrumApplication(QApplication):
new_window_signal = pyqtSignal(str, object)
class QNetworkUpdatedSignalObject(QObject):
network_updated_signal = pyqtSignal(str, object)
class ElectrumGui(Logger):
network_dialog: Optional['NetworkDialog']
lightning_dialog: Optional['LightningDialog']
watchtower_dialog: Optional['WatchtowerDialog']
@profiler
def __init__(self, config: 'SimpleConfig', daemon: 'Daemon', plugins: 'Plugins'):
set_language(config.get('language', get_default_language()))
Logger.__init__(self)
self.logger.info(f"Qt GUI starting up... Qt={QtCore.QT_VERSION_STR}, PyQt={QtCore.PYQT_VERSION_STR}")
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electrum-bsty.desktop')
self.gui_thread = threading.current_thread()
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = [] # type: List[ElectrumWindow]
self.efilter = OpenFileEventFilter(self.windows)
self.app = QElectrumApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.app.setWindowIcon(read_QIcon("electrum-bsty.png"))
self._cleaned_up = False
# timer
self.timer = QTimer(self.app)
self.timer.setSingleShot(False)
self.timer.setInterval(500) # msec
self.network_dialog = None
self.lightning_dialog = None
self.watchtower_dialog = None
self.network_updated_signal_obj = QNetworkUpdatedSignalObject()
self._num_wizards_in_progress = 0
self._num_wizards_lock = threading.Lock()
self.dark_icon = self.config.get("dark_icon", False)
self.tray = None
self._init_tray()
self.app.new_window_signal.connect(self.start_new_window)
self.set_dark_theme_if_needed()
run_hook('init_qt', self)
def _init_tray(self):
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Electrum')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
def set_dark_theme_if_needed(self):
use_dark_theme = self.config.get('qt_gui_color_theme', 'default') == 'dark'
if use_dark_theme:
try:
import qdarkstyle
self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
except BaseException as e:
use_dark_theme = False
self.logger.warning(f'Error setting dark theme: {repr(e)}')
# Apply any necessary stylesheet patches
patch_qt_stylesheet(use_dark_theme=use_dark_theme)
# Even if we ourselves don't set the dark theme,
# the OS/window manager/etc might set *a dark theme*.
# Hence, try to choose colors accordingly:
ColorScheme.update_from_widget(QWidget(), force_dark=use_dark_theme)
def build_tray_menu(self):
if not self.tray:
return
# Avoid immediate GC of old menu when window closed via its action
if self.tray.contextMenu() is None:
m = QMenu()
self.tray.setContextMenu(m)
else:
m = self.tray.contextMenu()
m.clear()
network = self.daemon.network
m.addAction(_("Network"), self.show_network_dialog)
if network and network.lngossip:
m.addAction(_("Lightning Network"), self.show_lightning_dialog)
if network and network.local_watchtower:
m.addAction(_("Local Watchtower"), self.show_watchtower_dialog)
for window in self.windows:
name = window.wallet.basename()
submenu = m.addMenu(name)
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Electrum"), self.app.quit)
def tray_icon(self):
if self.dark_icon:
return read_QIcon('electrum_dark_icon.png')
else:
return read_QIcon('electrum_light_icon.png')
def toggle_tray_icon(self):
if not self.tray:
return
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def _cleanup_before_exit(self):
if self._cleaned_up:
return
self._cleaned_up = True
self.app.new_window_signal.disconnect()
self.efilter = None
# If there are still some open windows, try to clean them up.
for window in list(self.windows):
window.close()
window.clean_up()
if self.network_dialog:
self.network_dialog.close()
self.network_dialog.clean_up()
self.network_dialog = None
self.network_updated_signal_obj = None
if self.lightning_dialog:
self.lightning_dialog.close()
self.lightning_dialog = None
if self.watchtower_dialog:
self.watchtower_dialog.close()
self.watchtower_dialog = None
# Shut down the timer cleanly
self.timer.stop()
self.timer = None
# clipboard persistence. see http://www.mail-archive.com/[email protected]/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
if self.tray:
self.tray.hide()
self.tray.deleteLater()
self.tray = None
def _maybe_quit_if_no_windows_open(self) -> None:
"""Check if there are any open windows and decide whether we should quit."""
# keep daemon running after close
if self.config.get('daemon'):
return
# check if a wizard is in progress
with self._num_wizards_lock:
if self._num_wizards_in_progress > 0 or len(self.windows) > 0:
return
self.app.quit()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.new_window_signal.emit(path, uri)
def show_lightning_dialog(self):
if not self.daemon.network.has_channel_db():
return
if not self.lightning_dialog:
self.lightning_dialog = LightningDialog(self)
self.lightning_dialog.bring_to_top()
def show_watchtower_dialog(self):
if not self.watchtower_dialog:
self.watchtower_dialog = WatchtowerDialog(self)
self.watchtower_dialog.bring_to_top()
def show_network_dialog(self):
if self.network_dialog:
self.network_dialog.on_update()
self.network_dialog.show()
self.network_dialog.raise_()
return
self.network_dialog = NetworkDialog(
network=self.daemon.network,
config=self.config,
network_updated_signal_obj=self.network_updated_signal_obj)
self.network_dialog.show()
def _create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
self.build_tray_menu()
w.warn_if_testnet()
w.warn_if_watching_only()
return w
def count_wizards_in_progress(func):
def wrapper(self: 'ElectrumGui', *args, **kwargs):
with self._num_wizards_lock:
self._num_wizards_in_progress += 1
try:
return func(self, *args, **kwargs)
finally:
with self._num_wizards_lock:
self._num_wizards_in_progress -= 1
self._maybe_quit_if_no_windows_open()
return wrapper
@count_wizards_in_progress
def start_new_window(self, path, uri, *, app_is_starting=False) -> Optional[ElectrumWindow]:
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it'''
wallet = None
try:
wallet = self.daemon.load_wallet(path, None)
except Exception as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (1):\n' + repr(e))
            # if the app is starting, still let the wizard appear
if not app_is_starting:
return
if not wallet:
try:
wallet = self._start_wizard_to_select_or_create_wallet(path)
except (WalletFileException, BitcoinException) as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (2):\n' + repr(e))
if not wallet:
return
# create or raise window
try:
for window in self.windows:
if window.wallet.storage.path == wallet.storage.path:
break
else:
window = self._create_window_for_wallet(wallet)
except Exception as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot create window for wallet') + ':\n' + repr(e))
if app_is_starting:
wallet_dir = os.path.dirname(path)
path = os.path.join(wallet_dir, get_new_wallet_name(wallet_dir))
self.start_new_window(path, uri)
return
if uri:
window.pay_to_URI(uri)
window.bring_to_top()
window.setWindowState(window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
window.activateWindow()
return window
def _start_wizard_to_select_or_create_wallet(self, path) -> Optional[Abstract_Wallet]:
wizard = InstallWizard(self.config, self.app, self.plugins, gui_object=self)
try:
path, storage = wizard.select_storage(path, self.daemon.get_wallet)
# storage is None if file does not exist
if storage is None:
wizard.path = path # needed by trustedcoin plugin
wizard.run('new')
storage, db = wizard.create_storage(path)
else:
db = WalletDB(storage.read(), manual_upgrades=False)
wizard.run_upgrades(storage, db)
except (UserCancelled, GoBack):
return
except WalletAlreadyOpenInMemory as e:
return e.wallet
finally:
wizard.terminate()
# return if wallet creation is not complete
if storage is None or db.get_action():
return
wallet = Wallet(db, storage, config=self.config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
return wallet
def close_window(self, window: ElectrumWindow):
if window in self.windows:
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
if not self.windows:
self.config.save_last_wallet(window.wallet)
run_hook('on_close_window', window)
self.daemon.stop_wallet(window.wallet.storage.path)
def init_network(self):
# Show network dialog if config does not exist
if self.daemon.network:
if self.config.get('auto_connect') is None:
wizard = InstallWizard(self.config, self.app, self.plugins, gui_object=self)
wizard.init_network(self.daemon.network)
wizard.terminate()
def main(self):
# setup Ctrl-C handling and tear-down code first, so that user can easily exit whenever
self.app.setQuitOnLastWindowClosed(False) # so _we_ can decide whether to quit
self.app.lastWindowClosed.connect(self._maybe_quit_if_no_windows_open)
self.app.aboutToQuit.connect(self._cleanup_before_exit)
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
# hook for crash reporter
Exception_Hook.maybe_setup(config=self.config)
# first-start network-setup
try:
self.init_network()
except UserCancelled:
return
except GoBack:
return
except Exception as e:
self.logger.exception('')
return
# start wizard to select/create wallet
self.timer.start()
path = self.config.get_wallet_path(use_gui_last_wallet=True)
try:
if not self.start_new_window(path, self.config.get('url'), app_is_starting=True):
return
except Exception as e:
self.logger.error("error loading wallet (or creating window for it)")
send_exception_to_crash_reporter(e)
# Let Qt event loop start properly so that crash reporter window can appear.
# We will shutdown when the user closes that window, via lastWindowClosed signal.
# main loop
self.logger.info("starting Qt main loop")
self.app.exec_()
# on some platforms the exec_ call may not return, so use _cleanup_before_exit
def stop(self):
self.logger.info('closing GUI')
self.app.quit()
|
the-stack_106_31794 | import pytest
import gym
from gym.spaces import MultiDiscrete
from regym.environments import Task, EnvType
from regym.environments import gym_parser
@pytest.fixture
def RPS_env():
import gym_rock_paper_scissors
return gym.make('RockPaperScissors-v0')
@pytest.fixture
def Pendulum_env():
return gym.make('Pendulum-v0')
def test_multidiscrete_action_flattening():
space = MultiDiscrete([3, 3, 2, 3])
expected_action_space_size = 54
action_space_size = gym_parser.compute_multidiscrete_space_size(space.nvec)
assert action_space_size == expected_action_space_size
def test_RPS_get_observation_dimensions(RPS_env):
expected_observation_dim = [10, 10, 10]
expected_observation_size = 30
expected_observation_type = 'Discrete'
observation_dims, observation_size, observation_type = \
gym_parser.get_observation_dimensions_and_type(RPS_env)
assert expected_observation_dim == observation_dims
assert expected_observation_size == observation_size
assert expected_observation_type == observation_type
def test_RPS_get_action_dimensions(RPS_env):
expected_action_dim = 3
expected_action_size = 3
expected_action_type = 'Discrete'
a = gym_parser.get_action_dimensions_and_type(RPS_env)
action_dims, action_dims_size, action_type = \
gym_parser.get_action_dimensions_and_type(RPS_env)
assert expected_action_dim == action_dims
assert expected_action_size == action_dims_size
assert expected_action_type == action_type
def test_creating_single_agent_env_with_multiagent_envtype_raises_value_error(Pendulum_env):
with pytest.raises(ValueError) as _:
_ = gym_parser.parse_gym_environment(Pendulum_env, env_type=EnvType.MULTIAGENT_SIMULTANEOUS_ACTION)
with pytest.raises(ValueError) as _:
_ = gym_parser.parse_gym_environment(Pendulum_env, env_type=EnvType.MULTIAGENT_SEQUENTIAL_ACTION)
def test_creating_multiagent_env_with_single_agent_envtype_raises_value_error(RPS_env):
with pytest.raises(ValueError) as _:
_ = gym_parser.parse_gym_environment(RPS_env, env_type=EnvType.SINGLE_AGENT)
|
the-stack_106_31795 | """IPython terminal interface using prompt_toolkit in place of readline"""
from __future__ import print_function
import base64
import errno
from getpass import getpass
from io import BytesIO
import os
import signal
import subprocess
import sys
import time
from warnings import warn
try:
from queue import Empty # Py 3
except ImportError:
from Queue import Empty # Py 2
from zmq import ZMQError
from IPython.core import page
from IPython.utils.py3compat import cast_unicode_py2, input
from ipython_genutils.tempdir import NamedFileInTemporaryDirectory
from traitlets import (Bool, Integer, Float, Unicode, List, Dict, Enum,
Instance, Any)
from traitlets.config import SingletonConfigurable
from .completer import ZMQCompleter
from .zmqhistory import ZMQHistoryManager
from . import __version__
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
from prompt_toolkit.filters import HasFocus, HasSelection, ViInsertMode, EmacsInsertMode
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.shortcuts import create_prompt_application, create_eventloop, create_output
from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.key_binding.vi_state import InputMode
from prompt_toolkit.key_binding.bindings.vi import ViStateFilter
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout.lexers import PygmentsLexer
from prompt_toolkit.styles import PygmentsStyle
from pygments.styles import get_style_by_name
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from pygments.token import Token
def ask_yes_no(prompt, default=None, interrupt=None):
"""Asks a question and returns a boolean (y/n) answer.
If default is given (one of 'y','n'), it is used if the user input is
empty. If interrupt is given (one of 'y','n'), it is used if the user
presses Ctrl-C. Otherwise the question is repeated until an answer is
given.
An EOF is treated as the default answer. If there is no default, an
exception is raised to prevent infinite loops.
Valid answers are: y/yes/n/no (match is not case sensitive)."""
answers = {'y':True,'n':False,'yes':True,'no':False}
ans = None
while ans not in answers.keys():
try:
ans = input(prompt+' ').lower()
if not ans: # response was an empty string
ans = default
except KeyboardInterrupt:
if interrupt:
ans = interrupt
except EOFError:
if default in answers.keys():
ans = default
print()
else:
raise
return answers[ans]
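# Example: ask_yes_no("Overwrite file?", default='n', interrupt='n') returns
# False when the user just presses Enter or hits Ctrl-C, and True only for an
# explicit y/yes answer.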
def get_pygments_lexer(name):
name = name.lower()
if name == 'ipython2':
from IPython.lib.lexers import IPythonLexer
return IPythonLexer
elif name == 'ipython3':
from IPython.lib.lexers import IPython3Lexer
return IPython3Lexer
else:
try:
return get_lexer_by_name(name).__class__
except ClassNotFound:
warn("No lexer found for language %r. Treating as plain text." % name)
from pygments.lexers.special import TextLexer
return TextLexer
class JupyterPTCompleter(Completer):
"""Adaptor to provide kernel completions to prompt_toolkit"""
def __init__(self, jup_completer):
self.jup_completer = jup_completer
def get_completions(self, document, complete_event):
if not document.current_line.strip():
return
content = self.jup_completer.complete_request(
code=document.text,
cursor_pos=document.cursor_position
)
start_pos = content['cursor_start'] - document.cursor_position
for m in content['matches']:
yield Completion(m, start_position=start_pos)
class ZMQTerminalInteractiveShell(SingletonConfigurable):
readline_use = False
pt_cli = None
_executing = False
_execution_state = Unicode('')
_pending_clearoutput = False
_eventloop = None
editing_mode = Unicode('emacs', config=True,
help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
)
highlighting_style = Unicode('', config=True,
help="The name of a Pygments style to use for syntax highlighting"
)
highlighting_style_overrides = Dict(config=True,
help="Override highlighting format for specific tokens"
)
true_color = Bool(False, config=True,
help=("Use 24bit colors instead of 256 colors in prompt highlighting. "
"If your terminal supports true color, the following command "
"should print 'TRUECOLOR' in orange: "
"printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"")
)
history_load_length = Integer(1000, config=True,
help="How many history items to load into memory"
)
banner = Unicode('Jupyter console {version}\n\n{kernel_banner}', config=True,
help=("Text to display before the first prompt. Will be formatted with "
"variables {version} and {kernel_banner}.")
)
kernel_timeout = Float(60, config=True,
help="""Timeout for giving up on a kernel (in seconds).
On first connect and restart, the console tests whether the
kernel is running and responsive by sending kernel_info_requests.
This sets the timeout in seconds for how long the kernel can take
before being presumed dead.
"""
)
image_handler = Enum(('PIL', 'stream', 'tempfile', 'callable'),
'PIL', config=True, allow_none=True, help=
"""
Handler for image type output. This is useful, for example,
        when connecting to a kernel in which the pylab inline backend is
        activated. There are four handlers defined. 'PIL': Use the
        Python Imaging Library to pop up the image; 'stream': Use an
        external program to show the image. Image will be fed into
        the STDIN of the program. You will need to configure
        `stream_image_handler`; 'tempfile': Use an external program to
        show the image. Image will be saved in a temporary file and
        the program is called with the temporary file. You will need
to configure `tempfile_image_handler`; 'callable': You can set
any Python callable which is called with the image data. You
will need to configure `callable_image_handler`.
"""
)
stream_image_handler = List(config=True, help=
"""
Command to invoke an image viewer program when you are using
        the 'stream' image handler. This option is a list of strings where
        the first element is the command itself and the remaining elements
        are its options. Raw image data is fed via STDIN to
the program.
"""
)
tempfile_image_handler = List(config=True, help=
"""
Command to invoke an image viewer program when you are using
        the 'tempfile' image handler. This option is a list of strings
        where the first element is the command itself and the remaining
        elements are its options. You can use {file} and
{format} in the string to represent the location of the
generated image file and image format.
"""
)
callable_image_handler = Any(config=True, help=
"""
Callable object called via 'callable' image handler with one
argument, `data`, which is `msg["content"]["data"]` where
        `msg` is the message from the iopub channel. For example, you can
find base64 encoded PNG data as `data['image/png']`. If your function
can't handle the data supplied, it should return `False` to indicate
this.
"""
)
mime_preference = List(
default_value=['image/png', 'image/jpeg', 'image/svg+xml'],
config=True, help=
"""
Preferred object representation MIME type in order. First
matched MIME type will be used.
"""
)
use_kernel_is_complete = Bool(True, config=True,
help="""Whether to use the kernel's is_complete message
handling. If False, then the frontend will use its
own is_complete handler.
"""
)
kernel_is_complete_timeout = Float(1, config=True,
help="""Timeout (in seconds) for giving up on a kernel's is_complete
response.
If the kernel does not respond at any point within this time,
the kernel will no longer be asked if code is complete, and the
console will default to the built-in is_complete test.
"""
)
confirm_exit = Bool(True, config=True,
help="""Set to display confirmation dialog on exit.
You can always use 'exit' or 'quit', to force a
direct exit without any confirmation.
"""
)
manager = Instance('jupyter_client.KernelManager', allow_none=True)
client = Instance('jupyter_client.KernelClient', allow_none=True)
def _client_changed(self, name, old, new):
self.session_id = new.session.session
session_id = Unicode()
def _banner1_default(self):
return "Jupyter Console {version}\n".format(version=__version__)
simple_prompt = Bool(False,
help="""Use simple fallback prompt. Features may be limited."""
).tag(config=True)
def __init__(self, **kwargs):
# This is where traits with a config_key argument are updated
# from the values on config.
super(ZMQTerminalInteractiveShell, self).__init__(**kwargs)
self.configurables = [self]
self.init_history()
self.init_completer()
self.init_io()
self.init_kernel_info()
self.init_prompt_toolkit_cli()
self.keep_running = True
self.execution_count = 1
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
self.Completer = ZMQCompleter(self, self.client, config=self.config)
def init_history(self):
"""Sets up the command history. """
self.history_manager = ZMQHistoryManager(client=self.client)
self.configurables.append(self.history_manager)
def get_prompt_tokens(self, cli):
return [
(Token.Prompt, 'In ['),
(Token.PromptNum, str(self.execution_count)),
(Token.Prompt, ']: '),
]
def get_continuation_tokens(self, cli, width):
return [
(Token.Prompt, (' ' * (width - 2)) + ': '),
]
def get_out_prompt_tokens(self):
return [
(Token.OutPrompt, 'Out['),
(Token.OutPromptNum, str(self.execution_count)),
(Token.OutPrompt, ']: ')
]
def print_out_prompt(self):
self.pt_cli.print_tokens(self.get_out_prompt_tokens())
kernel_info = {}
def init_kernel_info(self):
"""Wait for a kernel to be ready, and store kernel info"""
timeout = self.kernel_timeout
tic = time.time()
self.client.hb_channel.unpause()
msg_id = self.client.kernel_info()
while True:
try:
reply = self.client.get_shell_msg(timeout=1)
except Empty:
if (time.time() - tic) > timeout:
raise RuntimeError("Kernel didn't respond to kernel_info_request")
else:
if reply['parent_header'].get('msg_id') == msg_id:
self.kernel_info = reply['content']
return
def show_banner(self):
print(self.banner.format(version=__version__,
kernel_banner=self.kernel_info.get('banner', '')))
def init_prompt_toolkit_cli(self):
if self.simple_prompt or ('JUPYTER_CONSOLE_TEST' in os.environ):
# Simple restricted interface for tests so we can find prompts with
# pexpect. Multi-line input not supported.
def prompt():
return cast_unicode_py2(input('In [%d]: ' % self.execution_count))
self.prompt_for_code = prompt
self.print_out_prompt = \
lambda: print('Out[%d]: ' % self.execution_count, end='')
return
kbmanager = KeyBindingManager.for_prompt()
insert_mode = ViInsertMode() | EmacsInsertMode()
# Ctrl+J == Enter, seemingly
@kbmanager.registry.add_binding(Keys.ControlJ,
filter=(HasFocus(DEFAULT_BUFFER)
& ~HasSelection()
& insert_mode
))
def _(event):
b = event.current_buffer
d = b.document
if not (d.on_last_line or d.cursor_position_row >= d.line_count
- d.empty_line_count_at_the_end()):
b.newline()
return
more, indent = self.check_complete(d.text)
if (not more) and b.accept_action.is_returnable:
b.accept_action.validate_and_handle(event.cli, b)
else:
b.insert_text('\n' + indent)
@kbmanager.registry.add_binding(Keys.ControlC, filter=HasFocus(DEFAULT_BUFFER))
def _(event):
event.current_buffer.reset()
# Pre-populate history from IPython's history database
history = InMemoryHistory()
last_cell = u""
for _, _, cell in self.history_manager.get_tail(self.history_load_length,
include_latest=True):
# Ignore blank lines and consecutive duplicates
cell = cell.rstrip()
if cell and (cell != last_cell):
history.append(cell)
last_cell = cell  # remember the previous entry so consecutive duplicates are skipped
style_overrides = {
Token.Prompt: '#009900',
Token.PromptNum: '#00ff00 bold',
Token.OutPrompt: '#ff2200',
Token.OutPromptNum: '#ff0000 bold',
}
if self.highlighting_style:
style_cls = get_style_by_name(self.highlighting_style)
else:
style_cls = get_style_by_name('default')
# The default theme needs to be visible on both a dark background
# and a light background, because we can't tell what the terminal
# looks like. These tweaks to the default theme help with that.
style_overrides.update({
Token.Number: '#007700',
Token.Operator: 'noinherit',
Token.String: '#BB6622',
Token.Name.Function: '#2080D0',
Token.Name.Class: 'bold #2080D0',
Token.Name.Namespace: 'bold #2080D0',
})
style_overrides.update(self.highlighting_style_overrides)
style = PygmentsStyle.from_defaults(pygments_style_cls=style_cls,
style_dict=style_overrides)
editing_mode = getattr(EditingMode, self.editing_mode.upper())
langinfo = self.kernel_info.get('language_info', {})
lexer = langinfo.get('pygments_lexer', langinfo.get('name', 'text'))
app = create_prompt_application(multiline=True,
editing_mode=editing_mode,
lexer=PygmentsLexer(get_pygments_lexer(lexer)),
get_prompt_tokens=self.get_prompt_tokens,
get_continuation_tokens=self.get_continuation_tokens,
key_bindings_registry=kbmanager.registry,
history=history,
completer=JupyterPTCompleter(self.Completer),
enable_history_search=True,
style=style,
)
self._eventloop = create_eventloop()
self.pt_cli = CommandLineInterface(app,
eventloop=self._eventloop,
output=create_output(true_color=self.true_color),
)
def prompt_for_code(self):
document = self.pt_cli.run(pre_run=self.pre_prompt,
reset_current_buffer=True)
return document.text
def init_io(self):
if sys.platform not in {'win32', 'cli'}:
return
import colorama
colorama.init()
def check_complete(self, code):
if self.use_kernel_is_complete:
msg_id = self.client.is_complete(code)
try:
return self.handle_is_complete_reply(msg_id, timeout=self.kernel_is_complete_timeout)
except SyntaxError:
return False, ""
else:
lines = code.splitlines()
if len(lines):
more = (lines[-1] != "")
return more, ""
else:
return False, ""
def ask_exit(self):
self.keep_running = False
# This is set from payloads in handle_execute_reply
next_input = None
def pre_prompt(self):
if self.next_input:
b = self.pt_cli.application.buffer
b.text = cast_unicode_py2(self.next_input)
self.next_input = None
# Move the cursor to the end
b.cursor_position += b.document.get_end_of_document_position()
def interact(self, display_banner=None):
while self.keep_running:
print('\n', end='')
try:
code = self.prompt_for_code()
except EOFError:
if (not self.confirm_exit) \
or ask_yes_no('Do you really want to exit ([y]/n)?','y','n'):
self.ask_exit()
else:
if code:
self.run_cell(code, store_history=True)
def mainloop(self):
self.keepkernel = False
# An extra layer of protection in case someone mashing Ctrl-C breaks
# out of our internal code.
while True:
try:
self.interact()
break
except KeyboardInterrupt:
print("\nKeyboardInterrupt escaped interact()\n")
if self._eventloop:
self._eventloop.close()
if self.keepkernel and not self.own_kernel:
print('keeping kernel alive')
elif self.keepkernel and self.own_kernel :
print("owning kernel, cannot keep it alive")
self.client.shutdown()
else :
print("Shutting down kernel")
self.client.shutdown()
def run_cell(self, cell, store_history=True):
"""Run a complete IPython cell.
Parameters
----------
cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
"""
if (not cell) or cell.isspace():
# pressing enter flushes any pending display
self.handle_iopub()
return
# flush stale replies, which could have been ignored, due to missed heartbeats
while self.client.shell_channel.msg_ready():
self.client.shell_channel.get_msg()
# execute takes 'hidden', which is the inverse of store_hist
msg_id = self.client.execute(cell, not store_history)
# first thing is wait for any side effects (output, stdin, etc.)
self._executing = True
self._execution_state = "busy"
while self._execution_state != 'idle' and self.client.is_alive():
try:
self.handle_input_request(msg_id, timeout=0.05)
except Empty:
# display intermediate print statements, etc.
self.handle_iopub(msg_id)
except ZMQError as e:
# Carry on if polling was interrupted by a signal
if e.errno != errno.EINTR:
raise
# after all of that is done, wait for the execute reply
while self.client.is_alive():
try:
self.handle_execute_reply(msg_id, timeout=0.05)
except Empty:
pass
else:
break
self._executing = False
#-----------------
# message handlers
#-----------------
def handle_execute_reply(self, msg_id, timeout=None):
msg = self.client.shell_channel.get_msg(block=False, timeout=timeout)
if msg["parent_header"].get("msg_id", None) == msg_id:
self.handle_iopub(msg_id)
content = msg["content"]
status = content['status']
if status == 'aborted':
self.write('Aborted\n')
return
elif status == 'ok':
# handle payloads
for item in content.get("payload", []):
source = item['source']
if source == 'page':
page.page(item['data']['text/plain'])
elif source == 'set_next_input':
self.next_input = item['text']
elif source == 'ask_exit':
self.keepkernel = item.get('keepkernel', False)
self.ask_exit()
elif status == 'error':
pass
self.execution_count = int(content["execution_count"] + 1)
def handle_is_complete_reply(self, msg_id, timeout=None):
"""
Wait for a response from the kernel, and return two values:
more? - (boolean) should the frontend ask for more input
indent - an indent string to prefix the input
Overloaded methods may want to examine the complete source. It is
in the self._source_lines_buffered list.
"""
## Get the is_complete response:
msg = None
try:
msg = self.client.shell_channel.get_msg(block=True, timeout=timeout)
except Empty:
warn('The kernel did not respond to an is_complete_request. '
'Setting `use_kernel_is_complete` to False.')
self.use_kernel_is_complete = False
return False, ""
## Handle response:
if msg["parent_header"].get("msg_id", None) != msg_id:
warn('The kernel did not respond properly to an is_complete_request: %s.' % str(msg))
return False, ""
else:
status = msg["content"].get("status", None)
indent = msg["content"].get("indent", "")
## Return more? and indent string
if status == "complete":
return False, indent
elif status == "incomplete":
return True, indent
elif status == "invalid":
raise SyntaxError()
elif status == "unknown":
return False, indent
else:
warn('The kernel sent an invalid is_complete_reply status: "%s".' % status)
return False, indent
include_other_output = Bool(False, config=True,
help="""Whether to include output from clients
other than this one sharing the same kernel.
Outputs are not displayed until enter is pressed.
"""
)
other_output_prefix = Unicode("[remote] ", config=True,
help="""Prefix to add to outputs coming from clients other than this one.
Only relevant if include_other_output is True.
"""
)
def from_here(self, msg):
"""Return whether a message is from this session"""
return msg['parent_header'].get("session", self.session_id) == self.session_id
def include_output(self, msg):
"""Return whether we should include a given output message"""
from_here = self.from_here(msg)
if msg['msg_type'] == 'execute_input':
# only echo inputs not from here
return self.include_other_output and not from_here
if self.include_other_output:
return True
else:
return from_here
def handle_iopub(self, msg_id=''):
"""Process messages on the IOPub channel
This method consumes and processes messages on the IOPub channel,
such as stdout, stderr, execute_result and status.
It only displays output that is caused by this session.
"""
while self.client.iopub_channel.msg_ready():
sub_msg = self.client.iopub_channel.get_msg()
msg_type = sub_msg['header']['msg_type']
parent = sub_msg["parent_header"]
if self.include_output(sub_msg):
if msg_type == 'status':
self._execution_state = sub_msg["content"]["execution_state"]
elif msg_type == 'stream':
if sub_msg["content"]["name"] == "stdout":
if self._pending_clearoutput:
print("\r", end="")
self._pending_clearoutput = False
print(sub_msg["content"]["text"], end="")
sys.stdout.flush()
elif sub_msg["content"]["name"] == "stderr":
if self._pending_clearoutput:
print("\r", file=sys.stderr, end="")
self._pending_clearoutput = False
print(sub_msg["content"]["text"], file=sys.stderr, end="")
sys.stderr.flush()
elif msg_type == 'execute_result':
if self._pending_clearoutput:
print("\r", end="")
self._pending_clearoutput = False
self.execution_count = int(sub_msg["content"]["execution_count"])
if not self.from_here(sub_msg):
sys.stdout.write(self.other_output_prefix)
format_dict = sub_msg["content"]["data"]
self.handle_rich_data(format_dict)
if 'text/plain' not in format_dict:
continue
# prompt_toolkit writes the prompt at a slightly lower level,
# so flush streams first to ensure correct ordering.
sys.stdout.flush()
sys.stderr.flush()
self.print_out_prompt()
text_repr = format_dict['text/plain']
if '\n' in text_repr:
# For multi-line results, start a new line after prompt
print()
print(text_repr)
elif msg_type == 'display_data':
data = sub_msg["content"]["data"]
handled = self.handle_rich_data(data)
if not handled:
if not self.from_here(sub_msg):
sys.stdout.write(self.other_output_prefix)
# if it was an image, we handled it by now
if 'text/plain' in data:
print(data['text/plain'])
elif msg_type == 'execute_input':
content = sub_msg['content']
if not self.from_here(sub_msg):
sys.stdout.write(self.other_output_prefix)
sys.stdout.write('In [{}]: '.format(content['execution_count']))
sys.stdout.write(content['code']+'\n')
elif msg_type == 'clear_output':
if sub_msg["content"]["wait"]:
self._pending_clearoutput = True
else:
print("\r", end="")
elif msg_type == 'error':
for frame in sub_msg["content"]["traceback"]:
print(frame, file=sys.stderr)
_imagemime = {
'image/png': 'png',
'image/jpeg': 'jpeg',
'image/svg+xml': 'svg',
}
def handle_rich_data(self, data):
for mime in self.mime_preference:
if mime in data and mime in self._imagemime:
if self.handle_image(data, mime):
return True
return False
def handle_image(self, data, mime):
handler = getattr(
self, 'handle_image_{0}'.format(self.image_handler), None)
if handler:
return handler(data, mime)
def handle_image_PIL(self, data, mime):
if mime not in ('image/png', 'image/jpeg'):
return False
try:
from PIL import Image, ImageShow
except ImportError:
return False
raw = base64.decodestring(data[mime].encode('ascii'))
img = Image.open(BytesIO(raw))
return ImageShow.show(img)
def handle_image_stream(self, data, mime):
raw = base64.decodestring(data[mime].encode('ascii'))
imageformat = self._imagemime[mime]
fmt = dict(format=imageformat)
args = [s.format(**fmt) for s in self.stream_image_handler]
with open(os.devnull, 'w') as devnull:
proc = subprocess.Popen(
args, stdin=subprocess.PIPE,
stdout=devnull, stderr=devnull)
proc.communicate(raw)
return (proc.returncode == 0)
def handle_image_tempfile(self, data, mime):
raw = base64.decodestring(data[mime].encode('ascii'))
imageformat = self._imagemime[mime]
filename = 'tmp.{0}'.format(imageformat)
with NamedFileInTemporaryDirectory(filename) as f, \
open(os.devnull, 'w') as devnull:
f.write(raw)
f.flush()
fmt = dict(file=f.name, format=imageformat)
args = [s.format(**fmt) for s in self.tempfile_image_handler]
rc = subprocess.call(args, stdout=devnull, stderr=devnull)
return (rc == 0)
def handle_image_callable(self, data, mime):
res = self.callable_image_handler(data)
if res is not False:
# If handler func returns e.g. None, assume it has handled the data.
res = True
return res
def handle_input_request(self, msg_id, timeout=0.1):
""" Method to capture raw_input
"""
req = self.client.stdin_channel.get_msg(timeout=timeout)
# in case any iopub came while we were waiting:
self.handle_iopub(msg_id)
if msg_id == req["parent_header"].get("msg_id"):
# wrap SIGINT handler
real_handler = signal.getsignal(signal.SIGINT)
def double_int(sig,frame):
# call real handler (forwards sigint to kernel),
# then raise local interrupt, stopping local raw_input
real_handler(sig,frame)
raise KeyboardInterrupt
signal.signal(signal.SIGINT, double_int)
content = req['content']
read = getpass if content.get('password', False) else input
try:
raw_data = read(content["prompt"])
except EOFError:
# turn EOFError into EOF character
raw_data = '\x04'
except KeyboardInterrupt:
sys.stdout.write('\n')
return
finally:
# restore SIGINT handler
signal.signal(signal.SIGINT, real_handler)
# only send stdin reply if there *was not* another request
# or execution finished while we were reading.
if not (self.client.stdin_channel.msg_ready() or self.client.shell_channel.msg_ready()):
self.client.input(raw_data)
|
the-stack_106_31796 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import ctypes
from .logging import get_logger
_logger = get_logger(__name__)
if sys.platform == 'darwin':
name = 'libzbar.dylib'
elif sys.platform in ('windows', 'win32'):
name = 'libzbar-0.dll'
else:
name = 'libzbar.so.0'
try:
libzbar = ctypes.cdll.LoadLibrary(os.path.join(os.path.dirname(__file__), name))
except BaseException as e1:
try:
libzbar = ctypes.cdll.LoadLibrary(name)
except BaseException as e2:
libzbar = None
if sys.platform != 'darwin':
_logger.error(f"failed to load zbar. exceptions: {[e1,e2]!r}")
def scan_barcode_ctypes(device='', timeout=-1, display=True, threaded=False):
if libzbar is None:
raise RuntimeError("Cannot start QR scanner; zbar not available.")
libzbar.zbar_symbol_get_data.restype = ctypes.c_char_p
libzbar.zbar_processor_create.restype = ctypes.POINTER(ctypes.c_int)
libzbar.zbar_processor_get_results.restype = ctypes.POINTER(ctypes.c_int)
libzbar.zbar_symbol_set_first_symbol.restype = ctypes.POINTER(ctypes.c_int)
# libzbar.zbar_set_verbosity(100) # verbose logs for debugging
proc = libzbar.zbar_processor_create(threaded)
libzbar.zbar_processor_request_size(proc, 640, 480)
if libzbar.zbar_processor_init(proc, device.encode('utf-8'), display) != 0:
raise RuntimeError("Can not start QR scanner; initialization failed.")
libzbar.zbar_processor_set_visible(proc)
if libzbar.zbar_process_one(proc, timeout):
symbols = libzbar.zbar_processor_get_results(proc)
else:
symbols = None
libzbar.zbar_processor_destroy(proc)
if symbols is None:
return
if not libzbar.zbar_symbol_set_get_size(symbols):
return
symbol = libzbar.zbar_symbol_set_first_symbol(symbols)
data = libzbar.zbar_symbol_get_data(symbol)
return data.decode('utf8')
def scan_barcode_osx(*args_ignored, **kwargs_ignored):
import subprocess
# NOTE: This code needs to be modified if the position of this file changes with respect to the helper app!
# This assumes the built macOS .app bundle which ends up putting the helper app in
# .app/contrib/osx/CalinsQRReader/build/Release/CalinsQRReader.app.
root_ec_dir = os.path.abspath(os.path.dirname(__file__) + "/../")
prog = root_ec_dir + "/" + "contrib/osx/CalinsQRReader/build/Release/CalinsQRReader.app/Contents/MacOS/CalinsQRReader"
if not os.path.exists(prog):
raise RuntimeError("Cannot start QR scanner; helper app not found.")
data = ''
try:
# This will run the "CalinsQRReader" helper app (which also gets bundled with the built .app)
# Just like the zbar implementation -- the main app will hang until the QR window returns a QR code
# (or is closed). Communication with the subprocess is done via stdout.
# See contrib/CalinsQRReader for the helper app source code.
with subprocess.Popen([prog], stdout=subprocess.PIPE) as p:
data = p.stdout.read().decode('utf-8').strip()
return data
except OSError as e:
raise RuntimeError("Cannot start camera helper app; {}".format(e.strerror))
scan_barcode = scan_barcode_osx if sys.platform == 'darwin' else scan_barcode_ctypes
def _find_system_cameras():
device_root = "/sys/class/video4linux"
devices = {} # Name -> device
if os.path.exists(device_root):
for device in os.listdir(device_root):
path = os.path.join(device_root, device, 'name')
try:
with open(path, encoding='utf-8') as f:
name = f.read()
except Exception:
continue
name = name.strip('\n')
devices[name] = os.path.join("/dev", device)
return devices
if __name__ == "__main__":
print(scan_barcode())
|
the-stack_106_31798 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from argparse import Action, ArgumentTypeError, Namespace, _ActionsContainer
from pex import pex_warnings
from pex.argparse import HandleBoolAction
from pex.network_configuration import NetworkConfiguration
from pex.orderedset import OrderedSet
from pex.resolve.resolver_configuration import (
PYPI,
PexRepositoryConfiguration,
PipConfiguration,
ReposConfiguration,
ResolverVersion,
)
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from typing import Union
class _ManylinuxAction(Action):
def __init__(self, *args, **kwargs):
kwargs["nargs"] = "?"
super(_ManylinuxAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, value, option_str=None):
if option_str.startswith("--no"):
setattr(namespace, self.dest, None)
elif value.startswith("manylinux"):
setattr(namespace, self.dest, value)
else:
raise ArgumentTypeError(
"Please specify a manylinux standard; ie: --manylinux=manylinux1. "
"Given {}".format(value)
)
class _HandleTransitiveAction(Action):
def __init__(self, *args, **kwargs):
kwargs["nargs"] = 0
super(_HandleTransitiveAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, value, option_str=None):
setattr(namespace, self.dest, option_str == "--transitive")
def register(
parser, # type: _ActionsContainer
include_pex_repository=False, # type: bool
):
# type: (...) -> None
"""Register resolver configuration options with the given parser.
:param parser: The parser to register resolver configuration options with.
:param include_pex_repository: Whether to include the `--pex-repository` option.
"""
default_resolver_configuration = PipConfiguration()
parser.add_argument(
"--resolver-version",
dest="resolver_version",
default=default_resolver_configuration.resolver_version,
choices=ResolverVersion.values(),
type=ResolverVersion.for_value,
help=(
"The dependency resolver version to use. Read more at "
"https://pip.pypa.io/en/stable/user_guide/#resolver-changes-2020"
),
)
register_repos_options(parser)
register_network_options(parser)
parser.add_argument(
"--cache-ttl",
metavar="DEPRECATED",
default=None,
type=int,
help="Deprecated: No longer used.",
)
parser.add_argument(
"-H",
"--header",
dest="headers",
metavar="DEPRECATED",
default=None,
type=str,
action="append",
help="Deprecated: No longer used.",
)
if include_pex_repository:
parser.add_argument(
"--pex-repository",
dest="pex_repository",
metavar="FILE",
default=None,
type=str,
help=(
"Resolve requirements from the given PEX file instead of from --index servers or "
"--find-links repos."
),
)
parser.add_argument(
"--pre",
"--no-pre",
dest="allow_prereleases",
default=default_resolver_configuration.allow_prereleases,
action=HandleBoolAction,
help="Whether to include pre-release and development versions of requirements.",
)
parser.add_argument(
"--wheel",
"--binary",
"--no-wheel",
"--no-use-wheel",
"--no-binary",
"--no-use-binary",
dest="allow_wheels",
default=default_resolver_configuration.allow_wheels,
action=HandleBoolAction,
help="Whether to allow binary distributions.",
)
parser.add_argument(
"--build",
"--no-build",
dest="allow_builds",
default=default_resolver_configuration.allow_builds,
action=HandleBoolAction,
help="Whether to allow building of distributions from source.",
)
parser.add_argument(
"--prefer-wheel",
"--prefer-binary",
"--no-prefer-wheel",
"--no-prefer-binary",
dest="prefer_older_binary",
default=default_resolver_configuration.prefer_older_binary,
action=HandleBoolAction,
help=(
"Whether to prefer older binary distributions to newer source distributions (prefer "
"not building wheels)."
),
)
parser.add_argument(
"--force-pep517",
"--use-pep517",
"--no-use-pep517",
dest="use_pep517",
default=default_resolver_configuration.use_pep517,
action=HandleBoolAction,
help=(
"Whether to force use of PEP 517 for building source distributions into wheels ("
"https://www.python.org/dev/peps/pep-0518) or force direct invocation of"
"`setup.py bdist_wheel` (which requires all source distributions have a `setup.py` "
"based build). Defaults to using PEP-517 only when a `pyproject.toml` file is present "
"with a `build-system` section. If PEP-517 is forced (--use-pep517 is passed) and no "
"`pyproject.toml` file is present or one is but does not have a `build-system` section "
"defined, then the build is executed as if a `pyproject.toml` was present with a "
'`build-system` section comprised of `requires = ["setuptools>=40.8.0", "wheel"]` and '
'`build-backend = "setuptools.build_meta:__legacy__"`.'
),
)
parser.add_argument(
"--build-isolation",
"--no-build-isolation",
dest="build_isolation",
default=default_resolver_configuration.build_isolation,
action=HandleBoolAction,
help=(
"Disable `sys.path` isolation when building a modern source distribution. Build "
"dependencies specified by PEP 518 (https://www.python.org/dev/peps/pep-0518) must "
"already be installed on the `sys.path` if this option is used."
),
)
parser.add_argument(
"--transitive",
"--no-transitive",
"--intransitive",
dest="transitive",
default=default_resolver_configuration.transitive,
action=_HandleTransitiveAction,
help="Whether to transitively resolve requirements.",
)
register_max_jobs_option(parser)
def register_repos_options(parser):
# type: (_ActionsContainer) -> None
"""Register repos configuration options with the given parser.
:param parser: The parser to register repos configuration options with.
"""
parser.add_argument(
"--pypi",
"--no-pypi",
"--no-index",
dest="pypi",
action=HandleBoolAction,
default=True,
help="Whether to use PyPI to resolve dependencies.",
)
parser.add_argument(
"-f",
"--find-links",
"--repo",
metavar="PATH/URL",
action="append",
dest="find_links",
type=str,
help="Additional repository path (directory or URL) to look for requirements.",
)
parser.add_argument(
"-i",
"--index",
"--index-url",
metavar="URL",
action="append",
dest="indexes",
type=str,
help="Additional cheeseshop indices to use to satisfy requirements.",
)
def register_network_options(parser):
# type: (_ActionsContainer) -> None
"""Register network configuration options with the given parser.
:param parser: The parser to register network configuration options with.
"""
default_resolver_configuration = PipConfiguration()
default_network_configuration = default_resolver_configuration.network_configuration
parser.add_argument(
"--retries",
default=default_network_configuration.retries,
type=int,
help="Maximum number of retries each connection should attempt.",
)
parser.add_argument(
"--timeout",
metavar="SECS",
default=default_network_configuration.timeout,
type=int,
help="Set the socket timeout in seconds.",
)
parser.add_argument(
"--proxy",
type=str,
default=default_network_configuration.proxy,
help="Specify a proxy in the form http(s)://[user:passwd@]proxy.server:port.",
)
parser.add_argument(
"--cert",
metavar="PATH",
type=str,
default=default_network_configuration.cert,
help="Path to alternate CA bundle.",
)
parser.add_argument(
"--client-cert",
metavar="PATH",
type=str,
default=default_network_configuration.client_cert,
help=(
"Path to an SSL client certificate which should be a single file containing the "
"private key and the certificate in PEM format."
),
)
def register_max_jobs_option(parser):
# type: (_ActionsContainer) -> None
"""Register the max jobs configuration option with the given parser.
:param parser: The parser to register the max job option with.
"""
default_resolver_configuration = PipConfiguration()
parser.add_argument(
"-j",
"--jobs",
metavar="JOBS",
dest="max_jobs",
type=int,
default=default_resolver_configuration.max_jobs,
help=(
"The maximum number of parallel jobs to use when resolving, building and "
"installing distributions. You might want to increase the maximum number of "
"parallel jobs to potentially improve the latency of the pex creation process at "
"the expense of other processes on your system."
),
)
class InvalidConfigurationError(Exception):
"""Indicates an invalid resolver configuration."""
def configure(options):
# type: (Namespace) -> Union[PipConfiguration, PexRepositoryConfiguration]
"""Creates a resolver configuration from options registered by `register`.
:param options: The resolver configuration options.
:raise: :class:`InvalidConfigurationError` if the resolver configuration is invalid.
"""
pex_repository = getattr(options, "pex_repository", None)
if pex_repository and (options.indexes or options.find_links):
raise InvalidConfigurationError(
'The "--pex-repository" option cannot be used together with the "--index" or '
'"--find-links" options.'
)
if pex_repository:
return PexRepositoryConfiguration(
pex_repository=pex_repository,
network_configuration=create_network_configuration(options),
transitive=options.transitive,
)
return create_pip_configuration(options)
def create_pip_configuration(options):
# type: (Namespace) -> PipConfiguration
"""Creates a Pip configuration from options registered by `register`.
:param options: The Pip resolver configuration options.
"""
if options.cache_ttl:
pex_warnings.warn("The --cache-ttl option is deprecated and no longer has any effect.")
if options.headers:
pex_warnings.warn("The --header option is deprecated and no longer has any effect.")
repos_configuration = create_repos_configuration(options)
return PipConfiguration(
resolver_version=options.resolver_version,
repos_configuration=repos_configuration,
network_configuration=create_network_configuration(options),
allow_prereleases=options.allow_prereleases,
allow_wheels=options.allow_wheels,
allow_builds=options.allow_builds,
prefer_older_binary=options.prefer_older_binary,
use_pep517=options.use_pep517,
build_isolation=options.build_isolation,
transitive=options.transitive,
max_jobs=get_max_jobs_value(options),
)
def create_repos_configuration(options):
# type: (Namespace) -> ReposConfiguration
"""Creates a repos configuration from options registered by `register_repos_options`.
:param options: The Pip resolver configuration options.
"""
indexes = OrderedSet(
([PYPI] if options.pypi else []) + (options.indexes or [])
) # type: OrderedSet[str]
find_links = OrderedSet(options.find_links or ()) # type: OrderedSet[str]
return ReposConfiguration(indexes=tuple(indexes), find_links=tuple(find_links))
def create_network_configuration(options):
# type: (Namespace) -> NetworkConfiguration
"""Creates a network configuration from options registered by `register_network_options`.
:param options: The Pip resolver configuration options.
"""
return NetworkConfiguration(
retries=options.retries,
timeout=options.timeout,
proxy=options.proxy,
cert=options.cert,
client_cert=options.client_cert,
)
def get_max_jobs_value(options):
# type: (Namespace) -> int
"""Retrieves the max jobs value from the option registered by `register_max_jobs_option`.
:param options: The max jobs configuration option.
"""
return cast(int, options.max_jobs)
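# Minimal usage sketch (illustrative, assuming the `pex` package is importable):
# wire the registration helpers into an ArgumentParser, then turn the parsed
# options into a resolver configuration.
if __name__ == "__main__":
    from argparse import ArgumentParser

    example_parser = ArgumentParser(prog="resolver-options-demo")
    register(example_parser, include_pex_repository=True)
    example_options = example_parser.parse_args(
        ["--index-url", "https://pypi.org/simple", "--jobs", "4"]
    )
    # Without --pex-repository this yields a PipConfiguration.
    resolver_configuration = configure(example_options)
    print(type(resolver_configuration).__name__, get_max_jobs_value(example_options))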
|
the-stack_106_31800 | def to_dict(obj, class_key=None):
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = to_dict(v, class_key)
return data
elif hasattr(obj, "_ast"):
return to_dict(obj._ast())
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
return [to_dict(v, class_key) for v in obj]
elif hasattr(obj, "__dict__"):
data = dict([(key, to_dict(value, class_key))
for key, value in obj.__dict__.items()
if not callable(value) and not key.startswith('_')])
if class_key is not None and hasattr(obj, "__class__"):
data[class_key] = obj.__class__.__name__
return data
else:
return obj
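# Minimal usage sketch for to_dict (illustrative, using throwaway classes):
if __name__ == "__main__":
    class Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    class Path:
        def __init__(self, points):
            self.points = points

    path = Path([Point(0, 0), Point(1, 2)])
    # class_key adds each object's class name under the given key
    print(to_dict(path, class_key="type"))
    # -> {'points': [{'x': 0, 'y': 0, 'type': 'Point'}, {'x': 1, 'y': 2, 'type': 'Point'}], 'type': 'Path'}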
|
the-stack_106_31802 | import numpy as np
from scipy.linalg import expm
from matplotlib.pylab import *
sigma_x = 0.5*np.r_[[[0, 1],[1, 0]]]
sigma_y = 0.5*np.r_[[[0,-1j],[1j, 0]]]
sigma_z = 0.5*np.r_[[[1, 0],[0, -1]]]
print(sigma_x)
print(sigma_y)
print(sigma_z)
print('commutator test')
print(np.dot(sigma_x,sigma_y) - np.dot(sigma_y,sigma_x))
print(sigma_z * 1j)
print(np.allclose(sigma_z * 1j, np.dot(sigma_x,sigma_y) - np.dot(sigma_y,sigma_x)))
omega = 2. # frequency offset from carrier, Hz
dt = 0.01 # Dwell Time, s
pts = 1024 # Points in FID
T2 = 2. # T2 used for apodization
t = np.r_[0:pts] * dt
coil = sigma_x + 1j*sigma_y # Detection Operator (NMR Coil)
H = 2*np.pi * omega * sigma_z # Calculate Hamiltonian (only Zeeman)
P = expm(1j*H*dt) # Define Propagator
sigma = sigma_x # Initial Density Matrix (After 90-pulse)
M_list = []
for ix in range(pts):
M = np.trace(np.dot(coil,sigma)) # Detect
M_list.append(M) # Append to FID array
sigma = np.dot(np.dot(P,sigma),np.conjugate(P)) # Propagate Density Matrix
M = np.array(M_list)
fid = M*np.exp(-1.*t/T2) # Apply Apodization
f = np.r_[-0.5/dt:0.5/dt:1j*pts] # Calculate frequency
print(f)
spec = np.fft.fftshift(np.fft.fft(fid)) # Fourier Transform
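# Sanity check (illustrative): the magnitude spectrum should peak at the offset
# frequency omega; the sign depends on the FFT and detection conventions above.
peak_freq = f[np.argmax(np.abs(spec))]
print('spectrum peak at {:.2f} Hz (offset omega = {:.2f} Hz)'.format(peak_freq, omega))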
figure('fid')
title('FID')
plot(t, np.real(fid), label = 'real')
plot(t, np.imag(fid), label = 'imag')
legend()
xlabel('Time (s)')
figure('spec')
title('Spectrum')
plot(f,spec)
xlabel('Frequency (Hz)')
show()
|
the-stack_106_31804 |
def permutate(elements):
if len(elements) == 1: return elements
permutations = []
for index, element in enumerate(elements):
for suffix in permutate(elements[:index] + elements[index+1:]):
permutations.append(element + suffix)
return permutations
counter = {}
for permutation in permutate(list('(())')):
word = '(' + "".join(permutation) + ')'
if word not in counter:
counter[word] = 0
counter[word] += 1
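# Cross-check sketch (illustrative): itertools.permutations enumerates the same
# 24 arrangements of the four characters, so its tallies should match `counter`.
from itertools import permutations
from collections import Counter
itertools_counts = Counter('(' + ''.join(p) + ')' for p in permutations('(())'))
assert itertools_counts == Counter(counter)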
print(counter) |
the-stack_106_31807 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Resource(Model):
"""The core properties of ARM resources.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
"""The resource model definition for a ARM proxy resource. It will have
everything other than required location and tags.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ProxyResource, self).__init__(**kwargs)
class AppResource(ProxyResource):
"""App resource payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param properties: Properties of the App resource
:type properties: ~azure.mgmt.appplatform.models.AppResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'AppResourceProperties'},
}
def __init__(self, **kwargs):
super(AppResource, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class AppResourceProperties(Model):
"""App resource properties payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:param public: Indicates whether the App exposes public endpoint
:type public: bool
:ivar url: URL of the App
:vartype url: str
:ivar provisioning_state: Provisioning state of the App. Possible values
include: 'Succeeded', 'Failed', 'Creating', 'Updating'
:vartype provisioning_state: str or
~azure.mgmt.appplatform.models.AppResourceProvisioningState
:param active_deployment_name: Name of the active deployment of the App
:type active_deployment_name: str
:ivar created_time: Date time when the resource is created
:vartype created_time: datetime
:param temporary_disk: Temporary disk settings
:type temporary_disk: ~azure.mgmt.appplatform.models.TemporaryDisk
:param persistent_disk: Persistent disk settings
:type persistent_disk: ~azure.mgmt.appplatform.models.PersistentDisk
"""
_validation = {
'url': {'readonly': True},
'provisioning_state': {'readonly': True},
'created_time': {'readonly': True},
}
_attribute_map = {
'public': {'key': 'public', 'type': 'bool'},
'url': {'key': 'url', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'active_deployment_name': {'key': 'activeDeploymentName', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'temporary_disk': {'key': 'temporaryDisk', 'type': 'TemporaryDisk'},
'persistent_disk': {'key': 'persistentDisk', 'type': 'PersistentDisk'},
}
def __init__(self, **kwargs):
super(AppResourceProperties, self).__init__(**kwargs)
self.public = kwargs.get('public', None)
self.url = None
self.provisioning_state = None
self.active_deployment_name = kwargs.get('active_deployment_name', None)
self.created_time = None
self.temporary_disk = kwargs.get('temporary_disk', None)
self.persistent_disk = kwargs.get('persistent_disk', None)
class BindingResource(ProxyResource):
"""Binding resource payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param properties: Properties of the Binding resource
:type properties: ~azure.mgmt.appplatform.models.BindingResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'BindingResourceProperties'},
}
def __init__(self, **kwargs):
super(BindingResource, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class BindingResourceProperties(Model):
"""Binding resource properties payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:param resource_name: The name of the bound resource
:type resource_name: str
:param resource_type: The standard Azure resource type of the bound
resource
:type resource_type: str
:param resource_id: The Azure resource id of the bound resource
:type resource_id: str
:param key: The key of the bound resource
:type key: str
:param binding_parameters: Binding parameters of the Binding resource
:type binding_parameters: dict[str, object]
:ivar generated_properties: The generated Spring Boot property file for
this binding. The secret will be deducted.
:vartype generated_properties: str
:ivar created_at: Creation time of the Binding resource
:vartype created_at: str
:ivar updated_at: Update time of the Binding resource
:vartype updated_at: str
"""
_validation = {
'generated_properties': {'readonly': True},
'created_at': {'readonly': True},
'updated_at': {'readonly': True},
}
_attribute_map = {
'resource_name': {'key': 'resourceName', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
'binding_parameters': {'key': 'bindingParameters', 'type': '{object}'},
'generated_properties': {'key': 'generatedProperties', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'str'},
'updated_at': {'key': 'updatedAt', 'type': 'str'},
}
def __init__(self, **kwargs):
super(BindingResourceProperties, self).__init__(**kwargs)
self.resource_name = kwargs.get('resource_name', None)
self.resource_type = kwargs.get('resource_type', None)
self.resource_id = kwargs.get('resource_id', None)
self.key = kwargs.get('key', None)
self.binding_parameters = kwargs.get('binding_parameters', None)
self.generated_properties = None
self.created_at = None
self.updated_at = None
class CloudError(Model):
"""An error response from the service.
:param error:
:type error: ~azure.mgmt.appplatform.models.CloudErrorBody
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'CloudErrorBody'},
}
def __init__(self, **kwargs):
super(CloudError, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class CloudErrorException(HttpOperationError):
"""Server responsed with exception of type: 'CloudError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args)
class CloudErrorBody(Model):
"""An error response from the service.
:param code: An identifier for the error. Codes are invariant and are
intended to be consumed programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable
for display in a user interface.
:type message: str
:param target: The target of the particular error. For example, the name
of the property in error.
:type target: str
:param details: A list of additional details about the error.
:type details: list[~azure.mgmt.appplatform.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(self, **kwargs):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class ClusterResourceProperties(Model):
"""Service properties payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar provisioning_state: Provisioning state of the Service. Possible
values include: 'Creating', 'Updating', 'Deleting', 'Deleted',
'Succeeded', 'Failed', 'Moving', 'Moved', 'MoveFailed'
:vartype provisioning_state: str or
~azure.mgmt.appplatform.models.ProvisioningState
:param config_server_properties: Config server git properties of the
Service
:type config_server_properties:
~azure.mgmt.appplatform.models.ConfigServerProperties
:param trace: Trace properties of the Service
:type trace: ~azure.mgmt.appplatform.models.TraceProperties
:ivar version: Version of the Service
:vartype version: int
:ivar service_id: ServiceInstanceEntity GUID which uniquely identifies a
created resource
:vartype service_id: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'version': {'readonly': True},
'service_id': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'config_server_properties': {'key': 'configServerProperties', 'type': 'ConfigServerProperties'},
'trace': {'key': 'trace', 'type': 'TraceProperties'},
'version': {'key': 'version', 'type': 'int'},
'service_id': {'key': 'serviceId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ClusterResourceProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.config_server_properties = kwargs.get('config_server_properties', None)
self.trace = kwargs.get('trace', None)
self.version = None
self.service_id = None
class ConfigServerGitProperty(Model):
"""Property of git.
All required parameters must be populated in order to send to Azure.
:param repositories: Repositories of git.
:type repositories:
list[~azure.mgmt.appplatform.models.GitPatternRepository]
:param uri: Required. URI of the repository
:type uri: str
:param label: Label of the repository
:type label: str
:param search_paths: Searching path of the repository
:type search_paths: list[str]
:param username: Username of git repository basic auth.
:type username: str
:param password: Password of git repository basic auth.
:type password: str
:param host_key: Public sshKey of git repository.
:type host_key: str
:param host_key_algorithm: SshKey algorithm of git repository.
:type host_key_algorithm: str
:param private_key: Private sshKey algorithm of git repository.
:type private_key: str
:param strict_host_key_checking: Strict host key checking or not.
:type strict_host_key_checking: bool
"""
_validation = {
'uri': {'required': True},
}
_attribute_map = {
'repositories': {'key': 'repositories', 'type': '[GitPatternRepository]'},
'uri': {'key': 'uri', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'search_paths': {'key': 'searchPaths', 'type': '[str]'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'host_key': {'key': 'hostKey', 'type': 'str'},
'host_key_algorithm': {'key': 'hostKeyAlgorithm', 'type': 'str'},
'private_key': {'key': 'privateKey', 'type': 'str'},
'strict_host_key_checking': {'key': 'strictHostKeyChecking', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(ConfigServerGitProperty, self).__init__(**kwargs)
self.repositories = kwargs.get('repositories', None)
self.uri = kwargs.get('uri', None)
self.label = kwargs.get('label', None)
self.search_paths = kwargs.get('search_paths', None)
self.username = kwargs.get('username', None)
self.password = kwargs.get('password', None)
self.host_key = kwargs.get('host_key', None)
self.host_key_algorithm = kwargs.get('host_key_algorithm', None)
self.private_key = kwargs.get('private_key', None)
self.strict_host_key_checking = kwargs.get('strict_host_key_checking', None)
class ConfigServerProperties(Model):
"""Config server git properties payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar state: State of the config server. Possible values include:
'NotAvailable', 'Deleted', 'Failed', 'Succeeded', 'Updating'
:vartype state: str or ~azure.mgmt.appplatform.models.ConfigServerState
:param error: Error when apply config server settings.
:type error: ~azure.mgmt.appplatform.models.Error
:param config_server: Settings of config server.
:type config_server: ~azure.mgmt.appplatform.models.ConfigServerSettings
"""
_validation = {
'state': {'readonly': True},
}
_attribute_map = {
'state': {'key': 'state', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'config_server': {'key': 'configServer', 'type': 'ConfigServerSettings'},
}
def __init__(self, **kwargs):
super(ConfigServerProperties, self).__init__(**kwargs)
self.state = None
self.error = kwargs.get('error', None)
self.config_server = kwargs.get('config_server', None)
class ConfigServerSettings(Model):
"""The settings of config server.
:param git_property: Property of git environment.
:type git_property: ~azure.mgmt.appplatform.models.ConfigServerGitProperty
"""
_attribute_map = {
'git_property': {'key': 'gitProperty', 'type': 'ConfigServerGitProperty'},
}
def __init__(self, **kwargs):
super(ConfigServerSettings, self).__init__(**kwargs)
self.git_property = kwargs.get('git_property', None)
class DeploymentInstance(Model):
"""Deployment instance payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: Name of the deployment instance
:vartype name: str
:ivar status: Status of the deployment instance
:vartype status: str
:ivar reason: Failed reason of the deployment instance
:vartype reason: str
:ivar discovery_status: Discovery status of the deployment instance
:vartype discovery_status: str
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'reason': {'readonly': True},
'discovery_status': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'reason': {'key': 'reason', 'type': 'str'},
'discovery_status': {'key': 'discoveryStatus', 'type': 'str'},
}
def __init__(self, **kwargs):
super(DeploymentInstance, self).__init__(**kwargs)
self.name = None
self.status = None
self.reason = None
self.discovery_status = None
class DeploymentResource(ProxyResource):
"""Deployment resource payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param properties: Properties of the Deployment resource
:type properties:
~azure.mgmt.appplatform.models.DeploymentResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'DeploymentResourceProperties'},
}
def __init__(self, **kwargs):
super(DeploymentResource, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class DeploymentResourceProperties(Model):
"""Deployment resource properties payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:param source: Uploaded source information of the deployment.
:type source: ~azure.mgmt.appplatform.models.UserSourceInfo
:ivar app_name: App name of the deployment
:vartype app_name: str
:ivar provisioning_state: Provisioning state of the Deployment. Possible
values include: 'Creating', 'Updating', 'Succeeded', 'Failed'
:vartype provisioning_state: str or
~azure.mgmt.appplatform.models.DeploymentResourceProvisioningState
:param deployment_settings: Deployment settings of the Deployment
:type deployment_settings:
~azure.mgmt.appplatform.models.DeploymentSettings
:ivar status: Status of the Deployment. Possible values include:
'Unknown', 'Stopped', 'Running', 'Failed', 'Allocating', 'Upgrading',
'Compiling'
:vartype status: str or
~azure.mgmt.appplatform.models.DeploymentResourceStatus
:ivar active: Indicates whether the Deployment is active
:vartype active: bool
:ivar created_time: Date time when the resource is created
:vartype created_time: datetime
:ivar instances: Collection of instances belong to the Deployment
:vartype instances:
list[~azure.mgmt.appplatform.models.DeploymentInstance]
"""
_validation = {
'app_name': {'readonly': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'active': {'readonly': True},
'created_time': {'readonly': True},
'instances': {'readonly': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'UserSourceInfo'},
'app_name': {'key': 'appName', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'deployment_settings': {'key': 'deploymentSettings', 'type': 'DeploymentSettings'},
'status': {'key': 'status', 'type': 'str'},
'active': {'key': 'active', 'type': 'bool'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'instances': {'key': 'instances', 'type': '[DeploymentInstance]'},
}
def __init__(self, **kwargs):
super(DeploymentResourceProperties, self).__init__(**kwargs)
self.source = kwargs.get('source', None)
self.app_name = None
self.provisioning_state = None
self.deployment_settings = kwargs.get('deployment_settings', None)
self.status = None
self.active = None
self.created_time = None
self.instances = None
class DeploymentSettings(Model):
"""Deployment settings payload.
:param cpu: Required CPU. Default value: 1 .
:type cpu: int
:param memory_in_gb: Required Memory size in GB. Default value: 1 .
:type memory_in_gb: int
:param jvm_options: JVM parameter
:type jvm_options: str
:param instance_count: Instance count. Default value: 1 .
:type instance_count: int
:param environment_variables: Collection of environment variables
:type environment_variables: dict[str, str]
:param runtime_version: Runtime version. Possible values include:
'Java_8', 'Java_11'
:type runtime_version: str or
~azure.mgmt.appplatform.models.RuntimeVersion
"""
_validation = {
'cpu': {'maximum': 4, 'minimum': 1},
'memory_in_gb': {'maximum': 8, 'minimum': 1},
'instance_count': {'maximum': 20, 'minimum': 1},
}
_attribute_map = {
'cpu': {'key': 'cpu', 'type': 'int'},
'memory_in_gb': {'key': 'memoryInGB', 'type': 'int'},
'jvm_options': {'key': 'jvmOptions', 'type': 'str'},
'instance_count': {'key': 'instanceCount', 'type': 'int'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'runtime_version': {'key': 'runtimeVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(DeploymentSettings, self).__init__(**kwargs)
self.cpu = kwargs.get('cpu', 1)
self.memory_in_gb = kwargs.get('memory_in_gb', 1)
self.jvm_options = kwargs.get('jvm_options', None)
self.instance_count = kwargs.get('instance_count', 1)
self.environment_variables = kwargs.get('environment_variables', None)
self.runtime_version = kwargs.get('runtime_version', None)
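# Illustrative sketch (not exercised by this module): constructing a
# DeploymentSettings payload; values must respect the _validation bounds above
# (cpu 1-4, memoryInGB 1-8, instanceCount 1-20) when serialized by the client.
#
#     settings = DeploymentSettings(
#         cpu=2,
#         memory_in_gb=4,
#         instance_count=3,
#         jvm_options="-Xms1024m -Xmx2048m",
#         environment_variables={"SPRING_PROFILES_ACTIVE": "prod"},
#         runtime_version="Java_8",
#     )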
class Error(Model):
"""The error code compose of code and message.
:param code: The code of error.
:type code: str
:param message: The message of error.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Error, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class GitPatternRepository(Model):
"""Git repository property payload.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the repository
:type name: str
:param pattern: Collection of pattern of the repository
:type pattern: list[str]
:param uri: Required. URI of the repository
:type uri: str
:param label: Label of the repository
:type label: str
:param search_paths: Searching path of the repository
:type search_paths: list[str]
:param username: Username of git repository basic auth.
:type username: str
:param password: Password of git repository basic auth.
:type password: str
:param host_key: Public sshKey of git repository.
:type host_key: str
:param host_key_algorithm: SshKey algorithm of git repository.
:type host_key_algorithm: str
:param private_key: Private sshKey algorithm of git repository.
:type private_key: str
:param strict_host_key_checking: Strict host key checking or not.
:type strict_host_key_checking: bool
"""
_validation = {
'name': {'required': True},
'uri': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'pattern': {'key': 'pattern', 'type': '[str]'},
'uri': {'key': 'uri', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'search_paths': {'key': 'searchPaths', 'type': '[str]'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'host_key': {'key': 'hostKey', 'type': 'str'},
'host_key_algorithm': {'key': 'hostKeyAlgorithm', 'type': 'str'},
'private_key': {'key': 'privateKey', 'type': 'str'},
'strict_host_key_checking': {'key': 'strictHostKeyChecking', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(GitPatternRepository, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.pattern = kwargs.get('pattern', None)
self.uri = kwargs.get('uri', None)
self.label = kwargs.get('label', None)
self.search_paths = kwargs.get('search_paths', None)
self.username = kwargs.get('username', None)
self.password = kwargs.get('password', None)
self.host_key = kwargs.get('host_key', None)
self.host_key_algorithm = kwargs.get('host_key_algorithm', None)
self.private_key = kwargs.get('private_key', None)
self.strict_host_key_checking = kwargs.get('strict_host_key_checking', None)
class LogFileUrlResponse(Model):
"""Log file URL payload.
All required parameters must be populated in order to send to Azure.
:param url: Required. URL of the log file
:type url: str
"""
_validation = {
'url': {'required': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LogFileUrlResponse, self).__init__(**kwargs)
self.url = kwargs.get('url', None)
class LogSpecification(Model):
"""Specifications of the Log for Azure Monitoring.
:param name: Name of the log
:type name: str
:param display_name: Localized friendly display name of the log
:type display_name: str
:param blob_duration: Blob duration of the log
:type blob_duration: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LogSpecification, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display_name = kwargs.get('display_name', None)
self.blob_duration = kwargs.get('blob_duration', None)
class MetricDimension(Model):
"""Specifications of the Dimension of metrics.
:param name: Name of the dimension
:type name: str
:param display_name: Localized friendly display name of the dimension
:type display_name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(MetricDimension, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display_name = kwargs.get('display_name', None)
class MetricSpecification(Model):
"""Specifications of the Metrics for Azure Monitoring.
:param name: Name of the metric
:type name: str
:param display_name: Localized friendly display name of the metric
:type display_name: str
:param display_description: Localized friendly description of the metric
:type display_description: str
:param unit: Unit that makes sense for the metric
:type unit: str
:param category: Name of the metric category that the metric belongs to. A
metric can only belong to a single category.
:type category: str
:param aggregation_type: Only provide one value for this field. Valid
values: Average, Minimum, Maximum, Total, Count.
:type aggregation_type: str
:param supported_aggregation_types: Supported aggregation types
:type supported_aggregation_types: list[str]
:param supported_time_grain_types: Supported time grain types
:type supported_time_grain_types: list[str]
:param fill_gap_with_zero: Optional. If set to true, then zero will be
returned for time duration where no metric is emitted/published.
:type fill_gap_with_zero: bool
:param dimensions: Dimensions of the metric
:type dimensions: list[~azure.mgmt.appplatform.models.MetricDimension]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
}
def __init__(self, **kwargs):
super(MetricSpecification, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display_name = kwargs.get('display_name', None)
self.display_description = kwargs.get('display_description', None)
self.unit = kwargs.get('unit', None)
self.category = kwargs.get('category', None)
self.aggregation_type = kwargs.get('aggregation_type', None)
self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
self.supported_time_grain_types = kwargs.get('supported_time_grain_types', None)
self.fill_gap_with_zero = kwargs.get('fill_gap_with_zero', None)
self.dimensions = kwargs.get('dimensions', None)
class NameAvailability(Model):
"""Name availability result payload.
:param name_available: Indicates whether the name is available
:type name_available: bool
:param reason: Reason why the name is not available
:type reason: str
:param message: Message why the name is not available
:type message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, **kwargs):
super(NameAvailability, self).__init__(**kwargs)
self.name_available = kwargs.get('name_available', None)
self.reason = kwargs.get('reason', None)
self.message = kwargs.get('message', None)
class NameAvailabilityParameters(Model):
"""Name availability parameters payload.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of the resource to check name availability
:type type: str
:param name: Required. Name to be checked
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, **kwargs):
super(NameAvailabilityParameters, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.name = kwargs.get('name', None)
class OperationDetail(Model):
"""Operation detail payload.
:param name: Name of the operation
:type name: str
:param data_action: Indicates whether the operation is a data action
:type data_action: bool
:param display: Display of the operation
:type display: ~azure.mgmt.appplatform.models.OperationDisplay
:param origin: Origin of the operation
:type origin: str
:param properties: Properties of the operation
:type properties: ~azure.mgmt.appplatform.models.OperationProperties
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'data_action': {'key': 'dataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'OperationProperties'},
}
def __init__(self, **kwargs):
super(OperationDetail, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.data_action = kwargs.get('data_action', None)
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.properties = kwargs.get('properties', None)
class OperationDisplay(Model):
"""Operation display payload.
:param provider: Resource provider of the operation
:type provider: str
:param resource: Resource of the operation
:type resource: str
:param operation: Localized friendly name for the operation
:type operation: str
:param description: Localized friendly description for the operation
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OperationProperties(Model):
"""Extra Operation properties.
:param service_specification: Service specifications of the operation
:type service_specification:
~azure.mgmt.appplatform.models.ServiceSpecification
"""
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(self, **kwargs):
super(OperationProperties, self).__init__(**kwargs)
self.service_specification = kwargs.get('service_specification', None)
class PersistentDisk(Model):
"""Persistent disk payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:param size_in_gb: Size of the persistent disk in GB
:type size_in_gb: int
:ivar used_in_gb: Size of the used persistent disk in GB
:vartype used_in_gb: int
:param mount_path: Mount path of the persistent disk
:type mount_path: str
"""
_validation = {
'size_in_gb': {'maximum': 50, 'minimum': 0},
'used_in_gb': {'readonly': True, 'maximum': 50, 'minimum': 0},
}
_attribute_map = {
'size_in_gb': {'key': 'sizeInGB', 'type': 'int'},
'used_in_gb': {'key': 'usedInGB', 'type': 'int'},
'mount_path': {'key': 'mountPath', 'type': 'str'},
}
def __init__(self, **kwargs):
super(PersistentDisk, self).__init__(**kwargs)
self.size_in_gb = kwargs.get('size_in_gb', None)
self.used_in_gb = None
self.mount_path = kwargs.get('mount_path', None)
class RegenerateTestKeyRequestPayload(Model):
"""Regenerate test key request payload.
All required parameters must be populated in order to send to Azure.
:param key_type: Required. Type of the test key. Possible values include:
'Primary', 'Secondary'
:type key_type: str or ~azure.mgmt.appplatform.models.TestKeyType
"""
_validation = {
'key_type': {'required': True},
}
_attribute_map = {
'key_type': {'key': 'keyType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RegenerateTestKeyRequestPayload, self).__init__(**kwargs)
self.key_type = kwargs.get('key_type', None)
class ResourceUploadDefinition(Model):
"""Resource upload definition payload.
:param relative_path: Source relative path
:type relative_path: str
:param upload_url: Upload URL
:type upload_url: str
"""
_attribute_map = {
'relative_path': {'key': 'relativePath', 'type': 'str'},
'upload_url': {'key': 'uploadUrl', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ResourceUploadDefinition, self).__init__(**kwargs)
self.relative_path = kwargs.get('relative_path', None)
self.upload_url = kwargs.get('upload_url', None)
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The GEO location of the resource.
:type location: str
:param tags: Tags of the service which is a list of key value pairs that
describe the resource.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(TrackedResource, self).__init__(**kwargs)
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class ServiceResource(TrackedResource):
"""Service resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The GEO location of the resource.
:type location: str
:param tags: Tags of the service which is a list of key value pairs that
describe the resource.
:type tags: dict[str, str]
:param properties: Properties of the Service resource
:type properties: ~azure.mgmt.appplatform.models.ClusterResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'ClusterResourceProperties'},
}
def __init__(self, **kwargs):
super(ServiceResource, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class ServiceSpecification(Model):
"""Service specification payload.
:param log_specifications: Specifications of the Log for Azure Monitoring
:type log_specifications:
list[~azure.mgmt.appplatform.models.LogSpecification]
:param metric_specifications: Specifications of the Metrics for Azure
Monitoring
:type metric_specifications:
list[~azure.mgmt.appplatform.models.MetricSpecification]
"""
_attribute_map = {
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
}
def __init__(self, **kwargs):
super(ServiceSpecification, self).__init__(**kwargs)
self.log_specifications = kwargs.get('log_specifications', None)
self.metric_specifications = kwargs.get('metric_specifications', None)
class TemporaryDisk(Model):
"""Temporary disk payload.
:param size_in_gb: Size of the temporary disk in GB
:type size_in_gb: int
:param mount_path: Mount path of the temporary disk
:type mount_path: str
"""
_validation = {
'size_in_gb': {'maximum': 5, 'minimum': 0},
}
_attribute_map = {
'size_in_gb': {'key': 'sizeInGB', 'type': 'int'},
'mount_path': {'key': 'mountPath', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TemporaryDisk, self).__init__(**kwargs)
self.size_in_gb = kwargs.get('size_in_gb', None)
self.mount_path = kwargs.get('mount_path', None)
class TestKeys(Model):
"""Test keys payload.
:param primary_key: Primary key
:type primary_key: str
:param secondary_key: Secondary key
:type secondary_key: str
:param primary_test_endpoint: Primary test endpoint
:type primary_test_endpoint: str
:param secondary_test_endpoint: Secondary test endpoint
:type secondary_test_endpoint: str
:param enabled: Indicates whether the test endpoint feature is enabled or not
:type enabled: bool
"""
_attribute_map = {
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
'primary_test_endpoint': {'key': 'primaryTestEndpoint', 'type': 'str'},
'secondary_test_endpoint': {'key': 'secondaryTestEndpoint', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(TestKeys, self).__init__(**kwargs)
self.primary_key = kwargs.get('primary_key', None)
self.secondary_key = kwargs.get('secondary_key', None)
self.primary_test_endpoint = kwargs.get('primary_test_endpoint', None)
self.secondary_test_endpoint = kwargs.get('secondary_test_endpoint', None)
self.enabled = kwargs.get('enabled', None)
class TraceProperties(Model):
"""Trace properties payload.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar state: State of the trace proxy. Possible values include:
'NotAvailable', 'Failed', 'Succeeded', 'Updating'
:vartype state: str or ~azure.mgmt.appplatform.models.TraceProxyState
:param error: Error when applying trace proxy changes.
:type error: ~azure.mgmt.appplatform.models.Error
:param enabled: Indicates whether to enable the tracing functionality
:type enabled: bool
:param app_insight_instrumentation_key: Target application insight
instrumentation key
:type app_insight_instrumentation_key: str
"""
_validation = {
'state': {'readonly': True},
}
_attribute_map = {
'state': {'key': 'state', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'enabled': {'key': 'enabled', 'type': 'bool'},
'app_insight_instrumentation_key': {'key': 'appInsightInstrumentationKey', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TraceProperties, self).__init__(**kwargs)
self.state = None
self.error = kwargs.get('error', None)
self.enabled = kwargs.get('enabled', None)
self.app_insight_instrumentation_key = kwargs.get('app_insight_instrumentation_key', None)
class UserSourceInfo(Model):
"""Source information for a deployment.
:param type: Type of the source uploaded. Possible values include: 'Jar',
'Source'
:type type: str or ~azure.mgmt.appplatform.models.UserSourceType
:param relative_path: Relative path of the storage which stores the source
:type relative_path: str
:param version: Version of the source
:type version: str
:param artifact_selector: Selector for the artifact to be used for the
deployment for multi-module projects. This should be
the relative path to the target module/project.
:type artifact_selector: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'relative_path': {'key': 'relativePath', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'artifact_selector': {'key': 'artifactSelector', 'type': 'str'},
}
def __init__(self, **kwargs):
super(UserSourceInfo, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.relative_path = kwargs.get('relative_path', None)
self.version = kwargs.get('version', None)
self.artifact_selector = kwargs.get('artifact_selector', None)
|
the-stack_106_31808 | from typing import Optional
from ruptures.base import BaseCost
from ruptures.detection import Binseg
from sklearn.linear_model import LinearRegression
from etna.transforms.change_points_trend import ChangePointsTrendTransform
from etna.transforms.change_points_trend import TDetrendModel
class BinsegTrendTransform(ChangePointsTrendTransform):
"""BinsegTrendTransform uses Binseg model as a change point detection model in ChangePointsTrendTransform transform."""
def __init__(
self,
in_column: str,
detrend_model: TDetrendModel = LinearRegression(),
model: str = "ar",
custom_cost: Optional[BaseCost] = None,
min_size: int = 2,
jump: int = 1,
n_bkps: int = 5,
pen: Optional[float] = None,
epsilon: Optional[float] = None,
):
"""Init BinsegTrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
detrend_model:
model to get trend in data
model:
binseg segment model, ["l1", "l2", "rbf",...]. Not used if 'custom_cost' is not None.
custom_cost:
binseg custom cost function
min_size:
minimum segment length necessary to decide it is a stable trend segment
jump:
jump value can speed up computations: if jump==k, the algo will use every k-th value for change points search.
n_bkps:
number of change points to find
pen:
penalty value (>0)
epsilon:
reconstruction budget (>0)
"""
self.model = model
self.custom_cost = custom_cost
self.min_size = min_size
self.jump = jump
self.n_bkps = n_bkps
self.pen = pen
self.epsilon = epsilon
super().__init__(
in_column=in_column,
change_point_model=Binseg(
model=self.model, custom_cost=self.custom_cost, min_size=self.min_size, jump=self.jump
),
detrend_model=detrend_model,
n_bkps=self.n_bkps,
pen=self.pen,
epsilon=self.epsilon,
)
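# Illustrative sketch (not part of the original module): constructing the
# transform with a few explicit parameters. The column name "target" and the
# parameter values below are arbitrary examples, not recommendations.
def _binseg_trend_transform_example():  # pragma: no cover
    return BinsegTrendTransform(in_column="target", model="l2", n_bkps=3, min_size=10)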
|
the-stack_106_31809 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import atexit
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import grpc
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.runners.portability import local_job_service
from apache_beam.utils import subprocess_server
from apache_beam.version import __version__ as beam_version
class JobServer(object):
def start(self):
"""Starts this JobServer, returning a grpc service to which to submit jobs.
"""
raise NotImplementedError(type(self))
def stop(self):
"""Stops this job server."""
raise NotImplementedError(type(self))
class ExternalJobServer(JobServer):
def __init__(self, endpoint):
self._endpoint = endpoint
def start(self):
channel = grpc.insecure_channel(self._endpoint)
grpc.channel_ready_future(channel).result()
return beam_job_api_pb2_grpc.JobServiceStub(channel)
def stop(self):
pass
class EmbeddedJobServer(JobServer):
def start(self):
return local_job_service.LocalJobServicer()
def stop(self):
pass
class StopOnExitJobServer(JobServer):
"""Wraps a JobServer such that its stop will automatically be called on exit.
"""
def __init__(self, job_server):
self._lock = threading.Lock()
self._job_server = job_server
self._started = False
def start(self):
with self._lock:
if not self._started:
self._endpoint = self._job_server.start()
self._started = True
atexit.register(self.stop)
signal.signal(signal.SIGINT, self.stop)
return self._endpoint
def stop(self):
with self._lock:
if self._started:
self._job_server.stop()
self._started = False
class SubprocessJobServer(JobServer):
"""An abstract base class for JobServers run as an external process."""
def __init__(self):
self._local_temp_root = None
self._server = None
def subprocess_cmd_and_endpoint(self):
raise NotImplementedError(type(self))
def start(self):
if self._server is None:
self._local_temp_root = tempfile.mkdtemp(prefix='beam-temp')
cmd, endpoint = self.subprocess_cmd_and_endpoint()
port = int(endpoint.split(':')[-1])
self._server = subprocess_server.SubprocessServer(
beam_job_api_pb2_grpc.JobServiceStub, cmd, port=port)
return self._server.start()
def stop(self):
if self._local_temp_root:
shutil.rmtree(self._local_temp_root)
self._local_temp_root = None
return self._server.stop()
def local_temp_dir(self, **kwargs):
return tempfile.mkdtemp(dir=self._local_temp_root, **kwargs)
class JavaJarJobServer(SubprocessJobServer):
MAVEN_REPOSITORY = 'https://repo.maven.apache.org/maven2/org/apache/beam'
JAR_CACHE = os.path.expanduser("~/.apache_beam/cache")
def java_arguments(self, job_port, artifacts_dir):
raise NotImplementedError(type(self))
def path_to_jar(self):
raise NotImplementedError(type(self))
@staticmethod
def path_to_beam_jar(gradle_target):
return subprocess_server.JavaJarServer.path_to_beam_jar(gradle_target)
@staticmethod
def local_jar(url):
return subprocess_server.JavaJarServer.local_jar(url)
def subprocess_cmd_and_endpoint(self):
jar_path = self.local_jar(self.path_to_jar())
artifacts_dir = self.local_temp_dir(prefix='artifacts')
job_port, = subprocess_server.pick_port(None)
return (
['java', '-jar', jar_path] + list(
self.java_arguments(job_port, artifacts_dir)),
'localhost:%s' % job_port)
class DockerizedJobServer(SubprocessJobServer):
"""
Spins up the JobServer in a docker container for local execution.
"""
def __init__(self, job_host="localhost",
job_port=None,
artifact_port=None,
expansion_port=None,
harness_port_range=(8100, 8200),
max_connection_retries=5):
super(DockerizedJobServer, self).__init__()
self.job_host = job_host
self.job_port = job_port
self.expansion_port = expansion_port
self.artifact_port = artifact_port
self.harness_port_range = harness_port_range
self.max_connection_retries = max_connection_retries
def subprocess_cmd_and_endpoint(self):
# TODO This is hardcoded to Flink at the moment but should be changed
job_server_image_name = os.environ['USER'] + \
"-docker-apache.bintray.io/beam/flink-job-server:latest"
docker_path = subprocess.check_output(
['which', 'docker']).strip().decode('utf-8')
cmd = ["docker", "run",
# We mount the docker binary and socket to be able to spin up
# "sibling" containers for the SDK harness.
"-v", ':'.join([docker_path, "/bin/docker"]),
"-v", "/var/run/docker.sock:/var/run/docker.sock"]
self.job_port, self.artifact_port, self.expansion_port = (
subprocess_server.pick_port(
self.job_port, self.artifact_port, self.expansion_port))
args = ['--job-host', self.job_host,
'--job-port', str(self.job_port),
'--artifact-port', str(self.artifact_port),
'--expansion-port', str(self.expansion_port)]
if sys.platform == "darwin":
# Docker-for-Mac doesn't support host networking, so we need to explicitly
# publish ports from the Docker container to be able to connect to it.
# Also, all other containers need to be aware that they run on Docker-for-Mac
# to connect against the internal Docker-for-Mac address.
cmd += ["-e", "DOCKER_MAC_CONTAINER=1"]
cmd += ["-p", "{}:{}".format(self.job_port, self.job_port)]
cmd += ["-p", "{}:{}".format(self.artifact_port, self.artifact_port)]
cmd += ["-p", "{}:{}".format(self.expansion_port, self.expansion_port)]
cmd += ["-p", "{0}-{1}:{0}-{1}".format(
self.harness_port_range[0], self.harness_port_range[1])]
else:
# This shouldn't be set for MacOS because it destroys port forwarding,
# even though host networking is not supported on MacOS.
cmd.append("--network=host")
cmd.append(job_server_image_name)
return cmd + args, '%s:%s' % (self.job_host, self.job_port)
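# Illustrative sketch (not part of the original module): wrapping the embedded
# job service so that it is stopped automatically at interpreter exit or on
# SIGINT, as implemented by StopOnExitJobServer above.
def _embedded_job_server_example():  # pragma: no cover
    server = StopOnExitJobServer(EmbeddedJobServer())
    job_service = server.start()  # a local_job_service.LocalJobServicer instance
    return server, job_service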
|
the-stack_106_31810 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# no unicode literals
from __future__ import absolute_import, division, print_function
import ctypes
import os
import os.path
import platform
from pywatchman import compat
if os.name == "nt":
def open_file_win(path):
create_file = ctypes.windll.kernel32.CreateFileW
c_path = ctypes.create_unicode_buffer(path)
access = 0
mode = 7 # FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE
disposition = 3 # OPEN_EXISTING
flags = 33554432 # FILE_FLAG_BACKUP_SEMANTICS
h = create_file(c_path, access, mode, 0, disposition, flags, 0)
if h == -1:
raise WindowsError("Failed to open file: " + path)
return h
def get_canonical_filesystem_path(name):
gfpnbh = ctypes.windll.kernel32.GetFinalPathNameByHandleW
close_handle = ctypes.windll.kernel32.CloseHandle
h = open_file_win(name)
try:
gfpnbh = ctypes.windll.kernel32.GetFinalPathNameByHandleW
numwchars = 1024
while True:
buf = ctypes.create_unicode_buffer(numwchars)
result = gfpnbh(h, buf, numwchars, 0)
if result == 0:
raise Exception("unknown error while normalizing path")
# The first four chars are //?/
if result <= numwchars:
path = buf.value[4:].replace("\\", "/")
if compat.PYTHON2:
path = path.encode("utf8")
return path
# Not big enough; the result is the amount we need
numwchars = result + 1
finally:
close_handle(h)
elif platform.system() == "Darwin":
import ctypes.util
F_GETPATH = 50
libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
getpath_fcntl = libc.fcntl
getpath_fcntl.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_char_p]
getpath_fcntl.restype = ctypes.c_int
def get_canonical_filesystem_path(name):
fd = os.open(name, os.O_RDONLY, 0)
try:
numchars = 1024 # MAXPATHLEN
# The kernel caps this routine to MAXPATHLEN, so there is no
# point in over-allocating or trying again with a larger buffer
buf = ctypes.create_string_buffer(numchars)
ctypes.set_errno(0)
result = getpath_fcntl(fd, F_GETPATH, buf)
if result != 0:
raise OSError(ctypes.get_errno())
# buf is a bytes buffer, so normalize it if necessary
ret = buf.value
if isinstance(name, compat.UNICODE):
ret = os.fsdecode(ret)
return ret
finally:
os.close(fd)
else:
def get_canonical_filesystem_path(name):
return os.path.normpath(name)
def norm_relative_path(path):
# TODO: in the future we will standardize on `/` as the
# dir separator so we can remove the replace call from here.
# We do not need to normcase because all of our tests are
# using the appropriate case already, and watchman returns
# paths in the canonical filesystem case anyway.
return path.replace("\\", "/")
def norm_absolute_path(path):
# TODO: in the future we will standardize on `/` as the
# dir separator so we can remove the replace call.
return path.replace("\\", "/")
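# Illustrative sketch (not part of the original module): the Windows-style
# paths below are arbitrary examples of the separator normalisation performed
# by the helpers above.
def _norm_example():  # pragma: no cover
    assert norm_relative_path("foo\\bar\\baz.txt") == "foo/bar/baz.txt"
    return norm_absolute_path("C:\\repo\\watched")  # -> "C:/repo/watched"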
|
the-stack_106_31811 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module contains the table found on asrank.caida.org.
This table inherits from Generic Table. For more information:
https://github.com/jfuruness/lib_bgp_data/wiki/Generic-Table
"""
__author__ = "Abhinna Adhikari, Justin Furuness"
__credits__ = ["Abhinna Adhikari", "Justin Furuness"]
__Lisence__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "[email protected]"
__status__ = "Production"
from ...utils.database import Generic_Table
class AS_Rank_Table(Generic_Table):
"""ASRankTable class, inherits from Generic_Table.
For a more in depth explanation see the top of the file.
"""
__slots__ = []
name = 'as_rank'
columns = ['as_rank', 'asn', 'organization', 'country', 'cone_size']
def _create_tables(self):
"""Creates new table if it doesn't already exist. The contents will
be cleared every time asrank_website_parser is run because information
in the database may be out of date.
"""
sql = f"""CREATE UNLOGGED TABLE IF NOT EXISTS {self.name} (
as_rank bigint,
asn bigint,
organization varchar (250),
country varchar (2),
cone_size integer
);"""
self.cursor.execute(sql)
def get_top_100_ases(self):
"""Returns top 100 ases by as rank"""
sql = f"""SELECT * FROM {self.name} ORDER BY as_rank LIMIT 100;"""
return [x["asn"] for x in self.execute(sql)]
|
the-stack_106_31812 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from refinery.units import arg, Unit
class group(Unit):
"""
Group incoming chunks into frames of the given size.
"""
def __init__(self, size: arg.number(help='Size of each group; must be at least 2.', bound=(2, None))):
super().__init__(size=size)
def process(self, data):
members = data.temp or ()
if len(members) >= self.args.size:
raise RuntimeError(F'received {len(members) + 1} items in group')
yield data
yield from members
def filter(self, chunks):
members = []
header = None
for chunk in chunks:
if not chunk.visible:
yield chunk
continue
if len(members) > self.args.size - 2:
yield header
header = None
if header is None:
chunk.temp = members
header = chunk
members.clear()
else:
members.append(chunk)
if header is not None:
yield header
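# Illustrative note (not part of the original unit): with size=3, five visible
# chunks A, B, C, D, E end up framed as [A, B, C] and [D, E]; each group's
# first chunk is yielded by filter() and carries the rest of its group in the
# temp slot, which process() then re-emits after the header chunk.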
|
the-stack_106_31814 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module to contain interpolation functions."""
import warnings
import iris
import numpy as np
from scipy.interpolate import griddata
from scipy.spatial.qhull import QhullError
from improver import BasePlugin
def interpolate_missing_data(
data, method="linear", limit=None, limit_as_maximum=True, valid_points=None
):
"""
Args:
data (numpy.ndarray):
The field of data to be interpolated across gaps.
method (str):
The method to use to fill in the data. This is usually "linear" for
linear interpolation, and "nearest" for a nearest neighbour
approach. It can take any method available to the method
scipy.interpolate.griddata.
limit (numpy.ndarray):
The array containing limits for each grid point that are
imposed on any value in the region that has been interpolated.
limit_as_maximum (bool):
If True the test against the limit array is that if the
interpolated values exceed the limit they should be set to the
limit value. If False, the test is whether the interpolated values
fall below the limit value.
valid_points (numpy.ndarray):
A boolean array that allows a subset of the unmasked data to be
chosen as source data for the interpolation process. True values
in this array mark points that can be used for interpolation if
they are not otherwise invalid. False values mark points that
should not be used, even if they are otherwise valid data points.
Returns:
numpy.ndarray:
The original data plus interpolated data in masked regions where it
was possible to fill these in.
"""
if valid_points is None:
valid_points = np.full_like(data, True, dtype=bool)
# Interpolate linearly across the remaining points
index = ~np.isnan(data)
index_valid_data = valid_points[index]
index[index] = index_valid_data
data_filled = data
if np.any(index):
ynum, xnum = data.shape
(y_points, x_points) = np.mgrid[0:ynum, 0:xnum]
values = data[index]
try:
data_updated = griddata(
np.where(index), values, (y_points, x_points), method=method
)
except QhullError:
data_filled = data
else:
data_filled = data_updated
if limit is not None:
index = ~np.isfinite(data) & np.isfinite(data_filled)
if limit_as_maximum:
data_filled[index] = np.clip(data_filled[index], None, limit[index])
else:
data_filled[index] = np.clip(data_filled[index], limit[index], None)
index = ~np.isfinite(data)
data[index] = data_filled[index]
return data
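# Illustrative sketch (not part of the original module): filling one missing
# value in a small field with the default linear method. The 3x3 array and the
# position of the gap are arbitrary example values; the centre becomes 5.0.
def _interpolate_missing_data_example():  # pragma: no cover
    field = np.array(
        [[1.0, 2.0, 3.0], [4.0, np.nan, 6.0], [7.0, 8.0, 9.0]], dtype=np.float32
    )
    return interpolate_missing_data(field, method="linear")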
class InterpolateUsingDifference(BasePlugin):
"""
Uses interpolation to fill masked regions in the data contained within the
input cube. This is achieved by calculating the difference between the
input cube and a complete (i.e. complete across the whole domain) reference
cube. The difference between the data in regions where they overlap is
calculated and this difference field is then interpolated across the
domain. Any masked regions in the input cube data are then filled with data
calculated as the reference cube data minus the interpolated difference
field.
"""
def __repr__(self):
"""String representation of plugin."""
return "<InterpolateUsingDifference>"
@staticmethod
def _check_inputs(cube, reference_cube, limit):
"""
Check that the input cubes are compatible and the data is complete or
masked as expected.
"""
if np.isnan(reference_cube.data).any():
raise ValueError(
"The reference cube contains np.nan data indicating that it "
"is not complete across the domain."
)
try:
reference_cube.convert_units(cube.units)
if limit is not None:
limit.convert_units(cube.units)
except ValueError as err:
raise type(err)(
"Reference cube and/or limit do not have units compatible with"
" cube. " + str(err)
)
def process(self, cube, reference_cube, limit=None, limit_as_maximum=True):
"""
Apply plugin to input data.
Args:
cube (iris.cube.Cube):
Cube for which interpolation is required to fill masked
regions.
reference_cube (iris.cube.Cube):
A cube that covers the entire domain that it shares with
cube.
limit (iris.cube.Cube or None):
A cube of limiting values to apply to the cube that is being
filled in. This can be used to ensure that the resulting values
do not fall below / exceed the limiting values; whether the
limit values should be used as a minima or maxima is
determined by the limit_as_maximum option. These values should
be on an x-y grid of the same size as an x-y slice of cube.
limit_as_maximum (bool):
If True the test against the values allowed by the limit array
is that if the interpolated values exceed the limit they should
be set to the limit value. If False, the test is whether the
interpolated values fall below the limit value.
Return:
iris.cube.Cube:
A copy of the input cube in which the missing data has been
populated with values obtained through interpolating the
difference field and subtracting the result from the reference
cube.
Raises:
ValueError: If the reference cube is not complete across the
entire domain.
"""
if not np.ma.is_masked(cube.data):
warnings.warn(
"Input cube unmasked, no data to fill in, returning " "unchanged."
)
return cube
self._check_inputs(cube, reference_cube, limit)
filled_cube = iris.cube.CubeList()
xaxis, yaxis = cube.coord(axis="x"), cube.coord(axis="y")
for cslice, rslice in zip(
cube.slices([yaxis, xaxis]), reference_cube.slices([yaxis, xaxis])
):
invalid_points = cslice.data.mask.copy()
valid_points = ~invalid_points
difference_field = np.subtract(
rslice.data,
cslice.data,
out=np.full(cslice.shape, np.nan),
where=valid_points,
)
interpolated_difference = interpolate_missing_data(
difference_field, valid_points=valid_points
)
# If any invalid points remain in the difference field, use nearest
# neighbour interpolation to fill these with the nearest difference
remain_invalid = np.isnan(interpolated_difference)
if remain_invalid.any():
interpolated_difference = interpolate_missing_data(
difference_field, valid_points=~remain_invalid, method="nearest"
)
result = cslice.copy()
result.data[invalid_points] = (
rslice.data[invalid_points] - interpolated_difference[invalid_points]
)
if limit is not None:
if limit_as_maximum:
result.data[invalid_points] = np.clip(
result.data[invalid_points], None, limit.data[invalid_points]
)
else:
result.data[invalid_points] = np.clip(
result.data[invalid_points], limit.data[invalid_points], None
)
filled_cube.append(result)
return filled_cube.merge_cube()
|
the-stack_106_31815 | """
Code here relates to displaying and string-manipulating header dictionaries created by the ABFheader class.
"""
import os
def show(header):
"""Display the contents of the header in an easy to read format."""
for key in header.keys():
if key.startswith("###"):
print("\n%s"%key)
else:
print("%s = %s"%(key,header[key]))
def html(header,fname):
"""Generate a HTML-formatted document with all header information."""
html="<html><body><code>"
for key in header.keys():
if key.startswith("###"):
key=key.replace("#","").strip()
html+="<br><b style='font-size: 200%%;'>%s</b><br>"%key
else:
html+="%s = %s<br>"%(key,header[key])
html+="</code></html></body>"
with open(fname,'w') as f:
f.write(html)
print("wrote",os.path.abspath(fname))
def markdown(header,fname):
"""Generate a markdown-formatted document with all header information."""
out="# ABF Header Contents\n"
for key in header.keys():
if key.startswith("###"):
key=key.replace("#","").strip()
out+="\n## %s\n"%key
else:
out+="* %s = `%s`\n"%(key,header[key])
with open(fname,'w') as f:
f.write(out)
print("wrote",os.path.abspath(fname)) |
the-stack_106_31816 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from avro import schema
from avro import io
from avro import datafile
DATUM = {
'intField': 12,
'longField': 15234324,
'stringField': u'hey',
'boolField': True,
'floatField': 1234.0,
'doubleField': -1234.0,
'bytesField': '12312adf',
'nullField': None,
'arrayField': [5.0, 0.0, 12.0],
'mapField': {'a': {'label': 'a'}, 'bee': {'label': 'cee'}},
'unionField': 12.0,
'enumField': 'C',
'fixedField': '1019181716151413',
'recordField': {'label': 'blah', 'children': [{'label': 'inner', 'children': []}]},
}
if __name__ == "__main__":
interop_schema = schema.parse(open(sys.argv[1], 'r').read())
writer = open(sys.argv[2], 'wb')
datum_writer = io.DatumWriter()
# NB: not using compression
dfw = datafile.DataFileWriter(writer, datum_writer, interop_schema)
dfw.append(DATUM)
dfw.close()
|
the-stack_106_31818 | """Min temp after, max temp after, count of days"""
import datetime
from scipy.stats import linregress
from pandas.io.sql import read_sql
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.network import Table as NetworkTable
BOOLS = {
'yes': 'Yes, fit linear regression',
'no': 'No, do not plot regression'
}
META = {
'annual_sum_precip': {
'title': 'Annual Precipitation (rain + melted snow)',
'ylabel': 'Precipitation [inches]',
'xlabel': 'Year',
'func': 'sum(precip)',
'month_bounds': '',
'valid_offset': '',
},
'annual_avg_temp': {
'title': 'Annual Average Temperature',
'ylabel': 'Temperature [F]',
'xlabel': 'Year',
'func': 'avg((high+low)/2.)',
'month_bounds': '',
'valid_offset': '',
},
'winter_avg_temp': {
'title': 'Winter [DJF] Average Temperature',
'ylabel': 'Temperature [F]',
'xlabel': 'Year',
'func': 'avg((high+low)/2.)',
'month_bounds': 'and month in (12,1,2)',
'valid_offset': "- '2 month'::interval ",
},
'winter_avg_low': {
'title': 'Winter [DJF] Average Low Temperature',
'ylabel': 'Temperature [F]',
'xlabel': 'Year',
'func': 'avg(low)',
'month_bounds': 'and month in (12,1,2)',
'valid_offset': "- '2 month'::interval ",
},
'winter_avg_high': {
'title': 'Winter [DJF] Average High Temperature',
'ylabel': 'Temperature [F]',
'xlabel': 'Year',
'func': 'avg(high)',
'month_bounds': 'and month in (12,1,2)',
'valid_offset': "- '2 month'::interval ",
},
'summer_avg_temp': {
'title': 'Summer [JJA] Average Temperature',
'ylabel': 'Temperature [F]',
'xlabel': 'Year',
'func': 'avg((high+low)/2.)',
'month_bounds': 'and month in (6,7,8)',
'valid_offset': " ",
},
'summer_avg_high': {
'title': 'Summer [JJA] Average High Temperature',
'ylabel': 'Temperature [F]',
'xlabel': 'Year',
'func': 'avg(high)',
'month_bounds': 'and month in (6,7,8)',
'valid_offset': " ",
},
'summer_avg_low': {
'title': 'Summer [JJA] Average Low Temperature',
'ylabel': 'Temperature [F]',
'xlabel': 'Year',
'func': 'avg(low)',
'month_bounds': 'and month in (6,7,8)',
'valid_offset': " ",
},
'spring_avg_temp': {
'title': 'Spring [MAM] Average Temperature',
'ylabel': 'Temperature [F]',
'xlabel': 'Year',
'func': 'avg((high+low)/2.)',
'month_bounds': 'and month in (3,4,5)',
'valid_offset': " ",
},
'fall_avg_temp': {
'title': 'Fall [SON] Average Temperature',
'ylabel': 'Temperature [F]',
'xlabel': 'Year',
'func': 'avg((high+low)/2.)',
'month_bounds': 'and month in (9,10,11)',
'valid_offset': " ",
},
'frost_free': {
'title': 'Frost Free Days',
'ylabel': 'Days',
'xlabel': 'Year',
'month_bounds': '',
'func': '',
'valid_offset': '',
},
'gdd50': {
'title': 'Growing Degree Days (1 May - 1 Oct) (base=50)',
'ylabel': 'GDD Units [F]',
'xlabel': 'Year',
'func': 'sum(gddxx(50, 86, high, low))',
'month_bounds': 'and month in (5,6,7,8,9)',
'valid_offset': '',
},
'hdd65': {
'title': 'Heating Degree Days (1 Oct - 1 May) (base=65)',
'ylabel': 'HDD Units [F]',
'xlabel': 'Year',
'func': 'sum(hdd65(high,low))',
'month_bounds': 'and month in (10,11,12,1,2,3,4)',
'valid_offset': " - '6 months'::interval ",
},
}
def yearly_plot(ax, ctx):
"""
Make a yearly plot of something
"""
pgconn = get_dbconn('coop', user='nobody')
if ctx['plot_type'] == 'frost_free':
ctx['st'] = ctx['station'][:2]
df = read_sql("""
select fall.year as yr, fall.s - spring.s as data from
(select year, max(extract(doy from day)) as s
from alldata_%(st)s where station = '%(station)s' and
month < 7 and low <= 32 and year >= %(first_year)s and
year <= %(last_year)s GROUP by year) as spring,
(select year, min(extract(doy from day)) as s
from alldata_%(st)s where station = '%(station)s' and
month > 7 and low <= 32 and year >= %(first_year)s and
year <= %(last_year)s GROUP by year) as fall
WHERE spring.year = fall.year ORDER by fall.year ASC
""" % ctx, pgconn)
else:
df = read_sql("""
SELECT extract(year from (day %s)) as yr, %s as data
from alldata_%s WHERE station = '%s'
%s GROUP by yr ORDER by yr ASC
""" % (META[ctx['plot_type']]['valid_offset'],
META[ctx['plot_type']]['func'],
ctx['station'][:2], ctx['station'],
META[ctx['plot_type']]['month_bounds']), pgconn)
df = df[(df['yr'] >= ctx['first_year']) & (df['yr'] <= ctx['last_year'])]
if df.empty:
raise ValueError("no data found, sorry")
ax.plot(df['yr'].values, df['data'].values, 'bo-')
ax.set_title(("%s (%s - %s)\nLocation Name: %s"
) % (META[ctx['plot_type']].get('title', 'TITLE'),
ctx['first_year'], ctx['last_year'],
ctx['nt'].sts[ctx['station']]['name']))
ax.set_xlabel(META[ctx['plot_type']].get('xlabel', 'XLABEL'))
ax.set_ylabel(META[ctx['plot_type']].get('ylabel', 'YLABEL'))
ax.set_xlim(ctx['first_year'] - 1, ctx['last_year'] + 1)
miny = df['data'].min()
maxy = df['data'].max()
ax.set_ylim(miny - ((maxy-miny) / 10.), maxy + ((maxy-miny) / 10.))
ax.grid(True)
if ctx['linregress'] == 'yes':
(slope, intercept, r_value,
_p_value, _std_err) = linregress(df['yr'].values, df['data'].values)
ax.plot(df['yr'].values, slope * df['yr'].values + intercept,
color='#CC6633')
ax.text(ctx['first_year'], maxy, '$R^2$=%.2f' % (r_value ** 2,),
color='#CC6633')
return df
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['description'] = """Create plots of yearly totals and optionally fit
a linear trendline. Here is a brief description of some of the
available metrics.
<ul>
<li><strong>Frost Free Days</strong>: Number of days each year between
the last spring sub 32F temperature and first fall sub 32F temperature.
</li>
</ul>
"""
pdict = dict()
for varname in META:
pdict[varname] = META[varname]['title']
today = datetime.date.today()
desc['arguments'] = [
dict(type='station', name='station', default='IA0200',
label='Select Station:', network='IACLIMATE'),
dict(type='select', options=pdict, default='frost_free',
label='Which metric to compute', name='plot_type'),
dict(type='year', default=1951, name='first_year',
label='First Year to Plot'),
dict(type='year', default=(today.year - 1), name='last_year',
label='Last Year to Plot'),
dict(type='select', options=BOOLS, name='linregress',
default='no', label='Plot Regression?')
]
return desc
def plotter(fdict):
""" Go """
ctx = get_autoplot_context(fdict, get_description())
ctx['nt'] = NetworkTable(ctx['network'])
fig = plt.figure()
ax = fig.add_subplot(111)
# Keep the plot area background fully opaque
ax.patch.set_alpha(1.0)
df = yearly_plot(ax, ctx)
return fig, df
if __name__ == '__main__':
plotter(dict())
|
the-stack_106_31819 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test diazd aborts if can't disconnect a block.
- Start a single node and generate 3 blocks.
- Delete the undo data.
- Mine a fork that requires disconnecting the tip.
- Verify that diazd AbortNode's.
"""
from test_framework.test_framework import DiazTestFramework
from test_framework.util import wait_until, get_datadir_path, connect_nodes
import os
class AbortNodeTest(DiazTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.setup_nodes()
# We'll connect the nodes later
def run_test(self):
self.nodes[0].generate(3)
datadir = get_datadir_path(self.options.tmpdir, 0)
# Deleting the undo file will result in reorg failure
os.unlink(os.path.join(datadir, 'regtest', 'blocks', 'rev00000.dat'))
# Connecting to a node with a more work chain will trigger a reorg
# attempt.
self.nodes[1].generate(3)
with self.nodes[0].assert_debug_log(["Failed to disconnect block"]):
connect_nodes(self.nodes[0], 1)
self.nodes[1].generate(1)
# Check that node0 aborted
self.log.info("Waiting for crash")
wait_until(lambda: self.nodes[0].is_node_stopped(), timeout=60)
self.log.info("Node crashed - now verifying restart fails")
self.nodes[0].assert_start_raises_init_error()
if __name__ == '__main__':
AbortNodeTest().main()
|
the-stack_106_31820 | """
@author: Thang Nguyen <[email protected]>
"""
import os
import argparse
import shutil
import cv2
import numpy as np
from src.utils import *
import pickle
from src.yolo_net import Yolo
CLASSES = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
"traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog",
"horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
"handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
"baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
"wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
"broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
"bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
"teddy bear", "hair drier", "toothbrush"]
def get_args():
parser = argparse.ArgumentParser("You Only Look Once: Unified, Real-Time Object Detection")
parser.add_argument("--image_size", type=int, default=448, help="The common width and height for all images")
parser.add_argument("--conf_threshold", type=float, default=0.35)
parser.add_argument("--nms_threshold", type=float, default=0.5)
parser.add_argument("--test_set", type=str, default="val")
parser.add_argument("--year", type=str, default="2017", help="The year of dataset (2014 or 2017)")
parser.add_argument("--root_path", type=str, default="data/COCO", help="the root folder of dataset")
parser.add_argument("--pre_trained_model_type", type=str, choices=["model", "params"], default="model")
parser.add_argument("--pre_trained_model_path", type=str, default="trained_models/whole_model_trained_yolo_coco")
parser.add_argument("--output", type=str, default="predictions")
args = parser.parse_args()
return args
def test(opt):
input_image_folder = os.path.join(opt.root_path, "images", "{}{}".format(opt.test_set, opt.year))
anno_path = os.path.join(opt.root_path, "anno_pickle", "COCO_{}{}.pkl".format(opt.test_set, opt.year))
id_list_path = pickle.load(open(anno_path, "rb"))
id_list_path = list(id_list_path.values())
output_folder = os.path.join(opt.output, "COCO_{}{}".format(opt.test_set, opt.year))
colors = pickle.load(open("src/pallete", "rb"))
if os.path.isdir(output_folder):
shutil.rmtree(output_folder)
os.makedirs(output_folder)
if torch.cuda.is_available():
if opt.pre_trained_model_type == "model":
model = torch.load(opt.pre_trained_model_path)
else:
model = Yolo(80)
model.load_state_dict(torch.load(opt.pre_trained_model_path))
else:
if opt.pre_trained_model_type == "model":
model = torch.load(opt.pre_trained_model_path, map_location=lambda storage, loc: storage)
else:
model = Yolo(80)
model.load_state_dict(torch.load(opt.pre_trained_model_path, map_location=lambda storage, loc: storage))
model.eval()
for id in id_list_path:
image_path = os.path.join(input_image_folder, id["file_name"])
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width = image.shape[:2]
image = cv2.resize(image, (opt.image_size, opt.image_size))
image = np.transpose(np.array(image, dtype=np.float32), (2, 0, 1))
image = image[None, :, :, :]
width_ratio = float(opt.image_size) / width
height_ratio = float(opt.image_size) / height
data = Variable(torch.FloatTensor(image))
if torch.cuda.is_available():
data = data.cuda()
with torch.no_grad():
logits = model(data)
predictions = post_processing(logits, opt.image_size, CLASSES, model.anchors, opt.conf_threshold,
opt.nms_threshold)
if len(predictions) == 0:
continue
else:
predictions = predictions[0]
output_image = cv2.imread(image_path)
for pred in predictions:
xmin = int(max(pred[0] / width_ratio, 0))
ymin = int(max(pred[1] / height_ratio, 0))
xmax = int(min((pred[0] + pred[2]) / width_ratio, width))
ymax = int(min((pred[1] + pred[3]) / height_ratio, height))
color = colors[CLASSES.index(pred[5])]
cv2.rectangle(output_image, (xmin, ymin), (xmax, ymax), color, 2)
text_size = cv2.getTextSize(pred[5] + ' : %.2f' % pred[4], cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
cv2.rectangle(output_image, (xmin, ymin), (xmin + text_size[0] + 3, ymin + text_size[1] + 4), color, -1)
cv2.putText(
output_image, pred[5] + ' : %.2f' % pred[4],
(xmin, ymin + text_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1,
(255, 255, 255), 1)
print("Object: {}, Bounding box: ({},{}) ({},{})".format(pred[5], xmin, xmax, ymin, ymax))
cv2.imwrite("{}/{}_prediction.jpg".format(output_folder, id["file_name"][:-4]), output_image)
if __name__ == "__main__":
opt = get_args()
test(opt)
|
the-stack_106_31821 | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
W60X SDK
"""
from os.path import isfile, isdir, join
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
platform = env.PioPlatform()
board = env.BoardConfig()
FRAMEWORK_DIR = platform.get_package_dir("framework-wm60x-sdk")
assert isdir(FRAMEWORK_DIR)
mcu = env.BoardConfig().get("build.mcu", "")
board_name = env.subst("$BOARD")
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
def process_standard_library_configuration(cpp_defines):
if "PIO_FRAMEWORK_ARDUINO_STANDARD_LIB" in cpp_defines:
env['LINKFLAGS'].remove("--specs=nano.specs")
if "PIO_FRAMEWORK_ARDUINO_NANOLIB_FLOAT_PRINTF" in cpp_defines:
env.Append(LINKFLAGS=["-u_printf_float"])
if "PIO_FRAMEWORK_ARDUINO_NANOLIB_FLOAT_SCANF" in cpp_defines:
env.Append(LINKFLAGS=["-u_scanf_float"])
# The choice between LWIP_141 and lwip2.0.3 is omitted.
# Other flags like COST_DOWN / TLS_COST_DOWN,
# which are present in the original makefiles,
# are not processed in the actual code.
def get_arm_math_lib(cpu):
core = board.get("build.cpu")[7:9]
if core == "m4":
return "arm_cortexM4lf_math"
elif core == "m7":
return "arm_cortexM7lfsp_math"
return "arm_cortex%sl_math" % core.upper()
env.Append(
ASFLAGS=["-x", "assembler-with-cpp", "-mabi=aapcs", "-mthumb-interwork"],
CFLAGS=[
"-std=gnu11"
],
CXXFLAGS=[
"-std=gnu++14",
"-fno-threadsafe-statics",
"-fno-rtti",
"-fno-exceptions",
"-fno-use-cxa-atexit"
],
CCFLAGS=[
"-Os", # optimize for size
"-mcpu=%s" % env.BoardConfig().get("build.cpu"),
"-mthumb",
"-mabi=aapcs",
"-march=armv7-m",
"-ffunction-sections", # place each function in its own section
"-fdata-sections",
"-Wall",
"-nostdlib",
"-fno-builtin",
"--param", "max-inline-insns-single=500"
],
CPPDEFINES=[
("GCC_COMPILE", 1),
("WM_W600", 1),
("TLS_OS_FREERTOS", 1), #from RTOS Makefile
"_IN_ADDR_T_DECLARED", #from toolchain.def
"__MACHINE_ENDIAN_H__",
"_TIMEVAL_DEFINED",
"__INSIDE_CYGWIN_NET__"
],
CPPPATH=[
join(FRAMEWORK_DIR, "Src", "OS", "RTOS", "include"),
# referenced in makefile but actually not existing
#join(FRAMEWORK_DIR, "Src", "Wlan", "Driver"),
#join(FRAMEWORK_DIR, "Src", "Wlan", "Supplicant"),
join(FRAMEWORK_DIR, "Platform", "Boot", "gcc"),
join(FRAMEWORK_DIR, "Platform", "Common", "Params"),
join(FRAMEWORK_DIR, "Platform", "Common", "Task"),
join(FRAMEWORK_DIR, "Platform", "Common", "mem"),
join(FRAMEWORK_DIR, "Platform", "Common", "fwup"),
join(FRAMEWORK_DIR, "Platform", "Common", "utils"),
join(FRAMEWORK_DIR, "Platform", "Common", "crypto"),
join(FRAMEWORK_DIR, "Platform", "Common", "crypto", "symmetric"),
join(FRAMEWORK_DIR, "Platform", "Common", "crypto", "digest"),
join(FRAMEWORK_DIR, "Platform", "Common", "crypto", "math"),
join(FRAMEWORK_DIR, "Platform", "Inc"),
join(FRAMEWORK_DIR, "Platform", "Sys"),
join(FRAMEWORK_DIR, "Src", "App", "wm_atcmd"),
join(FRAMEWORK_DIR, "Src", "App", "matrixssl"),
join(FRAMEWORK_DIR, "Src", "App", "libupnp-1.6.19", "ixml", "inc"),
join(FRAMEWORK_DIR, "Src", "App", "libupnp-1.6.19", "upnp" "inc"),
join(FRAMEWORK_DIR, "Src", "App", "libupnp-1.6.19", "ixml", "include"),
join(FRAMEWORK_DIR, "Src", "App", "libupnp-1.6.19", "threadutil", "include"),
join(FRAMEWORK_DIR, "Src", "App", "libupnp-1.6.19", "upnp", "include"),
join(FRAMEWORK_DIR, "Src", "App", "gmediarender-0.0.6"),
join(FRAMEWORK_DIR, "Src", "App", "web"),
join(FRAMEWORK_DIR, "Src", "App", "OTA"),
join(FRAMEWORK_DIR, "Src", "App", "cloud"),
join(FRAMEWORK_DIR, "Src", "App", "cJSON"),
join(FRAMEWORK_DIR, "Src", "App", "ajtcl-15.04.00a", "inc"),
join(FRAMEWORK_DIR, "Src", "App", "ajtcl-15.04.00a", "target", "winnermicro"),
join(FRAMEWORK_DIR, "Src", "App", "ajtcl-15.04.00a", "external", "sha2"),
join(FRAMEWORK_DIR, "Src", "App", "cJSON"),
join(FRAMEWORK_DIR, "Src", "App", "cloud"),
join(FRAMEWORK_DIR, "Src", "App", "oneshotconfig"),
join(FRAMEWORK_DIR, "Src", "App", "dhcpserver"),
join(FRAMEWORK_DIR, "Src", "App", "dnsserver"),
join(FRAMEWORK_DIR, "Src", "App", "ping"),
join(FRAMEWORK_DIR, "Src", "App", "iperf"),
join(FRAMEWORK_DIR, "Src", "App", "libcoap", "include"),
join(FRAMEWORK_DIR, "Src", "App", "polarssl", "include"),
join(FRAMEWORK_DIR, "Src", "App", "mDNS", "mDNSCore"),
join(FRAMEWORK_DIR, "Src", "App", "mDNS", "mDNSPosix"),
join(FRAMEWORK_DIR, "Src", "App", "mqtt"),
join(FRAMEWORK_DIR, "Src", "App", "easylogger", "inc"),
join(FRAMEWORK_DIR, "Demo"),
join(FRAMEWORK_DIR, "Include"),
join(FRAMEWORK_DIR, "Include", "App"),
join(FRAMEWORK_DIR, "Include", "Net"),
join(FRAMEWORK_DIR, "Include", "WiFi"),
join(FRAMEWORK_DIR, "Include", "OS"),
join(FRAMEWORK_DIR, "Include", "Driver"),
join(FRAMEWORK_DIR, "Include", "Platform"),
join(FRAMEWORK_DIR, "Src", "App", "matrixssl", "core"), # special case: includes "list.h" which also exists in Include/
join(FRAMEWORK_DIR, "Src", "Network", "api2.0.3"),
join(FRAMEWORK_DIR, "Src", "Network", "lwip2.0.3"),
join(FRAMEWORK_DIR, "Src", "Network", "lwip2.0.3", "include"),
join(FRAMEWORK_DIR, "Src", "Network", "lwip2.0.3", "include", "arch"),
join(FRAMEWORK_DIR, "Src", "Network", "lwip2.0.3", "include", "lwip"),
join(FRAMEWORK_DIR, "Src", "Network", "lwip2.0.3", "include", "netif"),
join(FRAMEWORK_DIR, "Src", "App", "libwebsockets-2.1-stable"),
join(FRAMEWORK_DIR, "Src", "App", "httpclient"),
join(FRAMEWORK_DIR, "Src", "App", "lwm2m-wakaama", "core"),
join(FRAMEWORK_DIR, "Src", "App", "lwm2m-wakaama", "core", "er-coap-13"),
join(FRAMEWORK_DIR, "Src", "App", "lwm2m-wakaama", "examples", "shared"),
join(FRAMEWORK_DIR, "Src", "App", "lwm2m-wakaama", "examples")
],
LINKFLAGS=[
"-Os",
"-mthumb",
"-mcpu=%s" % env.BoardConfig().get("build.cpu"),
"--specs=nano.specs",
"-Wl,--gc-sections,--relax",
"-Wl,--check-sections",
"-Wl,--entry=Reset_Handler",
"-Wl,--unresolved-symbols=report-all",
"-Wl,--warn-common",
"-Wl,--defsym=LD_MAX_SIZE=%d" % board.get("upload.maximum_size"),
"-Wl,--defsym=LD_MAX_DATA_SIZE=%d" % board.get(
"upload.maximum_ram_size"),
"-static",
"-nostartfiles"
],
LIBS=[
#get_arm_math_lib(env.BoardConfig().get("build.cpu"))
"c", "m", "gcc", "stdc++", "wlan", "airkiss_log"
],
LIBPATH=[
join(FRAMEWORK_DIR, "Lib", "GNU"),
join(FRAMEWORK_DIR, "Src", "App","oneshotconfig", "lib_gcc")
]
)
#
# Linker requires preprocessing with correct RAM|ROM sizes
#
if not board.get("build.ldscript", ""):
print("Warning! Cannot find linker script for the current target!\n")
env.Replace(LDSCRIPT_PATH=join("ldscripts", "ldscript.ld"))
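# Note (hedged, added for clarity): a board-specific linker script is normally
# taken from "build.ldscript" in the board manifest; in PlatformIO projects this
# can typically be overridden with "board_build.ldscript = <file>" in
# platformio.ini. Only the fallback path above is defined by this script itself.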
#
# Process configuration flags
#
cpp_defines = env.Flatten(env.get("CPPDEFINES", []))
process_standard_library_configuration(cpp_defines)
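# For reference (a hedged example, not part of the SDK): the defines handled by
# process_standard_library_configuration() are usually supplied from the
# project's platformio.ini, e.g.
#
#   [env:w600]
#   build_flags =
#       -D PIO_FRAMEWORK_ARDUINO_STANDARD_LIB
#       -D PIO_FRAMEWORK_ARDUINO_NANOLIB_FLOAT_PRINTF
#
# The environment name "w600" is just a placeholder.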
# copy CCFLAGS to ASFLAGS (-x assembler-with-cpp mode)
env.Append(ASFLAGS=env.get("CCFLAGS", [])[:])
env.Append(
LIBSOURCE_DIRS=[
join(FRAMEWORK_DIR, "libraries")
]
)
#
# Target: Build Core Library
#
libs = []
platform_exclude_dirs = [
    # crypto source files that are not part of the build
"Common/crypto/digest/md5Matrix.c",
"Common/crypto/digest/sha1Matrix.c",
"Common/crypto/digest/sha256Matrix.c",
"Common/crypto/math/pstm_montgomery_reduce.c",
"Common/crypto/math/pstm_sqr_comba.c",
"Common/crypto/math/pstm.c",
"Common/crypto/symmetric/aesMatrix.c",
"Common/crypto/symmetric/arc4.c",
# bootup files for other compilers
"Boot/armcc",
"Boot/iccarm",
# these files #include ALL .c files --> double definitions; ignore them
"Drivers/wm_driver.c",
"Common/wm_common.c"
]
platform_exclude_dirs_src_filter = " ".join(["-<" + d + ">" for d in platform_exclude_dirs])
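# Note on the src_filter syntax used below: "+<*>" selects every source file
# under the given directory and each "-<pattern>" entry prunes the matching
# files or folders, so the string built above excludes exactly the paths listed.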
# build startup files
env.BuildSources(
join("$BUILD_DIR", "SDKPlatformBoot"),
join(FRAMEWORK_DIR, "Platform"),
src_filter="+<*> " + platform_exclude_dirs_src_filter)
# build RTOS
env.BuildSources(
join("$BUILD_DIR", "SDKRTOS"),
join(FRAMEWORK_DIR, "Src", "OS", "RTOS"),
src_filter="+<*> -<ports/port_m3.c> -<wm_rtos.c>") #exclude port file meant for other compiler
network_exclude_dirs = [
"lwip2.0.3.c",
"lwip2.0.3/apps/*",
"lwip2.0.3/core/timers.c",
"lwip2.0.3/netif/ppp/*",
"lwip2.0.3/netif/lowpan6.c",
"lwip2.0.3/netif/slipif.c",
"lwip2.0.3/core/ipv4/ip_frag.c"
]
network_exclude_dirs_src_filter = " ".join(["-<" + d + ">" for d in network_exclude_dirs])
env.BuildSources(
join("$BUILD_DIR", "SDKNetwork"),
join(FRAMEWORK_DIR, "Src", "Network"),
src_filter="+<*> " + network_exclude_dirs_src_filter)
# Build the needed App folders
app_exclude_dirs = [
# excludes for httpclient
"httpclient/wm_http_compile.c",
"httpclient/wm_httpclient_if.c",
# excludes for libwebsockets
"libwebsockets-2.1-stable/alloc.c",
"libwebsockets-2.1-stable/daemonize.c",
"libwebsockets-2.1-stable/extension*",
"libwebsockets-2.1-stable/extension*",
"libwebsockets-2.1-stable/getifaddrs.c",
"libwebsockets-2.1-stable/http2.c",
"libwebsockets-2.1-stable/hpack.c",
"libwebsockets-2.1-stable/lejp.c",
"libwebsockets-2.1-stable/lejp-conf.c",
"libwebsockets-2.1-stable/libev.c",
"libwebsockets-2.1-stable/libuv.c",
"libwebsockets-2.1-stable/lws-plat-esp8266.c",
"libwebsockets-2.1-stable/lws-plat-mbed3.c",
"libwebsockets-2.1-stable/lws-plat-mbed3.cpp",
"libwebsockets-2.1-stable/lws-plat-unix.c",
"libwebsockets-2.1-stable/lws-plat-win.c",
"libwebsockets-2.1-stable/minihuf.c",
"libwebsockets-2.1-stable/minilex.c",
"libwebsockets-2.1-stable/rewrite.c",
"libwebsockets-2.1-stable/server.c",
"libwebsockets-2.1-stable/server-handshake.c",
"libwebsockets-2.1-stable/sha-1.c",
"libwebsockets-2.1-stable/smtp.c",
"libwebsockets-2.1-stable/ssl-http2.c",
"libwebsockets-2.1-stable/ssl-server.c",
# excludes for libcoap
"libcoap/coap_io.c",
"libcoap/uri_libcoap.c",
# excludes for lwm2m-wakaama
"lwm2m-wakaama/examples/bootstrap_server/*",
"lwm2m-wakaama/examples/client/*",
"lwm2m-wakaama/examples/server/*",
"lwm2m-wakaama/examples/shared/dtlsconnection.c",
"lwm2m-wakaama/tests/*",
# excludes for web
"web/fsdata_ap_config.c",
"web/fsdata.c",
# excludes for wm_atmc
"wm_atcmd/wm_cmd.c", # includes all c files.
"wm_atcmd/wm_uart_timer.c",
# excludes for matrixssl
"matrixssl/wm_matrixssl_compile.c"
]
app_exclude_dirs_src_filter = " ".join(["-<" + d + ">" for d in app_exclude_dirs])
env.BuildSources(
join("$BUILD_DIR", "SDKApps"),
join(FRAMEWORK_DIR, "Src", "App"),
src_filter="+<*> " + app_exclude_dirs_src_filter)
#env.Prepend(LIBS=libs)
|
the-stack_106_31822 | from locusts.support import *
from locusts.environment import *
def create_exec_file(id_list, command_template, indir, outdir, output_filename_templates,
exec_filename, shared_inputs=[], inputs_for_clean_environment=[]):
with open(exec_filename, "w") as exec_file:
for ip, idx in enumerate(id_list):
exec_file.write(('c{0}:\t{1}\n'.format(str(ip).zfill(6), command_template.replace("<id>", idx))))
if inputs_for_clean_environment:
inls = ' '.join([indir + x.replace("<id>", idx) for x in inputs_for_clean_environment])
exec_file.write('i{0}:\t{1}\n'.format(str(ip).zfill(6), inls))
if shared_inputs:
shls = ' '.join([x.replace("<id>", idx).replace(":", ":"+indir) for x in shared_inputs])
exec_file.write('s{0}:\t{1}\n'.format(str(ip).zfill(6), shls))
ols = ' '.join([x.replace("<id>", idx) for x in output_filename_templates])
exec_file.write('o{0}:\t{1}\n'.format(str(ip).zfill(6), ols))
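# Illustrative example (hypothetical IDs and paths) of the exec file format
# written above and parsed later by highly_parallel_job_manager(); fields are
# tab-separated:
#
#   c000000:    ./run.sh sample_A
#   i000000:    inputs/sample_A.fasta
#   s000000:    params.cfg:inputs/params.cfg
#   o000000:    sample_A_result.txt
#
# "c" lines carry the command, "i" lines the clean-environment inputs, "s" lines
# the shared inputs as name:path pairs, and "o" lines the expected outputs.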
def check_remote_path(remote_machine, root_path):
"""Checks existence of parent folder of remote root path
and non-existence of remote root path itself"""
lsdir = subprocess.Popen(["ssh", remote_machine, "ls", root_path], stdout=open('/dev/null', 'w'), stderr=subprocess.PIPE).stderr.read().decode('ascii').strip()
lsdirup = subprocess.Popen(["ssh", remote_machine, "ls", os.path.dirname(root_path[:-1])], stdout=open('/dev/null', 'w'), stderr=subprocess.PIPE).stderr.read().decode('ascii').strip()
# If parent directory of root path is not there, error
if "ls: cannot access" in lsdirup: # If ls on parent directory gives error
print('Failed to create {0}\nPath {1} not present' \
.format(root_path, os.path.dirname(root_path[:-1])))
print(lsdirup)
exit(1)
# If root path already there, error: you must delete it yourself
elif "ls: cannot access" not in lsdir: # If ls on the exec directory does not give error
print(('Exec path {0} is already present in remote location {1}\n'
'No permission to overwrite it, please delete it manually.') \
.format(root_path, remote_machine))
print('ssh {1} rm -rf {0}'.format(root_path, remote_machine))
exit(1)
def generate_exec_filesystem(protocol_triad, cache_dir, job_data, runtime_root_path, batch_job_code,
data_transfer_protocol, env_instr=None, build_envroot=None):
devnull = open('/dev/null', 'w')
# Make sure root_path is in the format "/abspath/to/exec/" or "relpath/to/exec/" (no extra "/" or missing trailing "/")
# NOTICE: root_path denotes where the locusts filesystem root directory will be at execution time
# if protocol == "remote", it is a path on the remote machine!
runtime_root_path = reduceslash(runtime_root_path + "/")
# Define localbuild_root_path
# NOTICE: localbuild_root_path contains the filesystem root directory *during creation*
# It is thus always a local path.
# It can be an absolute path or a relative path starting from the current working dir (from where locusts was called)
protocol, remote_machine, local_shared_folder = protocol_triad
if protocol == "local":
# localbuild_root_path <- real filesystem root
if not os.path.exists(runtime_root_path):
os.mkdir(runtime_root_path)
localbuild_root_path = runtime_root_path = os.path.abspath(runtime_root_path) + '/'
elif protocol == "remote":
# localbuild_root_path <- temporary local path
# WARNING: PAY ATTENTION TO THE FORMAT OF cache_dir!!!
localbuild_root_path = reduceslash(cache_dir + "/" + batch_job_code + "_tmp_root/")
if not os.path.exists(localbuild_root_path):
os.mkdir(localbuild_root_path)
localbuild_root_path = os.path.abspath(localbuild_root_path) + '/'
elif protocol == "remote-sharedfs":
# localbuild_root_path <- local shared folder (real filesystem root)
localbuild_root_path = local_shared_folder
if not os.path.exists(localbuild_root_path):
os.mkdir(localbuild_root_path)
localbuild_root_path = os.path.abspath(localbuild_root_path) + '/'
    # If the user provides the instruction file but not the local address of the
    # environment, they do not want the environment replicated (they assume it
    # is already in place)
# NOTICE: the snapshot will be taken anyway
env_and_do_not_replicate = True if (env_instr and not build_envroot) else False
# Filesystem locations dictionary contains the main locations of the environment
fs_locations = {
"build_root" : localbuild_root_path,
"runtime_root" : runtime_root_path,
"build_shared" : localbuild_root_path + "shared_contents/",
"runtime_shared" : runtime_root_path + "shared_contents/",
"build_exec" : localbuild_root_path + "exec_dir/",
"runtime_exec" : runtime_root_path+ "exec_dir/",
"build_work" : localbuild_root_path + batch_job_code + "/",
"runtime_work" : runtime_root_path + batch_job_code + "/"
}
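    # For orientation (hypothetical paths): with protocol == "remote",
    # batch_job_code == "JOB" and runtime_root_path == "/scratch/locusts/",
    # the entries above resolve to something like
    #   build_work   -> <cache_dir>/JOB_tmp_root/JOB/   (local staging copy)
    #   runtime_work -> /scratch/locusts/JOB/           (path on the remote machine)
    # i.e. "build_*" paths are always local, while "runtime_*" paths are where
    # the filesystem will live at execution time.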
# Step 1: create local versions of Shared, Exec and Work sub-environments ------------------------
# Create main locations locally
build_shared_path = fs_locations["build_shared"]
if not os.path.exists(build_shared_path):
os.mkdir(build_shared_path)
if not os.path.exists(fs_locations["build_exec"]):
os.mkdir(fs_locations["build_exec"])
if not os.path.exists(fs_locations["build_work"]):
os.mkdir(fs_locations["build_work"])
exec_filesystem = {} # For each task contains remote work path and shared files paths
shared = {}
# Step 2: create Shared sub-filesystem -------------------
for jdi, jd in enumerate(job_data):
batchno = jdi // 10000
exec_filesystem[jd['unique_code']] = [None, {}]
for skey in jd['shared_inps']:
if skey not in shared:
# Create local batch dir, and copy file there
batchisp = (len(shared) + 1)//10000
build_shbatch_folder = build_shared_path + 'batch_' + str(batchisp) + '/' # NOTICE: build_shared_path is local
if not os.path.exists(build_shbatch_folder):
os.mkdir(build_shbatch_folder)
build_shdest_path = build_shbatch_folder + os.path.basename(jd['shared_inps'][skey])
shutil.copyfile(jd['shared_inps'][skey], build_shdest_path)
# Runtime shared destination dir name
runtime_shdest_path = (fs_locations["runtime_shared"]
+ 'batch_' + str(batchisp) + '/' + os.path.basename(jd['shared_inps'][skey]))
shared[skey] = runtime_shdest_path # shared folder path at execution time
else:
runtime_shdest_path = shared[skey]
exec_filesystem[jd['unique_code']][1]["<shared>"+skey] = runtime_shdest_path
if protocol == 'remote' and not env_and_do_not_replicate:
check_remote_path(remote_machine, fs_locations["runtime_root"])
# """
if env_instr:
tr_cmd = ["bash", data_transfer_protocol, fs_locations["build_root"], "{0}:{1}".format(remote_machine, fs_locations["runtime_root"])]
if DEBUG:
print("COMMAND", " ".join(tr_cmd))
p = subprocess.Popen(tr_cmd)
else:
p = subprocess.Popen(tr_cmd,
stderr=devnull, stdout=devnull)
p.wait()
else:
mkd1_cmd = ["ssh", remote_machine, "mkdir", fs_locations["runtime_root"]]
mkd2_cmd = ["ssh", remote_machine, "mkdir", fs_locations["runtime_exec"]]
tr_cmd = ["bash", data_transfer_protocol, fs_locations["build_shared"], "{0}:{1}".format(remote_machine, fs_locations["runtime_shared"])]
if DEBUG:
print("COMMAND", " ".join(mkd1_cmd))
p = subprocess.Popen(mkd1_cmd)
p.wait()
print("COMMAND", " ".join(mkd2_cmd))
p = subprocess.Popen(mkd2_cmd)
p.wait()
print("COMMAND", " ".join(tr_cmd))
p = subprocess.Popen(tr_cmd)
else:
p = subprocess.Popen(
mkd1_cmd,
stderr=devnull, stdout=devnull)
p.wait()
p = subprocess.Popen(
mkd2_cmd,
stderr=devnull, stdout=devnull)
p.wait()
p = subprocess.Popen(
tr_cmd,
stderr=devnull, stdout=devnull)
p.wait()
time.sleep(10)
# """
# Step 3: create Work sub-filesystem: "custom" or "locusts" mode -------------------
if env_instr: # "custom" mode: environment is created following instruction file
# Replace tags in instruction file lines
# Choose replacement whether it is in remote or in local
if protocol == "remote":
mkdircmd = "ssh {0} mkdir".format(remote_machine)
copycmd = "bash {0}".format(data_transfer_protocol)
runtime_envroot_cp = "{0}:{1}".format(remote_machine, fs_locations["runtime_work"])
runtime_envroot_mkdir = fs_locations["runtime_work"]
workpath = fs_locations["runtime_work"]
elif protocol == 'remote-sharedfs':
mkdircmd, copycmd = "mkdir", "cp"
runtime_envroot_cp = fs_locations["runtime_work"]
runtime_envroot_mkdir = fs_locations["runtime_work"]
workpath = fs_locations["runtime_work"]
elif protocol == "local":
workpath = build_envroot
# Read the instruction file, replace tags and execute
workdir, instructions = parse_fs_tree(env_instr, build_envroot)
if protocol != 'local' and not env_and_do_not_replicate:
for instr in instructions:
cmd = instr.replace("<build_envroot>", build_envroot) \
.replace("<mkdir>", mkdircmd) \
.replace("<runtime_envroot_mkdir>", runtime_envroot_mkdir) \
.replace("<copy>", copycmd) \
.replace("<runtime_envroot_cp>", runtime_envroot_cp)
cmdlist = [x for x in cmd.split() if x]
if DEBUG:
print("COMMAND", cmdlist)
p = subprocess.Popen(cmdlist)
else:
p = subprocess.Popen(cmdlist, stdout=devnull, stderr=devnull)
p.wait()
time.sleep(10)
if not workdir:
print(("ERROR (generate_filesystem): Please specify the directory "
"from where to launch the specified commands\nOn top of the "
"filesystem specifications file, add #WORKDIR <path> where "
"<path> is a relative path from the environment root dir"))
exit(1)
# Task files are in the Work sub-filesystem but outside the imported environment
        # A cache in the main Work folder is created to contain them
build_cache = fs_locations["build_work"] + '.cache/'
runtime_cache = fs_locations["runtime_work"] + '.cache/'
if os.path.exists(build_cache):
shutil.rmtree(build_cache)
os.mkdir(build_cache)
build_task_dir = build_cache + 'tasks/'
runtime_task_dir = runtime_cache + 'tasks/'
os.mkdir(build_task_dir)
for jdi, jd in enumerate(job_data):
batchno = jdi // 10000
# Create local task folders and copy clean env files
build_batch_folder = build_task_dir + 'batch_' + str(batchno) + '/'
if not os.path.exists(build_batch_folder):
os.mkdir(build_batch_folder)
runtime_batch_folder = runtime_task_dir + 'batch_' + str(batchno) + '/'
build_task_path = build_batch_folder + 'task_' + jd['unique_code'] + '.sh'
runtime_task_path = runtime_batch_folder + 'task_' + jd['unique_code'] + '.sh'
exec_filesystem[jd['unique_code']][0] = (runtime_batch_folder, os.path.basename(runtime_task_path))
# There, create individual task files
# Correctly indent the command, writes it in task.sh and gives it exe privileges
new_command = beautify_bash_oneliner(
"cd {0}; ".format(workdir) + jd['command'],
replacements=exec_filesystem[jd['unique_code']][1])
# print(new_command)
with open(build_task_path, "w") as tf:
tf.write(new_command)
subprocess.call(["chmod", "777", build_task_path])
# This new cache has to be copied remotely, in a separate way
if protocol == 'remote':
tr_cmd = ["bash", data_transfer_protocol, build_cache, "{0}:{1}".format(remote_machine, runtime_cache)]
if DEBUG:
print("COMMAND", tr_cmd)
p = subprocess.Popen(tr_cmd)
else:
p = subprocess.Popen(tr_cmd,
stderr=devnull, stdout=devnull)
p.wait()
else: # "locusts" mode: optimized environment for parallel and safe execution
# Batch folders for every 10000 tasks
for i in range(len(job_data)//10000 + 1):
batch_folder = fs_locations["build_work"] + 'batch_' + str(i) + '/'
if not os.path.exists(batch_folder):
os.mkdir(batch_folder)
# Job folders for individual tasks
for jdi, jd in enumerate(job_data):
batchno = jdi // 10000
# Work sub-environment --------------------------------------------
# Create local task folders and copy clean env files
job_folder = fs_locations["build_work"] + 'batch_' + str(batchno) + '/' + 'task_' + jd['unique_code'] + '/'
if not os.path.exists(job_folder):
os.mkdir(job_folder)
for fpath in jd['clean_env_inps']:
fdest_path = job_folder + os.path.basename(fpath)
shutil.copyfile(fpath, fdest_path)
# There, create task.sh file
# Correctly indent the command, writes it in task.sh and gives it exe privileges
new_command = beautify_bash_oneliner(
jd['command'],
replacements=exec_filesystem[jd['unique_code']][1])
task_filename = job_folder + "task.sh"
with open(task_filename, "w") as tf:
tf.write(new_command)
subprocess.call(["chmod", "777", task_filename])
# Runtime paths for task folders
rem_jf = (fs_locations["runtime_work"] + 'batch_'
+ str(batchno) + '/' + 'task_' + jd['unique_code'] + '/')
exec_filesystem[jd['unique_code']][0] = (rem_jf, "task.sh")
if protocol == "remote":
# Transfer local temporary filesystem in the remote location
tr_cmd = ["bash", data_transfer_protocol, fs_locations["build_work"], "{0}:{1}".format(remote_machine, fs_locations["runtime_work"])]
if DEBUG:
print("COMMAND", tr_cmd)
p = subprocess.Popen(tr_cmd)
else:
p = subprocess.Popen(tr_cmd,
stderr=devnull, stdout=devnull)
p.wait()
return {k : exec_filesystem[k][0] for k in exec_filesystem}, fs_locations
def create_manager_scripts(protocol_triad, cache_dir, task_folders, partition,
cpus_per_node, requested_nodes, batch_job_code, fs_locations,
data_transfer_protocol, singinfo=(None, None, None), task_cd=None,
email_address="", email_type="ALL", tasks_per_core=1,
nodescratch_folder="", nodescratch_mem="", walltime="24:00:00",
outer_statements="", exclusive=False):
protocol, remote_machine, hpc_shared_folder = protocol_triad
devnull = open('/dev/null', 'w')
# Check consistency of parameters
turnon_mailtype, turnon_email_address = "#", "#"
if email_address:
if "@" not in email_address:
print("ERROR (manager): The email address you provided is not valid")
print(" "+email_address)
exit(1)
else:
turnon_mailtype, turnon_email_address = "", ""
if nodescratch_folder and nodescratch_mem:
turnon_nodescratch = ""
elif (not nodescratch_folder) and (not nodescratch_mem):
turnon_nodescratch = "#"
else:
print(("ERROR (manager): -nodescratch_folder and -nodescratch_mem options"
"must be either both present or absent"))
exit(1)
turnon_exclusiveness = "#"
if exclusive:
turnon_exclusiveness = ""
if len(walltime) < 7 or walltime[-3] != ":" or walltime[-6] != ":":
print("ERROR (manager): walltime is not well formatted: XX:XX:XX")
print(" "+walltime)
exit(1)
# The location of the template file for the 1-node-manager script
outer_template_filename = os.path.dirname(os.path.realpath(__file__)) + '/outer_template_manager.txt'
inner_template_filename = os.path.dirname(os.path.realpath(__file__)) + '/inner_template_manager.txt'
# Is there singularity? Does it use a module?
if singinfo[0]:
singularitypath, singularitycont, singmodload = singinfo
else:
singularitypath, singularitycont, singmodload = "", "", ""
# List of task ids
taskid_list = sorted([k for k in task_folders])
# General method for dividing len(taskid_list) elements in requested_nodes
# lists. Ex: 6 jobs in 4 processors = [2,2,1,1]
tasks_per_node_list = distribute_items_in_fixed_length_list(
requested_nodes,
len(taskid_list)
)
    # Reassess the number of requested nodes to avoid asking for too many:
    # another node is only requested once every cpu of every node already has
    # at least 10 tasks assigned to it.
    # WARNING: this does not take the length of a task into account!
if protocol != 'local' and min(tasks_per_node_list) < 10*cpus_per_node:
grand = 10*cpus_per_node / max(tasks_per_node_list)
rqn = (
int(requested_nodes / grand)
+ max(tasks_per_node_list) - min(tasks_per_node_list)
)
requested_nodes = max(1, rqn)
tasks_per_node_list = distribute_items_in_fixed_length_list(
requested_nodes,
len(taskid_list)
)
print("Protocol used:", protocol)
print("Actual number of nodes used:", requested_nodes)
print("Each with {0} cpus".format(cpus_per_node))
# Each manager script handles one node
    # The manager reads a file of addresses where it will find the clean
# environments to manage, knowing that in each of those folders it will
# find a task.sh executable to be run
tasks_per_job = []
for jobid in range(requested_nodes):
# Define script variables
tasks_per_node = tasks_per_node_list[jobid]
ik = sum(tasks_per_node_list[:jobid])
tfname = 'taskfile_{0}{1}'.format(batch_job_code, str(jobid).zfill(3))
task_filename = fs_locations["runtime_exec"] + '{0}.txt'.format(tfname)
outpath = fs_locations["runtime_exec"] + '{0}.out.txt'.format(tfname)
errpath = fs_locations["runtime_exec"] + '{0}.err.txt'.format(tfname)
# Compiles outer manager (with the SLURM keywords and possibly singularity) and inner manager (the core manager itself)
for prefmng, template_filename in [('outer_', outer_template_filename), ('inner_', inner_template_filename)]:
# Compile using the template
with open(template_filename) as tempf:
text = tempf.read()
text = text.replace('<jobd>', batch_job_code) \
.replace('<jobid>', str(jobid).zfill(3)) \
.replace('<time>', str(walltime)) \
.replace('<cpuspertask>', str(cpus_per_node)) \
.replace('<taskspercore>', str(tasks_per_core)) \
.replace('<turnonnodescratch>', turnon_nodescratch) \
.replace('<nodescratchfolder>', nodescratch_folder) \
.replace('<nodescratchmem>', nodescratch_mem) \
.replace('<turnonmailtype>', turnon_mailtype) \
.replace('<mailtype>', email_type) \
.replace('<turnonemailaddress>', turnon_email_address) \
.replace('<turnonexclusiveness>', turnon_exclusiveness) \
.replace('<emailaddress>', email_address) \
.replace('<outpath>', outpath) \
.replace('<errpath>', errpath) \
.replace('<taskfile>', task_filename) \
.replace('<exedir>', fs_locations["runtime_exec"]) \
.replace('<partition>', partition) \
.replace('<main_path>', fs_locations["runtime_root"]) \
.replace('<extra_outer_statements>', outer_statements) \
.replace('<singularity_module_load>', singmodload) \
.replace('<singularity_command>', "{0} exec {1} ".format(singularitypath, singularitycont)) \
.replace('<inner_manager>', "inner_manager_{0}{1}.slurm".format(batch_job_code, str(jobid).zfill(3)))
# Write manager file and give it exe privilege
manager_filename = (
fs_locations["build_exec"] + prefmng
+ 'manager_{0}{1}.slurm'.format(batch_job_code, str(jobid).zfill(3))
)
with open(manager_filename, 'w') as mf:
mf.write(text)
subprocess.call(["chmod", "777", manager_filename])
        # Write task file: the file containing the addresses of the clean env folders
# the manager has to deal with
task_filename = (fs_locations["build_exec"] \
+ 'taskfile_{0}{1}.txt').format(batch_job_code, str(jobid).zfill(3))
with open(task_filename, 'w') as tf:
tf.write('\n'.join(
[(x + '\t' + task_folders[x][0] + '\t' + task_folders[x][1])
for x in taskid_list[ik:ik+tasks_per_node]]
) + '\n')
# Job manager identifier and associated tasks
tasks_per_job.append((jobid, taskid_list[ik:ik+tasks_per_node]))
if protocol == 'remote':
# Copy all files in exe dir in the remote counterpart
filestocopy = fs_locations["build_exec"]+"/"
tr_cmd = ["bash", data_transfer_protocol, filestocopy, "{0}:{1}".format(remote_machine, fs_locations["runtime_exec"])]
if DEBUG:
print("COMMAND", tr_cmd)
p = subprocess.Popen(tr_cmd)
else:
p = subprocess.Popen(tr_cmd,
stderr=devnull, stdout=devnull)
p.wait()
# Remove the local root directory
# p = subprocess.Popen(
# ["rm", "-rf", fs_locations["build_root"]],
# stderr=devnull, stdout=devnull)
# p.wait()
# rmhidden_cmd = ["ssh", remote_machine, "rm", fs_locations["runtime_exec"]+".*"]
# else:
# rmhidden_cmd = ["rm", fs_locations["build_exec"]+".*"]
# It is important to remove any hidden file from the runtime exe dir
# because locusts relies on those files to understand the degree of
# completion
# if DEBUG:
# p = subprocess.Popen(rmhidden_cmd)
# else:
# p = subprocess.Popen(
# rmhidden_cmd, stderr=devnull, stdout=devnull
# )
# p.wait()
# List of 2-tuples (job ID, [tasks IDs])
return tasks_per_job
def remote_job_control(protocol_triad, batch_job_code, fs_locations, tasks_per_job,
waiting_time):
# There must be a passwordless connection between the two machines: to
# achieve it, ssh-agent and then ssh-add.
# If the two machines share a folder - and thus there is no need to ssh to
# the remote machine just for checking - it must be stated here
protocol, remote_machine, hpc_shared_folder = protocol_triad
devnull = open('/dev/null', 'w')
if protocol != 'local':
for job_id, task_list in tasks_per_job:
chk_cmd = [
"ssh",
remote_machine,
"sbatch",
fs_locations["runtime_exec"] + 'outer_manager_{0}{1}.slurm' \
.format(batch_job_code, str(job_id).zfill(3))
]
if DEBUG:
print("COMMAND", chk_cmd)
p = subprocess.Popen(chk_cmd)
else:
p = subprocess.Popen(chk_cmd,
stderr=devnull,
stdout=devnull
)
else:
# If no hpc, there is only one node, i.e. one manager
mname = 'outer_manager_{0}{1}.slurm'.format(batch_job_code, str(0).zfill(3))
nhp_cmd = ["nohup", fs_locations["runtime_exec"] + mname]
if DEBUG:
print("COMMAND", nhp_cmd)
p = subprocess.Popen(nhp_cmd)
else:
p = subprocess.Popen(nhp_cmd,
stderr=devnull, stdout=devnull
)
# print(["nohup", fs_locations["runtime_exec"] + mname])
waitcount = {}
for job_id, task_list in tasks_per_job:
waitcount[job_id] = 0
is_over = False
while not is_over:
time.sleep(waiting_time)
is_over = True
for job_id, task_list in tasks_per_job:
macname = "{0}/.manager_activity_check_{1}{2}".format(
fs_locations["runtime_exec"],
batch_job_code,
str(job_id).zfill(3)
)
macname_sf = "{0}/.manager_activity_check_{1}{2}".format(
fs_locations["build_exec"],
batch_job_code,
str(job_id).zfill(3)
)
if protocol == 'remote':
manager_cmd = ["ssh", remote_machine, "ls", "-latrh", macname]
chk_time_cmd = "ssh " + remote_machine + " echo $(date +%H:%M)"
elif protocol == 'remote-sharedfs':
manager_cmd = ["ls", "-latrh", macname_sf]
chk_time_cmd = "echo $(date +%H:%M)"
elif protocol == 'local':
manager_cmd = ["ls", "-latrh", macname]
chk_time_cmd = "echo $(date +%H:%M)"
if DEBUG:
print("COMMAND", manager_cmd)
touch_time_txt = subprocess.Popen(
manager_cmd,
stdout=subprocess.PIPE
).stdout.read().decode('ascii')
else:
touch_time_txt = subprocess.Popen(
manager_cmd,
stderr=devnull,
stdout=subprocess.PIPE
).stdout.read().decode('ascii')
if touch_time_txt.strip():
if DEBUG:
print("COMMAND", chk_time_cmd)
local_time_l = subprocess.Popen(
chk_time_cmd,
stdout=subprocess.PIPE,
shell=True
).stdout.read().decode('ascii').split()[0].split(":")
else:
local_time_l = subprocess.Popen(
chk_time_cmd,
stderr=devnull,
stdout=subprocess.PIPE,
shell=True
).stdout.read().decode('ascii').split()[0].split(":")
                # Compare the manager's last heartbeat time (the HH:MM time field
                # of the ls -l line) with the current time, both in minutes; the
                # modulo handles wrap-around at midnight. The manager counts as
                # active if its check file was touched within the last minute.
                touch_time_l = touch_time_txt.split()[7].split(":")
                touch_time = int(touch_time_l[0])*60 + int(touch_time_l[1])
                local_time = int(local_time_l[0])*60 + int(local_time_l[1])
                coeff = (local_time - touch_time) % (24*60)
                is_active = coeff <= 1
if is_active:
is_over = False
print("Job", job_id, "running")
else:
print("Job", job_id, "ended")
else:
is_there = False
                # Check if processes are scheduled. For this you need the job IDs given by the machine...
if protocol != 'local':
isitthere_cmd = ["ssh " + remote_machine + ' \'squeue --format="%.18i %.9P %.100j %.8u %.2t %.10M %.6D %R"\'']
if DEBUG:
print("COMMAND", isitthere_cmd)
isitthere_txt = subprocess.Popen(
isitthere_cmd,
stdout=subprocess.PIPE,
shell=True
).stdout.read().decode('ascii')
else:
isitthere_txt = subprocess.Popen(
isitthere_cmd,
stderr=devnull,
stdout=subprocess.PIPE,
shell=True
).stdout.read().decode('ascii')
for line in isitthere_txt.split("\n")[1:]:
if not line.strip():
continue
fields = line.split()
if fields[2].strip() == '{0}{1}'.format(batch_job_code, str(job_id).zfill(3)):
print("Job", job_id, "pending")
is_there = True
is_over = False
break
if not is_there:
waitcount[job_id] += 1
if waitcount[job_id] > 3:
print("Job", job_id, "error/aborted")
else:
print("Job", job_id, "waiting...")
is_over = False
else:
print("Job", job_id, "error/aborted")
print("Jobs are over")
def gather_results(protocol_triad, cache_dir, job_data, batch_job_code,
task_folders, fs_locations, tasks_per_job, log_dir,
data_transfer_protocol, analysis_func=None, build_envroot=None,
snapshot=None, noenvrm=False):
protocol, remote_machine, hpc_shared_folder = protocol_triad
devnull = open('/dev/null', 'w')
# The database is now connected with local machine
if protocol == 'remote':
shutil.rmtree(fs_locations['build_root'])
scp_cmd = ["bash", data_transfer_protocol, "{0}:{1}".format(remote_machine, fs_locations["runtime_root"]), fs_locations['build_root']]
if DEBUG:
print("COMMAND", scp_cmd)
p = subprocess.Popen(scp_cmd)
else:
p = subprocess.Popen(scp_cmd, stderr=devnull, stdout=devnull)
p.wait()
# Check if some job did not even start and adds it to the reschedule set
reschedule = set()
for job_id, task_list in tasks_per_job:
# Move the mail exec task files to logs/
tfname = "taskfile_{0}{1}.*".format(batch_job_code, str(job_id).zfill(3))
task_filename = fs_locations["build_exec"] + tfname
mv_cmd = "mv {0} {1}".format(task_filename, log_dir)
#mv_cmd = ["mv", task_filename, log_dir]
if DEBUG:
print("COMMAND", mv_cmd)
p = subprocess.Popen(mv_cmd, shell=True)
else:
p = subprocess.Popen(mv_cmd,
shell=True,
stderr=devnull,
stdout=devnull
)
p.wait()
        # Check whether any of the tasks is still pending (not executed) or
        # running (it might have been interrupted). If so, add it to the
        # reschedule set
sname = "status_{0}{1}".format(batch_job_code, str(job_id).zfill(3))
status_filename = fs_locations["runtime_exec"] + sname
grep_pending_cmd = ["grep", "'pending'", status_filename]
if remote_machine:
grep_pending_cmd = ["ssh", remote_machine] + grep_pending_cmd
if DEBUG:
print("COMMAND", grep_pending_cmd)
txtlines = subprocess.Popen(
grep_pending_cmd,
stdout=subprocess.PIPE
).stdout.readlines()
else:
txtlines = subprocess.Popen(
grep_pending_cmd,
stderr=devnull,
stdout=subprocess.PIPE
).stdout.readlines()
for line in txtlines:
(
internal_id,
task_id,
status,
task_dirpath
) = line.decode('ascii').split()
if status in ['running', 'pending']:
reschedule.add(task_id)
# For the jobs that have completed, checks the expected outputs
if not build_envroot:
completed_with_error = set()
output_paths = {}
for jd in job_data:
if jd['unique_code'] in reschedule:
continue
for output in jd['outputs']:
# Get the Work, batch and task output addresses
tfuc = task_folders[jd['unique_code']][0]
slashlist = re.sub("/(/+)", "/", tfuc).split("/")
(
relative_env_folder,
relative_batch_folder,
relative_task_folder
) = [x+"/" for x in slashlist][-4:-1]
output_batch_dir = jd['output_dir'] + relative_batch_folder
output_task_dir = (jd['output_dir'] + relative_batch_folder
+ relative_task_folder)
# Create batch and task directories in the output folder
if not os.path.exists(output_batch_dir):
os.mkdir(output_batch_dir)
if not os.path.exists(output_task_dir):
os.mkdir(output_task_dir)
# Move output from the parsed address to the output folder
# and checks if output is there
output_path = (fs_locations["build_work"]
+ relative_batch_folder + relative_task_folder + output)
if os.path.exists(output_path) and ((analysis_func == None) or
(analysis_func(output_path))):
mv_cmd = ["mv", output_path, output_task_dir]
if DEBUG:
print("COMMAND", mv_cmd)
p = subprocess.Popen(mv_cmd)
else:
p = subprocess.Popen(mv_cmd,
stderr=devnull,
stdout=devnull
)
p.wait()
if jd['unique_code'] not in output_paths:
output_paths[jd['unique_code']] = {}
output_paths[jd['unique_code']][output] = output_task_dir + output
else:
completed_with_error.add(jd['unique_code'])
# Compile the main output file
output_logfilename = jd['output_dir'] + "output.log"
with open(output_logfilename, "w") as of:
for jid, jd in sorted([(k['unique_code'], k) for k in job_data], key=lambda x: x[0]):
if jid in output_paths:
for output in jd['outputs']:
if output in output_paths[jid]:
status = "present"
path = output_paths[jid][output]
elif jid in completed_with_error:
status = "error"
path = "-"
else:
status = "missing"
path = "-"
of.write("{0}\t{1}\t{2}\n".format(output, status, path))
# This will be the new job_data, containing all jobs that
# have to be rescheduled
output_d = { k['unique_code'] : k
for k in job_data if k['unique_code'] in reschedule }
elif snapshot:
new_snapshot = take_snapshot(protocol_triad, build_envroot)
newly_added = compare_snapshots(snapshot, new_snapshot)
snap_log = log_dir + 'modified.log'
with open(snap_log, 'w') as snapf:
# Creates all new folders
for d in sorted(newly_added):
if not newly_added[d]:
snapf.write("Directory created: {0}\n".format(build_envroot+d))
if protocol != 'local':
if not os.path.exists(build_envroot+d):
os.mkdir(build_envroot+d)
# Populates new and old folders
for d in sorted(newly_added):
for f in sorted(newly_added[d]):
snapf.write("File created/modified: {0}\n".format(build_envroot+f))
if protocol != 'local':
fname = fs_locations["build_root"] + f # Local copies have been restored at the beginning of this function
cpcmd = ["cp", fname, build_envroot+f]
if DEBUG:
print("COMMAND", cpcmd)
p = subprocess.Popen(cpcmd)
else:
p = subprocess.Popen(cpcmd, stdout=devnull, stderr=devnull)
p.wait()
output_d, output_paths, completed_with_error = {}, None, None
# Remove all repositories
if not DEBUG:
if protocol != 'local':
sshrm_cmd = ["ssh", remote_machine, "rm", "-rf", fs_locations['runtime_root']]
if DEBUG:
print("COMMAND", sshrm_cmd)
p = subprocess.Popen(sshrm_cmd)
else:
p = subprocess.Popen(sshrm_cmd, stderr=devnull, stdout=devnull)
p.wait()
rm_cmd = ["rm", "-rf", fs_locations['build_root']]
if DEBUG:
print("COMMAND", rm_cmd)
p = subprocess.Popen(rm_cmd)
else:
p = subprocess.Popen(rm_cmd, stderr=devnull, stdout=devnull)
p.wait()
return output_d, output_paths, completed_with_error
def take_snapshot(protocol_triad, root_dir):
protocol, remote_machine, hpc_shared_folder = protocol_triad
devnull = open('/dev/null', 'w')
"""
for treedir, files in tree:
for f in files:
if f == "*":
lsrcmd = ["ls", "-ltrh",treedir]
elif f == "**":
lsrcmd = ["ls", "-ltrhR",treedir]
    ##### LEFT OFF HERE
"""
lsrcmd = ["ls", "-ltrhR", root_dir]
if remote_machine:
lsrcmd = ["ssh", remote_machine] + lsrcmd
if DEBUG:
print("COMMAND", lsrcmd)
textlines = subprocess.Popen(lsrcmd,
stdout=subprocess.PIPE).stdout.readlines()
else:
textlines = subprocess.Popen(lsrcmd,
stdout=subprocess.PIPE, stderr=devnull).stdout.readlines()
snapshot = {}
for l in textlines:
line = l.decode('ascii')
if not line.strip() or line.startswith('total'):
continue
if len(line.split()) == 1:
dirname = line.strip()[:-1] # line terminates with ":"
snapshot[dirname] = {}
elif len(line.split()) == 9:
priv, _, usr1, usr2, s, d1, d2, d3, filename = line.split()
if priv.startswith('-'): # If item is not a folder
snapshot[dirname][filename] = (priv, usr1, usr2, s, d1, d2, d3)
else:
print(("WARNING (take_snapshot): output of ls -ltrhR is not"
"in the expected format"))
print(line)
return snapshot
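# For reference, the parser above expects "ls -ltrhR" output blocks of the form
# (hypothetical listing):
#
#   /path/to/root/subdir:
#   total 8.0K
#   -rw-r--r-- 1 user group 1.2K Jan 01 12:00 result.txt
#
# Header lines ending in ":" open a new directory entry in the snapshot, "total"
# lines are skipped, and 9-field lines whose permissions start with "-" are
# recorded as files under the current directory.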
def compare_snapshots(snap1, snap2):
newtosnap2 = {}
for d in snap2:
if d not in snap1:
newtosnap2[d] = snap2[d]
continue
for f in snap2[d]:
if f not in snap1[d]:
if d not in newtosnap2:
newtosnap2[d] = []
newtosnap2[d].append(f)
return newtosnap2
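# Minimal illustration (hypothetical data) of the diff computed above:
#
#   snap1 = {"root": {"a.txt": ("-rw-...",)}}
#   snap2 = {"root": {"a.txt": ("-rw-...",), "b.txt": ("-rw-...",)}, "root/new": {}}
#   compare_snapshots(snap1, snap2)  # -> {"root": ["b.txt"], "root/new": {}}
#
# Directories that are new in the second snapshot keep their whole file dict,
# while pre-existing directories map to the list of files added since the
# first snapshot.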
def highly_parallel_job_manager(options, exec_filename,
batch_job_code, locout_dir, env_root_dir=None,
env_instr=None, noenvcp=None, noenvrm=None):
this_name = highly_parallel_job_manager.__name__
# Creates cache dir
# The locout dir is a local directory where outputs will be collected
# in the end, but also where caches and local builds of remote filesystems
# will be stored
cache_dir = locout_dir + ".cache/"
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir)
os.mkdir(cache_dir)
cache_dir = os.path.realpath(cache_dir) + '/'
# Creates log dir
log_dir = locout_dir + "logs/"
if os.path.exists(log_dir):
shutil.rmtree(log_dir)
os.mkdir(log_dir)
# Is there singularity?
singinfo = (options['singularity'], options['singularity_container'], options['singularity_modload'])
# Read exec file and compile job_data
job_data = []
jd = {}
with open(exec_filename) as exec_file:
for line in exec_file:
if line.startswith("c"):
if jd:
if os.path.exists(jd['log_filename']):
if options['force_redo']:
os.remove(jd['log_filename'])
job_data.append(jd)
else:
job_data.append(jd)
jd = {
'command' : '',
'outputs' : [],
'output_dir' : '',
'success' : None,
'issues' : [],
'unique_code' : '',
'log_filename' : '',
'clean_env_inps' : [],
'shared_inps' : []
}
jd['command'] = line[8:].strip()
fields = line.split()
jd['output_dir'] = locout_dir
jd['unique_code'] = batch_job_code + fields[0][1:-1]
jd['log_filename'] = (log_dir + batch_job_code +
fields[0][1:-1] + '_log.txt')
elif line.startswith("i"):
fields = line.split()
jd['clean_env_inps'] = fields[1:]
elif line.startswith("s"): # shared inputs must be declared in "s" line as file.txt:/path/of/file.txt, where file.txt is a filename appearing in the "c" line
fields = line.split()
jd['shared_inps'] = {k : p
for (k,p) in [x.split(":") for x in fields[1:]]}
elif line.startswith("o"):
fields = line.split()
jd['outputs'] = fields[1:]
if jd:
if os.path.exists(jd['log_filename']):
if options['force_redo']:
os.remove(jd['log_filename'])
job_data.append(jd)
else:
job_data.append(jd)
if options['run_on_hpc']:
# Copy files in separate location (for hpc the option 'exec location' must be set to '/data/biowulf/sartie/'
remote_machine = options['host_name']
requested_nodes = options['requested_nodes']
cpus_per_node = options['cpus_per_node']
if options['local_shared_dir']:
# The path specified in hpc_exec_dir must point to the same
# folder of local_shared_dir, only from the point of view of
# the remote machine
protocol = 'remote-sharedfs'
runtime_root_path = options['hpc_exec_dir']
local_shared_folder = options['local_shared_dir']
else:
protocol = 'remote'
runtime_root_path = options['hpc_exec_dir']
local_shared_folder = None
else:
protocol = 'local'
remote_exec_path = False
remote_machine = None
local_shared_folder = None
requested_nodes = 1
cpus_per_node = options['number_of_processors']
runtime_root_path = cache_dir + batch_job_code + "_tmp_root/"
if not os.path.exists(runtime_root_path):
os.mkdir(runtime_root_path)
protocol_triad = (protocol, remote_machine, local_shared_folder)
completed_with_error = set()
output_paths = {}
if env_root_dir:
env_root_dir = os.path.abspath(env_root_dir) + '/'
gen_env_root_dir = None if noenvcp else env_root_dir
else:
env_root_dir, gen_env_root_dir = None, None
data_transfer_protocol = options['data_transfer_protocol']
email = options['email_address']
nsf = options['nodewise_scratch_folder']
nsm = options['nodewise_scratch_memory']
wt = options['walltime']
out_st = options['extra_outer_statements']
partition = options['partition']
exclusive = options['exclusive']
while job_data:
# Create the hosting file system
task_folders, fs_locations = generate_exec_filesystem(
protocol_triad,
cache_dir,
job_data,
runtime_root_path,
batch_job_code,
data_transfer_protocol,
env_instr=env_instr,
build_envroot=gen_env_root_dir
)
# Create local manager script that does the mpiq job, launchable on each node. It checks the situation regularly each 10 secs.
tasks_per_job = create_manager_scripts(
protocol_triad,
cache_dir,
task_folders,
partition,
cpus_per_node,
requested_nodes,
batch_job_code,
fs_locations,
data_transfer_protocol,
singinfo=singinfo,
email_address=email,
nodescratch_folder=nsf,
nodescratch_mem=nsm,
walltime=wt,
outer_statements=out_st,
exclusive=exclusive
)
if env_instr:
snapshot = take_snapshot(
(protocol, "", local_shared_folder),
env_root_dir
)
if not snapshot:
print("ERROR: empty snapshot of {0} was taken".format(env_root_dir))
print(" Locusts needs to take snapshot to compare filesystems")
exit(1)
else:
snapshot = {}
        # Create the external master check that polls from time to time (every 5 mins or so)
        # This step is over only when no process is active anymore
lenlist = [len(x[1]) for x in tasks_per_job]
waiting_time = min(600, 10*(1+len(job_data)//min(lenlist)))
remote_job_control(
protocol_triad,
batch_job_code,
fs_locations,
tasks_per_job,
waiting_time
)
        # Collect results and update job_data (only tasks that remained pending or running are written to job_data again)
job_data, outp, witherr = gather_results(
protocol_triad,
cache_dir,
job_data,
batch_job_code,
task_folders,
fs_locations,
tasks_per_job,
log_dir,
data_transfer_protocol,
build_envroot=env_root_dir,
snapshot=snapshot,
noenvrm = noenvrm
)
if witherr:
completed_with_error |= witherr
if outp:
for x in outp:
output_paths[x] = outp[x]
|
the-stack_106_31823 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'taichi'
copyright = '2020, Taichi Developers'
author = 'Taichi Developers'
version_fn = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'version')
with open(version_fn) as f:
taichi_version = f.readline().strip()
print('Building doc version', taichi_version)
# The short X.Y version
version = taichi_version
# The full version, including alpha/beta/rc tags
release = taichi_version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
if os.environ.get('READTHEDOCS', '') != '':
css_files = [
'//media.readthedocs.org/css/sphinx_rtd_theme.css',
'//media.readthedocs.org/css/readthedocs-doc-embed.css'
]
else:
css_files = []
html_context = {'css_files': css_files + ['_static/extra.css']}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'taichidoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'taichi.tex', 'taichi Documentation', 'Taichi Developers',
'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'taichi', 'taichi Documentation', [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'taichi', 'taichi Documentation', author, 'taichi',
'One line description of project.', 'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
the-stack_106_31828 | # Copyright (c) 2021 SUSE LLC
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
from harvester_e2e_tests import utils
import json
import polling2
import pytest
pytest_plugins = [
'harvester_e2e_tests.fixtures.api_endpoints',
'harvester_e2e_tests.fixtures.session',
]
@pytest.fixture(scope='session')
def enable_vlan(request, admin_session, harvester_api_endpoints):
vlan_nic = request.config.getoption('--vlan-nic')
resp = admin_session.get(harvester_api_endpoints.get_vlan)
assert resp.status_code == 200, 'Failed to get vlan: %s' % (resp.content)
vlan_json = resp.json()
if 'config' not in vlan_json:
vlan_json['config'] = {}
if 'defaultPhysicalNIC' not in vlan_json['config']:
vlan_json['config']['defaultPhysicalNIC'] = None
if utils.is_marker_enabled(request, 'terraform'):
utils.create_clusternetworks_terraform(
request,
admin_session,
harvester_api_endpoints,
'resource_clusternetworks',
vlan_nic)
else:
vlan_json['enable'] = True
vlan_json['config']['defaultPhysicalNIC'] = vlan_nic
utils.poll_for_update_resource(request, admin_session,
harvester_api_endpoints.update_vlan,
vlan_json,
harvester_api_endpoints.get_vlan)
def _cleanup_network(admin_session, harvester_api_endpoints, network_id,
wait_timeout):
def _delete_network():
resp = admin_session.delete(
harvester_api_endpoints.delete_network % (network_id))
if resp.status_code in [200, 204]:
return True
elif resp.status_code == 400:
return False
else:
assert False, 'Failed to cleanup network %s: %s' % (
network_id, resp.content)
    # NOTE(gyee): there's no way to know how many VMs the network is currently
    # attached to, so keep trying until all the VMs have been deleted
try:
polling2.poll(
_delete_network,
step=5,
timeout=wait_timeout)
except polling2.TimeoutException as e:
errmsg = 'Unable to cleanup network: %s' % (network_id)
raise AssertionError(errmsg) from e
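# Note (added for clarity): polling2.poll re-runs _delete_network every 5
# seconds until it returns True or wait_timeout elapses, so the 400 responses
# handled above (network still attached to a VM) are simply retried until the
# attached VMs are gone.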
def _lookup_network(request, admin_session, harvester_api_endpoints, vlan_id):
resp = admin_session.get(harvester_api_endpoints.list_networks)
if resp.status_code == 200:
for network in resp.json()['data']:
if json.loads(network['spec']['config'])['vlan'] == vlan_id:
return network
return None
def _create_network(request, admin_session, harvester_api_endpoints, vlan_id):
# NOTE(gyee): will name the network with the following convention as
# VLAN ID must be unique. vlan_network_<VLAN ID>
network_name = f'vlan-network-{vlan_id}'
# If a network with the same VLAN ID already exist, just use it.
network_data = _lookup_network(request, admin_session,
harvester_api_endpoints, vlan_id)
if network_data:
return network_data
request_json = utils.get_json_object_from_template(
'basic_network',
name=network_name,
vlan=vlan_id
)
resp = admin_session.post(harvester_api_endpoints.create_network,
json=request_json)
assert resp.status_code == 201, 'Unable to create a network: %s' % (
resp.content)
network_data = resp.json()
utils.poll_for_resource_ready(request, admin_session,
network_data['links']['view'])
return network_data
@pytest.fixture(scope='session')
def network(request, admin_session, harvester_api_endpoints, enable_vlan):
vlan_id = request.config.getoption('--vlan-id')
# don't create network if VLAN is not correctly specified
if vlan_id == -1:
return
network_data = _create_network(request, admin_session,
harvester_api_endpoints, vlan_id)
yield network_data
if not request.config.getoption('--do-not-cleanup'):
_cleanup_network(admin_session, harvester_api_endpoints,
network_data['id'],
request.config.getoption('--wait-timeout'))
@pytest.fixture(scope='class')
def bogus_network(request, admin_session, harvester_api_endpoints,
enable_vlan):
vlan_id = request.config.getoption('--vlan-id')
# don't create network if VLAN is not correctly specified
if vlan_id == -1:
return
# change the VLAN ID to an invalid one
vlan_id += 1
network_data = _create_network(request, admin_session,
harvester_api_endpoints, vlan_id)
yield network_data
if not request.config.getoption('--do-not-cleanup'):
_cleanup_network(admin_session, harvester_api_endpoints,
network_data['id'],
request.config.getoption('--wait-timeout'))
# This fixture is only called by test_create_edit_network
# in apis/test_networks.py.
# vlan_id is set to vlan_id + 1
@pytest.fixture(scope='class')
def network_for_update_test(request, admin_session,
harvester_api_endpoints, enable_vlan):
vlan_id = request.config.getoption('--vlan-id')
# don't create network if VLAN is not correctly specified
if vlan_id == -1:
return
request_json = utils.get_json_object_from_template(
'basic_network',
vlan=vlan_id + 1
)
resp = admin_session.post(harvester_api_endpoints.create_network,
json=request_json)
assert resp.status_code == 201, 'Unable to create a network: %s' % (
resp.content)
network_data = resp.json()
utils.poll_for_resource_ready(request, admin_session,
network_data['links']['view'])
yield network_data
if not request.config.getoption('--do-not-cleanup'):
_cleanup_network(admin_session, harvester_api_endpoints,
network_data['id'],
request.config.getoption('--wait-timeout'))
@pytest.fixture(scope='class')
def network_using_terraform(request, admin_session,
harvester_api_endpoints, enable_vlan):
vlan_id = request.config.getoption('--vlan-id')
# don't create network if VLAN is not correctly specified
if vlan_id == -1:
return
# If a network with the same VLAN ID already exist,
# don't try to create but import it
network_data = _lookup_network(request, admin_session,
harvester_api_endpoints, vlan_id)
if network_data:
import_flag = True
else:
import_flag = False
network_json = utils.create_network_terraform(request, admin_session,
harvester_api_endpoints,
'resource_network',
vlan_id, import_flag)
yield network_json
if not request.config.getoption('--do-not-cleanup') and not import_flag:
utils.destroy_resource(
request,
admin_session,
'harvester_network.' + network_json['metadata']['name'])
|
the-stack_106_31829 | import numpy as np
import scipy.stats
import pytest
from .. import bq_c
from . import util
import logging
logger = logging.getLogger("bayesian_quadrature")
logger.setLevel("DEBUG")
DTYPE = util.DTYPE
options = util.options
def test_remove_jitter():
n = 2
arr = np.ones((n, n))
jitter = np.zeros(n)
idx = np.arange(n)
bq_c.improve_covariance_conditioning(arr, jitter, idx)
bq_c.remove_jitter(arr, jitter, idx)
assert (arr == np.ones((n, n))).all()
assert (jitter == np.zeros(n)).all()
bq_c.improve_covariance_conditioning(arr, jitter, idx)
j = jitter[-1]
aj = arr[-1, -1]
bq_c.remove_jitter(arr, jitter, idx[:-1])
assert (arr[:-1, :-1] == np.ones((n - 1, n - 1))).all()
assert (arr[:-1, -1] == np.ones(n - 1)).all()
assert (arr[-1, :-1] == np.ones(n - 1)).all()
assert arr[-1, -1] == aj
assert (jitter[:-1] == np.zeros(n - 1)).all()
assert jitter[-1] == j
def test_improve_covariance_conditioning():
util.npseed()
bq = util.make_bq()
K = bq.gp_l.Kxx
K_old = K.copy()
jitter = np.zeros(K.shape[0], dtype=DTYPE)
bq_c.improve_covariance_conditioning(
K, jitter, np.arange(K.shape[0]))
assert (K == bq.gp_l.Kxx).all()
assert K is bq.gp_l.Kxx
assert (K_old == (K - (np.eye(K.shape[0]) * jitter))).all()
K = bq.gp_log_l.Kxx
K_old = K.copy()
jitter = np.zeros(K.shape[0], dtype=DTYPE)
bq_c.improve_covariance_conditioning(
K, jitter, np.arange(K.shape[0]))
assert (K == bq.gp_log_l.Kxx).all()
assert K is bq.gp_log_l.Kxx
assert (K_old == (K - (np.eye(K.shape[0]) * jitter))).all()
|
the-stack_106_31830 | """Calculate distances and shortest paths and find nearest node/edge(s) to point(s)."""
import itertools
import multiprocessing as mp
import warnings
import networkx as nx
import numpy as np
import pandas as pd
import pyproj
from rtree.index import Index as RTreeIndex
from shapely.geometry import Point
from . import projection
from . import utils
from . import utils_geo
from . import utils_graph
# scipy is optional dependency for projected nearest-neighbor search
try:
from scipy.spatial import cKDTree
except ImportError: # pragma: no cover
cKDTree = None
# scikit-learn is optional dependency for unprojected nearest-neighbor search
try:
from sklearn.neighbors import BallTree
except ImportError: # pragma: no cover
BallTree = None
EARTH_RADIUS_M = 6_371_009
def great_circle_vec(lat1, lng1, lat2, lng2, earth_radius=EARTH_RADIUS_M):
"""
Calculate great-circle distances between pairs of points.
Vectorized function to calculate the great-circle distance between two
points' coordinates or between arrays of points' coordinates using the
haversine formula. Expects coordinates in decimal degrees.
Parameters
----------
lat1 : float or numpy.array of float
first point's latitude coordinate
lng1 : float or numpy.array of float
first point's longitude coordinate
lat2 : float or numpy.array of float
second point's latitude coordinate
lng2 : float or numpy.array of float
second point's longitude coordinate
earth_radius : float
earth's radius in units in which distance will be returned (default is
meters)
Returns
-------
dist : float or numpy.array of float
distance from each (lat1, lng1) to each (lat2, lng2) in units of
earth_radius
"""
y1 = np.deg2rad(lat1)
y2 = np.deg2rad(lat2)
dy = y2 - y1
x1 = np.deg2rad(lng1)
x2 = np.deg2rad(lng2)
dx = x2 - x1
h = np.sin(dy / 2) ** 2 + np.cos(y1) * np.cos(y2) * np.sin(dx / 2) ** 2
h = np.minimum(1, h) # protect against floating point errors
arc = 2 * np.arcsin(np.sqrt(h))
# return distance in units of earth_radius
return arc * earth_radius
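# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The coordinates below (San Francisco -> Los Angeles) are illustrative assumptions.
#
#   d = great_circle_vec(37.7749, -122.4194, 34.0522, -118.2437)   # roughly 5.6e5 meters
#   d_arr = great_circle_vec(np.array([37.77, 40.71]), np.array([-122.42, -74.01]),
#                            np.array([34.05, 42.36]), np.array([-118.24, -71.06]))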
def euclidean_dist_vec(y1, x1, y2, x2):
"""
Calculate Euclidean distances between pairs of points.
Vectorized function to calculate the Euclidean distance between two
points' coordinates or between arrays of points' coordinates. For accurate
results, use projected coordinates rather than decimal degrees.
Parameters
----------
y1 : float or numpy.array of float
first point's y coordinate
x1 : float or numpy.array of float
first point's x coordinate
y2 : float or numpy.array of float
second point's y coordinate
x2 : float or numpy.array of float
second point's x coordinate
Returns
-------
dist : float or numpy.array of float
distance from each (x1, y1) to each (x2, y2) in coordinates' units
"""
# pythagorean theorem
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
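# --- Hedged usage sketch (added for illustration) ---
# Plain Pythagorean distance for projected coordinates; values are illustrative.
#
#   euclidean_dist_vec(0, 0, 3, 4)                                   # -> 5.0
#   euclidean_dist_vec(np.zeros(2), np.zeros(2),
#                      np.array([3.0, 6.0]), np.array([4.0, 8.0]))   # -> array([ 5., 10.])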
def add_edge_lengths(G, precision=3):
"""
Add `length` attribute (in meters) to each edge.
Vectorized function to calculate great-circle distance between each edge's
incident nodes. Ensure graph is in unprojected coordinates, and
unsimplified to get accurate distances. Note: this function is run by all
the `graph.graph_from_x` functions automatically to add `length`
attributes to all edges.
Parameters
----------
G : networkx.MultiDiGraph
unprojected, unsimplified input graph
precision : int
decimal precision to round lengths
Returns
-------
G : networkx.MultiDiGraph
graph with edge length attributes
"""
# extract edge IDs and corresponding coordinates from their nodes
uvk = tuple(G.edges)
x = G.nodes(data="x")
y = G.nodes(data="y")
try:
# two-dimensional array of coordinates: y0, x0, y1, x1
c = np.array([(y[u], x[u], y[v], x[v]) for u, v, k in uvk])
except KeyError: # pragma: no cover
raise KeyError("some edges missing nodes, possibly due to input data clipping issue")
# calculate great circle distances, round, and fill nulls with zeros
dists = great_circle_vec(c[:, 0], c[:, 1], c[:, 2], c[:, 3]).round(precision)
dists[np.isnan(dists)] = 0
nx.set_edge_attributes(G, values=dict(zip(uvk, dists)), name="length")
utils.log("Added length attributes to graph edges")
return G
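# --- Hedged usage sketch (added for illustration) ---
# Typically applied to a freshly built, unprojected and unsimplified graph G
# (assumed to exist) so that every edge carries a 'length' attribute in meters.
#
#   G = add_edge_lengths(G, precision=3)
#   lengths = nx.get_edge_attributes(G, "length")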
def nearest_nodes(G, X, Y, return_dist=False):
"""
Find the nearest node to a point or to each of several points.
If `X` and `Y` are single coordinate values, this will return the nearest
node to that point. If `X` and `Y` are lists of coordinate values, this
will return the nearest node to each point.
If the graph is projected, this uses a k-d tree for euclidean nearest
neighbor search, which requires that scipy is installed as an optional
dependency. If it is unprojected, this uses a ball tree for haversine
nearest neighbor search, which requires that scikit-learn is installed as
an optional dependency.
Parameters
----------
G : networkx.MultiDiGraph
graph in which to find nearest nodes
X : float or list
points' x (longitude) coordinates, in same CRS/units as graph and
containing no nulls
Y : float or list
points' y (latitude) coordinates, in same CRS/units as graph and
containing no nulls
return_dist : bool
optionally also return distance between points and nearest nodes
Returns
-------
nn or (nn, dist) : int/list or tuple
nearest node IDs or optionally a tuple where `dist` contains distances
between the points and their nearest nodes
"""
is_scalar = False
if not (hasattr(X, "__iter__") and hasattr(Y, "__iter__")):
# make coordinates arrays if user passed non-iterable values
is_scalar = True
X = np.array([X])
Y = np.array([Y])
if np.isnan(X).any() or np.isnan(Y).any(): # pragma: no cover
raise ValueError("`X` and `Y` cannot contain nulls")
nodes = utils_graph.graph_to_gdfs(G, edges=False, node_geometry=False)[["x", "y"]]
if projection.is_projected(G.graph["crs"]):
# if projected, use k-d tree for euclidean nearest-neighbor search
if cKDTree is None: # pragma: no cover
raise ImportError("scipy must be installed to search a projected graph")
dist, pos = cKDTree(nodes).query(np.array([X, Y]).T, k=1)
nn = nodes.index[pos]
else:
# if unprojected, use ball tree for haversine nearest-neighbor search
if BallTree is None: # pragma: no cover
raise ImportError("scikit-learn must be installed to search an unprojected graph")
# haversine requires lat, lng coords in radians
nodes_rad = np.deg2rad(nodes[["y", "x"]])
points_rad = np.deg2rad(np.array([Y, X]).T)
dist, pos = BallTree(nodes_rad, metric="haversine").query(points_rad, k=1)
dist = dist[:, 0] * EARTH_RADIUS_M # convert radians -> meters
nn = nodes.index[pos[:, 0]]
# convert results to correct types for return
nn = nn.tolist()
dist = dist.tolist()
if is_scalar:
nn = nn[0]
dist = dist[0]
if return_dist:
return nn, dist
else:
return nn
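# --- Hedged usage sketch (added for illustration) ---
# Assumes an OSMnx-style graph G whose nodes carry 'x'/'y' attributes and whose
# graph dict has a 'crs' entry; the coordinates are illustrative only.
#
#   nid = nearest_nodes(G, X=-122.4194, Y=37.7749)
#   nids, dists = nearest_nodes(G, X=[-122.42, -122.43], Y=[37.77, 37.78], return_dist=True)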
def nearest_edges(G, X, Y, interpolate=None, return_dist=False, ref=None):
"""
Find the nearest edge to a point or to each of several points.
If `X` and `Y` are single coordinate values, this will return the nearest
edge to that point. If `X` and `Y` are lists of coordinate values, this
will return the nearest edge to each point.
If `interpolate` is None, search for the nearest edge to each point, one
at a time, using an r-tree and minimizing the euclidean distances from the
point to the possible matches. For accuracy, use a projected graph and
points. This method is precise and also fastest if searching for few
points relative to the graph's size.
For a faster method if searching for many points relative to the graph's
size, use the `interpolate` argument to interpolate points along the edges
and index them. If the graph is projected, this uses a k-d tree for
euclidean nearest neighbor search, which requires that scipy is installed
as an optional dependency. If graph is unprojected, this uses a ball tree
for haversine nearest neighbor search, which requires that scikit-learn is
installed as an optional dependency.
Parameters
----------
G : networkx.MultiDiGraph
graph in which to find nearest edges
X : float or list
points' x (longitude) coordinates, in same CRS/units as graph and
containing no nulls
Y : float or list
points' y (latitude) coordinates, in same CRS/units as graph and
containing no nulls
interpolate : float
spacing distance between interpolated points, in same units as graph.
smaller values generate more points.
    return_dist : bool
        optionally also return distance between points and nearest edges
    ref : tuple or None
        optional (lat, lng) reference point; if provided, edge geometries are
        projected to an azimuthal equidistant CRS centered on this point before
        distances are measured (this parameter appears to be a local addition)
Returns
-------
ne or (ne, dist) : tuple or list
nearest edges as (u, v, key) or optionally a tuple where `dist`
contains distances between the points and their nearest edges
"""
is_scalar = False
if not (hasattr(X, "__iter__") and hasattr(Y, "__iter__")):
# make coordinates arrays if user passed non-iterable values
is_scalar = True
X = np.array([X])
Y = np.array([Y])
if np.isnan(X).any() or np.isnan(Y).any(): # pragma: no cover
raise ValueError("`X` and `Y` cannot contain nulls")
geoms = utils_graph.graph_to_gdfs(G, nodes=False)["geometry"]
crs = None
if ref is not None:
crs = pyproj.Proj(proj='aeqd', ellps='WGS84', datum='WGS84', lat_0=ref[0], lon_0=ref[1]).srs
# if no interpolation distance was provided
if interpolate is None:
# build the r-tree spatial index by position for subsequent iloc
rtree = RTreeIndex()
for pos, bounds in enumerate(geoms.bounds.values):
rtree.insert(pos, bounds)
# use r-tree to find possible nearest neighbors, one point at a time,
# then minimize euclidean distance from point to the possible matches
ne_dist = list()
for xy in zip(X, Y):
if crs is not None:
dists = geoms.iloc[list(rtree.nearest(xy))].to_crs(crs).distance(Point(xy))
else:
dists = geoms.iloc[list(rtree.nearest(xy))].distance(Point(xy))
ne_dist.append((dists.idxmin(), dists.min()))
ne, dist = zip(*ne_dist)
# otherwise, if interpolation distance was provided
else:
# interpolate points along edges to index with k-d tree or ball tree
uvk_xy = list()
for uvk, geom in zip(geoms.index, geoms.values):
uvk_xy.extend((uvk, xy) for xy in utils_geo.interpolate_points(geom, interpolate))
labels, xy = zip(*uvk_xy)
vertices = pd.DataFrame(xy, index=labels, columns=["x", "y"])
if projection.is_projected(G.graph["crs"]):
# if projected, use k-d tree for euclidean nearest-neighbor search
if cKDTree is None: # pragma: no cover
raise ImportError("scipy must be installed to search a projected graph")
dist, pos = cKDTree(vertices).query(np.array([X, Y]).T, k=1)
ne = vertices.index[pos]
else:
# if unprojected, use ball tree for haversine nearest-neighbor search
if BallTree is None: # pragma: no cover
raise ImportError("scikit-learn must be installed to search an unprojected graph")
# haversine requires lat, lng coords in radians
vertices_rad = np.deg2rad(vertices[["y", "x"]])
points_rad = np.deg2rad(np.array([Y, X]).T)
dist, pos = BallTree(vertices_rad, metric="haversine").query(points_rad, k=1)
dist = dist[:, 0] * EARTH_RADIUS_M # convert radians -> meters
ne = vertices.index[pos[:, 0]]
# convert results to correct types for return
ne = list(ne)
dist = list(dist)
if is_scalar:
ne = ne[0]
dist = dist[0]
if return_dist:
return ne, dist
else:
return ne
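# --- Hedged usage sketch (added for illustration) ---
# Same assumptions about G as above. `interpolate` trades precision for speed when
# querying many points; `ref` appears to be a local addition that re-projects edge
# geometries around a (lat, lng) reference before measuring distances.
#
#   u, v, key = nearest_edges(G, X=-122.4194, Y=37.7749)
#   edges, dists = nearest_edges(G, X=[-122.42], Y=[37.77], interpolate=10, return_dist=True)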
def get_nearest_node(G, point, method=None, return_dist=False):
"""
Do not use, deprecated.
Parameters
----------
G : networkx.MultiDiGraph
deprecated, do not use
point : tuple
deprecated, do not use
method : string
deprecated, do not use
return_dist : bool
deprecated, do not use
Returns
-------
int or tuple
"""
msg = (
"The `get_nearest_node` function has been deprecated and will be removed in a "
"future release. Use the more efficient `distance.nearest_nodes` instead."
)
warnings.warn(msg)
nn, dist = nearest_nodes(G, X=[point[1]], Y=[point[0]], return_dist=True)
if return_dist:
return nn[0], dist[0]
else:
return nn[0]
def get_nearest_edge(G, point, return_geom=False, return_dist=False):
"""
Do not use, deprecated.
Parameters
----------
G : networkx.MultiDiGraph
deprecated, do not use
point : tuple
deprecated, do not use
return_geom : bool
deprecated, do not use
return_dist : bool
deprecated, do not use
Returns
-------
tuple
"""
msg = (
"The `get_nearest_edge` function has been deprecated and will be removed in a "
"future release. Use the more efficient `distance.nearest_edges` instead."
)
warnings.warn(msg)
ne, dist = nearest_edges(G, X=[point[1]], Y=[point[0]], return_dist=True)
u, v, key = ne[0]
geom = utils_graph.graph_to_gdfs(G, nodes=False).loc[(u, v, key), "geometry"]
if return_dist and return_geom:
return u, v, key, geom, dist[0]
elif return_dist:
return u, v, key, dist[0]
elif return_geom:
return u, v, key, geom
else:
return u, v, key
def get_nearest_nodes(G, X, Y, method=None, return_dist=False):
"""
Do not use, deprecated.
Parameters
----------
G : networkx.MultiDiGraph
deprecated, do not use
X : list
deprecated, do not use
Y : list
deprecated, do not use
method : string
deprecated, do not use
return_dist : bool
deprecated, do not use
Returns
-------
numpy.array or tuple of numpy.array
"""
msg = (
"The `get_nearest_nodes` function has been deprecated and will be removed in a "
"future release. Use the more efficient `distance.nearest_nodes` instead."
)
warnings.warn(msg)
return nearest_nodes(G, X=X, Y=Y, return_dist=return_dist)
def get_nearest_edges(G, X, Y, method=None, dist=None):
"""
Do not use, deprecated.
Parameters
----------
G : networkx.MultiDiGraph
deprecated, do not use
X : list-like
deprecated, do not use
Y : list-like
deprecated, do not use
method : string
deprecated, do not use
dist : float
deprecated, do not use
Returns
-------
numpy.array
"""
msg = (
"The `get_nearest_edges` function has been deprecated and will be removed in a "
"future release. Use the more efficient `distance.nearest_edges` instead."
)
warnings.warn(msg)
return nearest_edges(G, X, Y, dist)
def _single_shortest_path(G, orig, dest, weight):
"""
Solve the shortest path from an origin node to a destination node.
This function is a convenience wrapper around networkx.shortest_path, with
exception handling for unsolvable paths.
Parameters
----------
G : networkx.MultiDiGraph
input graph
orig : int
origin node ID
dest : int
destination node ID
weight : string
edge attribute to minimize when solving shortest path
Returns
-------
path : list
list of node IDs constituting the shortest path
"""
try:
return nx.shortest_path(G, orig, dest, weight=weight)
except nx.exception.NetworkXNoPath: # pragma: no cover
utils.log(f"Cannot solve path from {orig} to {dest}")
return None
def shortest_path(G, orig, dest, weight="length", cpus=1):
"""
Solve shortest path from origin node(s) to destination node(s).
If `orig` and `dest` are single node IDs, this will return a list of the
nodes constituting the shortest path between them. If `orig` and `dest`
are lists of node IDs, this will return a list of lists of the nodes
constituting the shortest path between each origin-destination pair. If a
path cannot be solved, this will return None for that path. You can
parallelize solving multiple paths with the `cpus` parameter, but be
careful to not exceed your available RAM.
See also `k_shortest_paths` to solve multiple shortest paths between a
single origin and destination. For additional functionality or different
solver algorithms, use NetworkX directly.
Parameters
----------
G : networkx.MultiDiGraph
input graph
orig : int or list
origin node ID, or a list of origin node IDs
dest : int or list
destination node ID, or a list of destination node IDs
weight : string
edge attribute to minimize when solving shortest path
cpus : int
how many CPU cores to use; if None, use all available
Returns
-------
path : list
list of node IDs constituting the shortest path, or, if orig and dest
are lists, then a list of path lists
"""
if not (hasattr(orig, "__iter__") or hasattr(dest, "__iter__")):
# if neither orig nor dest is iterable, just return the shortest path
return _single_shortest_path(G, orig, dest, weight)
else:
# if orig/dest are iterable, ensure they have same lengths
if len(orig) != len(dest): # pragma: no cover
raise ValueError("orig and dest must contain same number of elements")
if cpus is None:
cpus = mp.cpu_count()
utils.log(f"Solving {len(orig)} paths with {cpus} CPUs...")
if cpus == 1:
# if single-threading, calculate each shortest path one at a time
paths = [_single_shortest_path(G, o, d, weight) for o, d in zip(orig, dest)]
else:
# if multi-threading, calculate shortest paths in parallel
args = ((G, o, d, weight) for o, d in zip(orig, dest))
pool = mp.Pool(cpus)
sma = pool.starmap_async(_single_shortest_path, args)
paths = sma.get()
pool.close()
pool.join()
return paths
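# --- Hedged usage sketch (added for illustration) ---
# Assumes G carries a 'length' attribute on every edge; the node IDs are
# placeholders, not values taken from the original code.
#
#   route = shortest_path(G, 1, 2, weight="length")
#   routes = shortest_path(G, [1, 3], [2, 4], weight="length", cpus=2)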
def k_shortest_paths(G, orig, dest, k, weight="length"):
"""
Solve `k` shortest paths from an origin node to a destination node.
See also `shortest_path` to get just the one shortest path.
Parameters
----------
G : networkx.MultiDiGraph
input graph
orig : int
origin node ID
dest : int
destination node ID
k : int
number of shortest paths to get
weight : string
edge attribute to minimize when solving shortest paths. default is
edge length in meters.
Returns
-------
paths : generator
a generator of `k` shortest paths ordered by total weight. each path
is a list of node IDs.
"""
paths_gen = nx.shortest_simple_paths(utils_graph.get_digraph(G, weight), orig, dest, weight)
for path in itertools.islice(paths_gen, 0, k):
yield path
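# --- Hedged usage sketch (added for illustration) ---
# k_shortest_paths returns a generator, so materialize it when all k routes are
# needed at once; node IDs below are placeholders.
#
#   best_three = list(k_shortest_paths(G, orig=1, dest=2, k=3, weight="length"))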
|
the-stack_106_31833 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flags related to distributed execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from official.utils.flags._conventions import help_wrap
def define_distribution(worker_hosts=True, task_index=True):
"""Register distributed execution flags.
Args:
worker_hosts: Create a flag for specifying comma-separated list of workers.
task_index: Create a flag for specifying index of task.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
if worker_hosts:
flags.DEFINE_string(
name='worker_hosts',
default=None,
help=help_wrap(
'Comma-separated list of worker ip:port pairs for running '
'multi-worker models with DistributionStrategy. The user would '
'start the program on each host with identical value for this '
'flag.'))
if task_index:
flags.DEFINE_integer(
name='task_index',
default=-1,
help=help_wrap('If multi-worker training, the task_index of this '
'worker.'))
return key_flags
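# --- Hedged usage sketch (added for illustration) ---
# Typical flag registration followed by manual parsing; the argv values below are
# assumptions used only to show the expected flag syntax.
#
#   define_distribution(worker_hosts=True, task_index=True)
#   flags.FLAGS(['prog', '--worker_hosts=10.0.0.1:2222,10.0.0.2:2222', '--task_index=0'])
#   print(flags.FLAGS.worker_hosts, flags.FLAGS.task_index)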
|
the-stack_106_31834 | # Copyright (c) 2020 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
from pandapipes import pandapipesNet
from pandapipes.multinet.control.run_control_multinet import prepare_run_ctrl, run_control
from pandapipes.timeseries.run_time_series import init_default_outputwriter as init_default_ow_pps
from pandapower import pandapowerNet
from pandapower.control.util.diagnostic import control_diagnostic
from pandapower.timeseries.run_time_series import get_recycle_settings, init_time_steps, output_writer_routine, \
print_progress_bar, cleanup, run_loop, init_default_outputwriter as init_default_ow_pp, init_output_writer
try:
import pandaplan.core.pplog as pplog
except ImportError:
import logging as pplog
logger = pplog.getLogger(__name__)
logger.setLevel(level=pplog.WARNING)
def _call_output_writer(multinet, time_step, pf_converged, ctrl_converged, ts_variables):
"""
Calling the output writer routine for each net in multinet.
:param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
:type multinet: pandapipes.Multinet
:param time_step: the results of each time step, which shall be retrieved by the output writer
:type time_step: sequence of array_like
:param pf_converged: did powerflow converge
:type pf_converged: bool
:param ctrl_converged: did all controller converge
:type ctrl_converged: bool
:param ts_variables: contains all relevant information and boundaries required for time series and control analyses
:type ts_variables: dict
:return: calling each output writer in order to save the results which are retrieved
:rtype: None
"""
for net_name in multinet['nets'].keys():
net = multinet['nets'][net_name]
output_writer_routine(net, time_step, pf_converged, ctrl_converged, ts_variables[net_name]["recycle_options"])
def init_time_series(multinet, time_steps, continue_on_divergence=False, verbose=True,
**kwargs):
"""
Initializes the time series calculation.
Besides it creates the dict ts_variables, which includes necessary variables for the time series / control loop.
:param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
:type multinet: pandapipes.Multinet
:param time_steps: the number of times a time series calculation shall be conducted
:type time_steps: sequence of array_like
:param continue_on_divergence: What to do if loadflow/pipeflow is not converging, fires control_repair
:type continue_on_divergence: bool, default: False
    :param verbose: prints progress bar or logger debug messages
:type verbose: bool, default: True
:param kwargs: additional keyword arguments handed to each run function
:type kwargs: dict
:return: ts_variables which contains all relevant information and boundaries required for time series and
control analyses
:rtype: dict
"""
time_steps = init_time_steps(multinet, time_steps, **kwargs)
run = kwargs.get('run', None)
ts_variables = prepare_run_ctrl(multinet, None, **kwargs)
for net_name in multinet['nets'].keys():
net = multinet['nets'][net_name]
if isinstance(net, pandapowerNet):
init_default_ow_pp(net, time_steps, **kwargs)
elif isinstance(net, pandapipesNet):
init_default_ow_pps(net, time_steps, **kwargs)
else:
raise ValueError('the given nets are neither pandapipes nor pandapower nets')
recycle_options = None
if hasattr(run, "__name__") and run.__name__ == "runpp":
# use faster runpp options if possible
recycle_options = get_recycle_settings(net, **kwargs)
        ts_variables[net_name]['run'] = run[net_name] if run is not None else ts_variables[net_name]['run']
ts_variables[net_name]['recycle_options'] = recycle_options
init_output_writer(net, time_steps)
# time steps to be calculated (list or range)
ts_variables["time_steps"] = time_steps
# If True, a diverged run is ignored and the next step is calculated
ts_variables["continue_on_divergence"] = continue_on_divergence
# print settings
ts_variables["verbose"] = verbose
if logger.level != 10 and verbose:
# simple progress bar
print_progress_bar(0, len(time_steps), prefix='Progress:', suffix='Complete', length=50)
return ts_variables
def run_timeseries(multinet, time_steps=None, continue_on_divergence=False,
verbose=True, **kwargs):
"""
Time Series main function.
Runs multiple run functions for each net in multinet. Within each time step several controller loops are conducted
    until all controllers and every net have converged.
A normal pp.runpp/pps.pipeflow can be optionally replaced by other run functions by setting the run function in
kwargs.
:param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
:type multinet: pandapipes.Multinet
:param time_steps: the number of times a time series calculation shall be conducted
:type time_steps: sequence of array_like, default: None
:param continue_on_divergence: What to do if loadflow/pipeflow is not converging, fires control_repair
:type continue_on_divergence: bool, default: False
    :param verbose: prints progress bar or logger debug messages
:type verbose: bool, default: True
:param kwargs: additional keyword arguments handed to each run function
:type kwargs: dict
:return: runs the time series loop
:rtype: None
"""
ts_variables = init_time_series(multinet, time_steps, continue_on_divergence, verbose, **kwargs)
for net_name in multinet['nets'].keys():
control_diagnostic(multinet['nets'][net_name])
run_loop(multinet, ts_variables, run_control, _call_output_writer, **kwargs)
# cleanup functions after the last time step was calculated
for net_name in multinet['nets'].keys():
cleanup(ts_variables[net_name])
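# --- Hedged usage sketch (added for illustration) ---
# Assumes `multinet` was built with pandapipes' multinet tooling and already holds
# nets, controllers and output writers; the time-step range is illustrative.
#
#   run_timeseries(multinet, time_steps=range(0, 24), continue_on_divergence=False, verbose=True)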
|
the-stack_106_31836 | import torch
import torch.nn as nn
import ops.create5Dimages as create5D
import ops.create4Dimages as create4D
# Newly added module
class Self_Attn(nn.Module):
"""Self attention Layer"""
# (2048,4,16,7,7,4096) (2048,4,16,7,7,256)
    # Usage: self.Attention = attention.Self_Attn(2048, 4, 16, 7, 7, 256)
def __init__(self, in_dim, batch_size, num_frames, width, height, channels):
super(Self_Attn, self).__init__()
self.chanel_in = in_dim
self.r = 8
self.conv1 = nn.Conv2d(in_channels=self.chanel_in, out_channels=self.chanel_in // self.r, kernel_size=1)
self.conv2 = nn.Conv2d(in_channels=self.chanel_in, out_channels=self.chanel_in // self.r, kernel_size=1)
self.conv3 = nn.Conv2d(in_channels=self.chanel_in, out_channels=self.chanel_in // self.r, kernel_size=1)
self.value_conv = nn.Conv3d(in_channels=self.chanel_in // self.r, out_channels=self.chanel_in, kernel_size=1,
stride=1, padding=0, dilation=1, groups=1, bias=True)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
self.batch_size = batch_size
self.num_frames = num_frames
self.width = width
self.height = height
self.channels = channels
def forward(self, x):
# [64,2048,7,7]
        # Here, x carries no positional encoding and has not yet been through self-attention; it is the raw input x
temp = x
        '''If PE were added first and C processed afterwards, the residual would be the positionally encoded, not-yet-learned feature map'''
        '''Here the second option is used instead: process C first, then add PE, so the residual is the feature map without positional encoding and without learned transforms, and q/k/v are embedded separately'''
# Q
q = self.conv1(x) # [64,4096,7,7] [64,512,7,7] [64,1024,7,7] [64,4096,7,7]
# print('q.shape = ',q.shape) #[64,4096,7,7]
x1 = q[0:4, :, :, :].view(1, self.num_frames, self.channels, self.width,
self.height) # [1,16,256,7,7] [1,16,4096,7,7]
x2 = q[4:8, :, :, :].view(1, self.num_frames, self.channels, self.width, self.height)
x3 = q[8:12, :, :, :].view(1, self.num_frames, self.channels, self.width, self.height)
x4 = q[12:16, :, :, :].view(1, self.num_frames, self.channels, self.width, self.height)
q = torch.cat([x1, x2, x3, x4], dim=0).permute(0, 1, 3, 4, 2) # [4,16,256,7,7]->[4,16,7,7,256]
q = q.reshape(self.batch_size, self.num_frames * self.width * self.height, self.channels)
# q.shape = [B,THW,256] = [4,784,256]
# print(q.shape,'q')
# K
k = self.conv2(x)
x1 = k[0:4, :, :, :].view(1, self.num_frames, self.channels, self.width,
self.height) # [1,16,256,7,7] [1,16,4096,7,7]
x2 = k[4:8, :, :, :].view(1, self.num_frames, self.channels, self.width, self.height)
x3 = k[8:12, :, :, :].view(1, self.num_frames, self.channels, self.width, self.height)
x4 = k[12:16, :, :, :].view(1, self.num_frames, self.channels, self.width, self.height)
k = torch.cat([x1, x2, x3, x4], dim=0).permute(0, 1, 3, 4, 2) # [4,16,256,7,7]->[4,16,7,7,256]
k = k.permute(0, 2, 1, 3, 4).reshape(self.batch_size, self.channels, self.num_frames * self.width * self.height)
# k.shape = [B,256,THW] = [4,256,784]
# print(k.shape,'k')
# V
v = self.conv3(x) # [64,256,7,7]
x1 = v[0:4, :, :, :].view(1, self.num_frames, self.channels, self.width,
self.height) # [1,16,256,7,7] [1,16,4096,7,7]
x2 = v[4:8, :, :, :].view(1, self.num_frames, self.channels, self.width, self.height)
x3 = v[8:12, :, :, :].view(1, self.num_frames, self.channels, self.width, self.height)
x4 = v[12:16, :, :, :].view(1, self.num_frames, self.channels, self.width, self.height)
v = torch.cat([x1, x2, x3, x4], dim=0).permute(0, 1, 3, 4, 2) # [4,16,256,7,7]->[4,16,7,7,256]
v = v.reshape(self.batch_size, self.num_frames * self.width * self.height, self.channels)
# q.shape = [B,THW,256] = [4,784,256]
# print(v.shape,'er')
# Q*K^T
# print(q.shape)
# print(k.shape)
energy = torch.bmm(q, k) # energy.shape = [B,THW,THW] = [4,784,784]
# softmax(Q*K^T)
attention = self.softmax(energy) # attentio.shape = [B,THW,THW] = [4,784,784]
# softmax(Q*K^T)*V
out = torch.bmm(attention, v).view(self.batch_size, self.num_frames, self.width, self.height,
self.channels).permute(0, 4, 1, 2, 3)
# out.shape = [B,THW,256] = [4,784,256] -> [4,256,16,7,7]
out = self.value_conv(out).permute(0, 2, 1, 3, 4) # [4,2048,16,7,7] -> [4,16,2048,7,7]
out1 = out[0:1, :, :, :, :].view(self.num_frames, self.chanel_in, self.width,
self.height) # [1,16,2048,7,7] [1,16,4096,7,7]
out2 = out[1:2, :, :, :, :].view(self.num_frames, self.chanel_in, self.width, self.height)
out3 = out[2:3, :, :, :, :].view(self.num_frames, self.chanel_in, self.width, self.height)
out4 = out[3:4, :, :, :, :].view(self.num_frames, self.chanel_in, self.width, self.height)
out = torch.cat([out1, out2, out3, out4], dim=0) # [64,2048,7,7]
out = self.gamma * out + temp
#print(out.shape,'......')[16,2048,7,7]
        return out
 |
the-stack_106_31839 | import os
import tensorflow as tf
from model import get_model
from dataset import dataset
def main():
"""
Get the dataset, model
Set the callback
Train and save the best weights based on validation accuracy
"""
train_images, train_labels, test_images, test_labels = dataset()
model = get_model()
model.summary()
checkpoint_path = "training/cp-{epoch:04d}.ckpt"
os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
monitor="val_accuracy",
save_best_only=True,
save_weights_only=True)
# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))
# Train the model with the new callback
model.fit(train_images,
train_labels,
epochs=100,
validation_data=(test_images, test_labels),
callbacks=[cp_callback],
verbose=2)
if __name__ == '__main__':
main()
|
the-stack_106_31840 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This is a socket-based implementation of an HTTP server
'''
# Import socket module
from socket import socket
from socket import AF_INET
from socket import SOCK_STREAM
from socket import SOL_SOCKET, SO_REUSEADDR
from wsgiref.handlers import format_date_time
from datetime import datetime
from time import mktime
import os
# Put in your codes here to create a TCP sever socket
# and bind it to your server address and port number
HOST, PORT_NUM = "0.0.0.0", 4000
CRLF = '\r\n'  # currently unused; kept for reference
class Request(object):
'''
    A simple http request object
'''
def __init__(self, raw_request):
self._raw_request = raw_request
self._method, self._path, self._protocol, self._headers = self.parse_request()
def getpath(self):
'''
return the url path
'''
if self._path == "/":
# default index.html
self._path = "/index.html"
return self._path, self.start_byte
def parse_request(self):
'''
        turn the raw request into structured data
'''
temp = [i.strip() for i in self._raw_request.splitlines()]
self.start_byte = -1
# print(temp)
if -1 == temp[0].find('HTTP'):
raise Exception('Incorrect Protocol')
# Extract the range part of http request
if -1 != self._raw_request.find('\r\nRange'):
import re
# print(self._raw_request)
# r"Range: bytes=(\d*)-(\d*)"
re_result = re.search(
r"Range: bytes=(\d*)-(\d*)", self._raw_request)
# print(re_result)
if re_result != None:
# print(re_result.group(1), re_result.group(2))
self.start_byte = int(re_result.group(1))
# Figure out our request method, path, and which version of HTTP we're using
method, path, protocol = [i.strip() for i in temp[0].split()]
# Create the headers, but only if we have a GET reqeust
headers = {}
if method == "GET":
for k, value in [i.split(':', 1) for i in temp[1:-1]]:
headers[k.strip()] = value.strip()
else:
raise Exception('Only accepts GET requests')
return method, path, protocol, headers
def __repr__(self):
return repr({'method': self._method, 'path': self._path,
'protocol': self._protocol, 'headers': self._headers})
class Response(object):
'''
Process the response of http
'''
def __init__(self, filedes):
self.status = 200
self.offset = 0
        # Range request: send a partial response if filedes[1] >= 0
if filedes[1] >= 0:
self.status = 206
self.offset = filedes[1]
try:
self.file = open('.' + filedes[0], mode='rb')
self.filename = '.' + filedes[0]
except IOError:
self.status = 404
self.file = open('./Err404.html', mode='rb')
self.filename = './Err404.html'
finally:
self.filelen = int(os.stat(self.filename).st_size)
# print(self.filelen, self.not_found)
def get_resp_header(self):
'''
return the http header
'''
        # get formatted time
now = datetime.now()
stamp = mktime(now.timetuple())
timestr = format_date_time(stamp)
# 404 header
if self.status == 404:
header = "HTTP/1.1 404 Not Found\r\n" + \
"Server: nginx\r\n" +\
"Date: %s\r\n" % timestr +\
"Content-Type: text/html\r\n" +\
"Content-Length: %d\r\n" % self.filelen +\
"Connection: keep-alive\r\n\r\n"
return header
if self.status == 200:
# 200 OK header
header = "HTTP/1.1 200 OK\r\n" +\
"Date: %s\r\n" % timestr +\
"Server: nginx\r\n" +\
"Last-Modified: %s\r\n" % timestr +\
"Accept-Ranges: bytes\r\n" +\
"Content-Length: %d\r\n" % self.filelen +\
"Keep-Alive: timeout=5, max=100\r\n" +\
"Connection: Keep-Alive\r\n" +\
"Content-Type: %s; charset=UTF-8\r\n\r\n" % self.get_content_type()
return header
if self.status == 206:
header = "HTTP/1.1 206 Partial Content\r\n" +\
"Accept-Ranges: bytes\r\n" +\
"Content-Range: bytes %d-%d/%d\r\n" \
% (self.offset, self.filelen - 1, self.filelen) +\
"Content-Length: %d\r\n" % (self.filelen - self.offset) +\
"Content-Type: %s\r\n\r\n" % self.get_content_type()
return header
def get_content_type(self):
'''
Use built in function to get filetype and map them
'''
_, extension = os.path.splitext(self.filename)
mapping = {'html': 'text/html', 'htm': 'text/html', 'txt': 'text/plain',
'mp4': 'video/mp4', 'ogg': 'audio/ogg', 'mp3': 'audio/mpeg', 'jpg': 'image/jpeg'}
# print(extension)
if extension[1:] in mapping.keys():
return mapping[extension[1:]]
else:
return 'text/plain'
def send_file(self, connection):
'''
send the main body
'''
# Send HTTP content body
if self.offset <= self.filelen:
self.file.seek(self.offset)
else:
return
buff = self.file.read(1024)
total = 0
while buff:
total += len(buff)
try:
connection.send(buff)
except BrokenPipeError as err:
# print("Detected remote disconnect", err)
break
buff = self.file.read(1024)
# print(total, self.filename)
self.file.close()
return
def main():
'''
    Main entry point: accept connections and serve files over HTTP
'''
# (SOCK_STREAM) is used for TCP
server_socket = socket(AF_INET, SOCK_STREAM)
try:
# Bind the socket to server address and server port
#server_socket.bind((HOST, PORT_NUM))
server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT_NUM))
server_socket.listen(10)
except OSError:
print("Port number in use. Exiting....")
exit()
# Server should be up and running and listening to the incoming connections
try:
while True:
connection_socket = None
# If an exception occurs during the execution of try clause
# the rest of the clause is skipped
# If the exception type matches the word after except
# the except clause is executed
try:
print('Ready to serve...')
# Set up a new connection from the client
connection_socket, addr = server_socket.accept()
# Receives the request message from the client
# connection-oriented
# For best match with hardware and network realities
# the value of bufsize should be a relatively small power of 2
# for example, 4096.
http_request = connection_socket.recv(1024).decode()
if not http_request:
raise OSError
# print(http_request)
# print(http_request)
http_request = Request(http_request)
# print(repr(http_request), http_request.getpath())
resp = Response(http_request.getpath())
print("Request from %s, path: %s" %
(addr, http_request.getpath()))
header = resp.get_resp_header()
# print(header)
connection_socket.send(header.encode())
resp.send_file(connection_socket)
except OSError:
print("Detect error (Connection closed)")
finally:
if connection_socket:
connection_socket.close()
finally:
# Put your code here to close the socket
server_socket.close()
if __name__ == "__main__":
main()
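# --- Hedged usage sketch (added for illustration) ---
# With the server listening on port 4000, a Range request exercises the 206 branch
# above; the requested path is an assumption about locally available files.
#
#   import urllib.request
#   req = urllib.request.Request("http://localhost:4000/index.html")
#   req.add_header("Range", "bytes=100-")
#   print(urllib.request.urlopen(req).status)   # expected: 206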
|
the-stack_106_31841 | # Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for vcf_reader CLIF python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from third_party.nucleus.io.python import vcf_reader
from third_party.nucleus.protos import reference_pb2
from third_party.nucleus.protos import variants_pb2
from third_party.nucleus.testing import test_utils
from third_party.nucleus.util import ranges
expected_sites_contigs = [
reference_pb2.ContigInfo(name='chr1', pos_in_fasta=0, n_bases=248956422),
reference_pb2.ContigInfo(name='chr2', pos_in_fasta=1, n_bases=242193529),
reference_pb2.ContigInfo(name='chr3', pos_in_fasta=2, n_bases=198295559),
reference_pb2.ContigInfo(name='chr4', pos_in_fasta=3, n_bases=190214555),
reference_pb2.ContigInfo(name='chr5', pos_in_fasta=4, n_bases=181538259),
reference_pb2.ContigInfo(name='chr6', pos_in_fasta=5, n_bases=170805979),
reference_pb2.ContigInfo(name='chr7', pos_in_fasta=6, n_bases=159345973),
reference_pb2.ContigInfo(name='chr8', pos_in_fasta=7, n_bases=145138636),
reference_pb2.ContigInfo(name='chr9', pos_in_fasta=8, n_bases=138394717),
reference_pb2.ContigInfo(name='chr10', pos_in_fasta=9, n_bases=133797422),
reference_pb2.ContigInfo(name='chr11', pos_in_fasta=10, n_bases=135086622),
reference_pb2.ContigInfo(name='chr12', pos_in_fasta=11, n_bases=133275309),
reference_pb2.ContigInfo(name='chr13', pos_in_fasta=12, n_bases=114364328),
reference_pb2.ContigInfo(name='chr14', pos_in_fasta=13, n_bases=107043718),
reference_pb2.ContigInfo(name='chr15', pos_in_fasta=14, n_bases=101991189),
reference_pb2.ContigInfo(name='chr16', pos_in_fasta=15, n_bases=90338345),
reference_pb2.ContigInfo(name='chr17', pos_in_fasta=16, n_bases=83257441),
reference_pb2.ContigInfo(name='chr18', pos_in_fasta=17, n_bases=80373285),
reference_pb2.ContigInfo(name='chr19', pos_in_fasta=18, n_bases=58617616),
reference_pb2.ContigInfo(name='chr20', pos_in_fasta=19, n_bases=64444167),
reference_pb2.ContigInfo(name='chr21', pos_in_fasta=20, n_bases=46709983),
reference_pb2.ContigInfo(name='chr22', pos_in_fasta=21, n_bases=50818468),
reference_pb2.ContigInfo(name='chrX', pos_in_fasta=22, n_bases=156040895),
reference_pb2.ContigInfo(name='chrY', pos_in_fasta=23, n_bases=57227415),
reference_pb2.ContigInfo(name='chrM', pos_in_fasta=24, n_bases=16569),
]
# pylint: disable=line-too-long
expected_samples_filters = [
variants_pb2.VcfFilterInfo(id='PASS', description='All filters passed'),
variants_pb2.VcfFilterInfo(id='LowQual', description='Low quality'),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL95.00to96.00',
description=
'Truth sensitivity tranche level for INDEL model at VQS Lod: 0.9364 <= x < 1.0415'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL96.00to97.00',
description=
'Truth sensitivity tranche level for INDEL model at VQS Lod: 0.8135 <= x < 0.9364'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL97.00to99.00',
description=
'Truth sensitivity tranche level for INDEL model at VQS Lod: 0.323 <= x < 0.8135'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.00to99.50',
description=
'Truth sensitivity tranche level for INDEL model at VQS Lod: -0.1071 <= x < 0.323'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.50to99.90',
description=
'Truth sensitivity tranche level for INDEL model at VQS Lod: -1.845 <= x < -0.1071'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.90to99.95',
description=
'Truth sensitivity tranche level for INDEL model at VQS Lod: -3.2441 <= x < -1.845'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.95to100.00+',
description=
'Truth sensitivity tranche level for INDEL model at VQS Lod < -57172.0693'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.95to100.00',
description=
'Truth sensitivity tranche level for INDEL model at VQS Lod: -57172.0693 <= x < -3.2441'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.50to99.60',
description=
'Truth sensitivity tranche level for SNP model at VQS Lod: -0.751 <= x < -0.6681'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.60to99.80',
description=
'Truth sensitivity tranche level for SNP model at VQS Lod: -1.0839 <= x < -0.751'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.80to99.90',
description=
'Truth sensitivity tranche level for SNP model at VQS Lod: -1.7082 <= x < -1.0839'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.90to99.95',
description=
'Truth sensitivity tranche level for SNP model at VQS Lod: -3.0342 <= x < -1.7082'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.95to100.00+',
description=
'Truth sensitivity tranche level for SNP model at VQS Lod < -40235.9641'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.95to100.00',
description=
'Truth sensitivity tranche level for SNP model at VQS Lod: -40235.9641 <= x < -3.0342'
)
]
# pylint: enable=line-too-long
class WrapVcfReaderTests(absltest.TestCase):
def setUp(self):
self.sites_vcf = test_utils.genomics_core_testdata('test_sites.vcf')
self.samples_vcf = test_utils.genomics_core_testdata('test_samples.vcf.gz')
self.options = variants_pb2.VcfReaderOptions()
self.sites_reader = vcf_reader.VcfReader.from_file(self.sites_vcf,
self.options)
self.samples_reader = vcf_reader.VcfReader.from_file(
self.samples_vcf, self.options)
def test_vcf_iterate(self):
iterable = self.sites_reader.iterate()
self.assertEqual(test_utils.iterable_len(iterable), 5)
def test_vcf_header(self):
header = self.sites_reader.header
expected1 = variants_pb2.VcfStructuredExtra(
key='ALT',
fields=[
variants_pb2.VcfExtra(key='ID', value='NON_REF'),
variants_pb2.VcfExtra(
key='Description',
value='Represents any possible alternative allele at th'
'is location')
])
expected2 = variants_pb2.VcfStructuredExtra(
key='META',
fields=[
variants_pb2.VcfExtra(key='ID', value='TESTMETA'),
variants_pb2.VcfExtra(key='Description', value='blah')
])
self.assertLen(header.structured_extras, 2)
self.assertEqual(header.structured_extras[1], expected2)
self.assertEqual(header.structured_extras[0], expected1)
def test_vcf_contigs(self):
self.assertEqual(expected_sites_contigs,
list(self.sites_reader.header.contigs))
def test_vcf_filters(self):
self.assertEqual(expected_samples_filters,
list(self.samples_reader.header.filters))
def test_vcf_samples(self):
self.assertEqual(list(self.sites_reader.header.sample_names), [])
self.assertEqual(
list(self.samples_reader.header.sample_names), ['NA12878_18_99'])
def test_vcf_query(self):
range1 = ranges.parse_literal('chr3:100,000-500,000')
iterable = self.samples_reader.query(range1)
self.assertEqual(test_utils.iterable_len(iterable), 4)
def test_vcf_from_string(self):
v = self.samples_reader.from_string(
'chr3\t370537\trs142286746\tC\tCA,CAA\t350.73\tPASS\t'
'AC=1,1;AF=0.500,0.500;AN=2;DB;DP=16;ExcessHet=3.0103;'
'FS=0.000;MLEAC=1,1;MLEAF=0.500,0.500;MQ=60.00;QD=26.98;'
'SOR=1.179;VQSLOD=2.88;culprit=FS\tGT:AD:DP:GQ:PL\t'
'1/2:0,6,7:13:99:388,188,149,140,0,116')
self.assertEqual(v.reference_name, 'chr3')
self.assertEqual(v.start, 370536)
self.assertEqual(list(v.names), ['rs142286746'])
self.assertEqual(v.reference_bases, 'C')
self.assertEqual(list(v.alternate_bases), ['CA', 'CAA'])
self.assertEqual(len(v.calls), 1)
def test_vcf_from_string_raises_on_bad_input(self):
with self.assertRaises(ValueError):
self.samples_reader.from_string('BAD NOT A VCF RECORD\n;;')
def test_from_file_raises_with_missing_source(self):
with self.assertRaisesRegexp(ValueError,
'Not found: Could not open missing.vcf'):
vcf_reader.VcfReader.from_file('missing.vcf', self.options)
def test_ops_on_closed_reader_raise(self):
with self.samples_reader:
pass
# At this point the reader is closed.
with self.assertRaisesRegexp(ValueError, 'Cannot Iterate a closed'):
self.samples_reader.iterate()
with self.assertRaisesRegexp(ValueError, 'Cannot Query a closed'):
self.samples_reader.query(
ranges.parse_literal('chr1:10,000,000-10,000,100'))
def test_query_on_unindexed_reader_raises(self):
window = ranges.parse_literal('chr1:10,000,000-10,000,100')
unindexed_file = test_utils.genomics_core_testdata('test_samples.vcf')
with vcf_reader.VcfReader.from_file(unindexed_file, self.options) as reader:
with self.assertRaisesRegexp(ValueError, 'Cannot query without an index'):
reader.query(window)
def test_query_raises_with_bad_range(self):
with self.assertRaisesRegexp(ValueError, 'Unknown reference_name'):
self.samples_reader.query(ranges.parse_literal('XXX:1-10'))
with self.assertRaisesRegexp(ValueError, 'Malformed region'):
self.samples_reader.query(ranges.parse_literal('chr1:0-5'))
with self.assertRaisesRegexp(ValueError, 'Malformed region'):
self.samples_reader.query(ranges.parse_literal('chr1:6-5'))
with self.assertRaisesRegexp(ValueError, 'Malformed region'):
self.samples_reader.query(ranges.parse_literal('chr1:10-5'))
def test_context_manager(self):
with vcf_reader.VcfReader.from_file(self.sites_vcf, self.options) as f:
self.assertEqual(expected_sites_contigs, list(f.header.contigs))
# Commented out because we in fact don't detect the malformed VCF yet. It is
# unclear if it's even possible to detect the issue with the API provided by
# htslib.
# def test_vcf_iterate_raises_on_malformed_record(self):
# malformed = test_utils.genomics_core_testdata('malformed.vcf')
# reader = vcf_reader.VcfReader.from_file(malformed, self.unindexed_options)
# iterable = iter(reader.iterate())
# self.assertIsNotNone(next(iterable))
# with self.assertRaises(ValueError):
# print(list(iterable))
if __name__ == '__main__':
absltest.main()
|
the-stack_106_31842 | import os
import struct
import threading
from collections import namedtuple
from io import BytesIO
from tarfile import TarFile, TarInfo
from c3nav.mapdata.utils.cache import AccessRestrictionAffected, GeometryIndexed, MapHistory
CachePackageLevel = namedtuple('CachePackageLevel', ('history', 'restrictions'))
class CachePackage:
def __init__(self, bounds, levels=None):
self.bounds = bounds
self.levels = {} if levels is None else levels
def add_level(self, level_id: int, history: MapHistory, restrictions: AccessRestrictionAffected):
self.levels[level_id] = CachePackageLevel(history, restrictions)
def save(self, filename=None, compression=None):
if filename is None:
from django.conf import settings
filename = os.path.join(settings.CACHE_ROOT, 'package.tar')
if compression is not None:
filename += '.' + compression
filemode = 'w'
if compression is not None:
filemode += ':' + compression
with TarFile.open(filename, filemode) as f:
self._add_bytesio(f, 'bounds', BytesIO(struct.pack('<iiii', *(int(i*100) for i in self.bounds))))
for level_id, level_data in self.levels.items():
self._add_geometryindexed(f, 'history_%d' % level_id, level_data.history)
self._add_geometryindexed(f, 'restrictions_%d' % level_id, level_data.restrictions)
def _add_bytesio(self, f: TarFile, filename: str, data: BytesIO):
data.seek(0, os.SEEK_END)
tarinfo = TarInfo(name=filename)
tarinfo.size = data.tell()
data.seek(0)
f.addfile(tarinfo, data)
def _add_geometryindexed(self, f: TarFile, filename: str, obj: GeometryIndexed):
data = BytesIO()
obj.write(data)
self._add_bytesio(f, filename, data)
def save_all(self, filename=None):
for compression in (None, 'gz', 'xz'):
self.save(filename, compression)
@classmethod
def read(cls, f):
f = TarFile.open(fileobj=f)
files = {info.name: info for info in f.getmembers()}
bounds = tuple(i/100 for i in struct.unpack('<iiii', f.extractfile(files['bounds']).read()))
levels = {}
for filename in files:
if not filename.startswith('history_'):
continue
level_id = int(filename[8:])
levels[level_id] = CachePackageLevel(
history=MapHistory.read(f.extractfile(files['history_%d' % level_id])),
restrictions=AccessRestrictionAffected.read(f.extractfile(files['restrictions_%d' % level_id]))
)
return cls(bounds, levels)
@classmethod
def open(cls, filename=None):
if filename is None:
from django.conf import settings
filename = os.path.join(settings.CACHE_ROOT, 'package.tar')
return cls.read(open(filename, 'rb'))
cached = None
cache_key = None
cache_lock = threading.Lock()
@classmethod
def open_cached(cls):
with cls.cache_lock:
from c3nav.mapdata.models import MapUpdate
cache_key = MapUpdate.current_processed_cache_key()
if cls.cache_key != cache_key:
cls.cache_key = cache_key
cls.cached = None
if cls.cached is None:
cls.cached = cls.open()
return cls.cached
def bounds_valid(self, minx, miny, maxx, maxy):
return (minx <= self.bounds[2] and maxx >= self.bounds[0] and
miny <= self.bounds[3] and maxy >= self.bounds[1])
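# --- Hedged usage sketch (added for illustration) ---
# Builds and round-trips a package; `history` and `restrictions` are assumed to be
# MapHistory / AccessRestrictionAffected instances produced by c3nav's cache code.
#
#   pkg = CachePackage(bounds=(0.0, 0.0, 100.0, 100.0))
#   pkg.add_level(1, history, restrictions)
#   pkg.save_all()                       # writes package.tar, package.tar.gz, package.tar.xz
#   reopened = CachePackage.open_cached()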
|
the-stack_106_31843 | from abc import ABC
import tensorflow as tf
print(tf.__version__)
class Controller(tf.keras.layers.Layer):
def __init__(self, init):
super(Controller, self).__init__(name='Controller')
self.w = tf.Variable(initial_value=init, dtype='float32', trainable=True)
def call(self, Model, x):
y = Model(x, tf.math.tanh(self.w))
return y
# resolution^2 must be divisible by num_heads
class LHC_Module(tf.keras.layers.Layer):
def __init__(self, pool_size, head_emb_dim, num_heads, num_channels, resolution, kernel_size, norm_c, name):
super(LHC_Module, self).__init__()
self.pool_size = pool_size
self.head_emb_dim = head_emb_dim
self.num_heads = num_heads
self.num_channels = num_channels
self.resolution = resolution
self.kernel_size = kernel_size
self.norm_c = norm_c
self.Poolq = tf.keras.layers.AvgPool2D(pool_size=(self.pool_size, self.pool_size),
strides=(1, 1),
padding='same')
self.Poolk = tf.keras.layers.MaxPool2D(pool_size=(self.pool_size, self.pool_size),
strides=(1, 1),
padding='same')
self.Wqk = [tf.keras.layers.Dense(units=self.head_emb_dim, activation='linear') for _ in range(self.num_heads)]
self.Wp = tf.keras.layers.Dense(units=self.num_channels, activation='sigmoid')
self.Wv = tf.keras.layers.Conv2D(filters=self.num_channels,
kernel_size=self.kernel_size,
strides=(1, 1),
padding='same',
activation='linear')
self.Poolv = tf.keras.layers.AvgPool2D(pool_size=(3, 3),
strides=(1, 1),
padding='same')
self.sum = tf.keras.layers.Add()
self.Name_1_ = 'LHC_1_'+name
def VectScaledDotProdAttention(self, query, key, value):
scores = tf.linalg.matmul(query, key, transpose_b=True) # (batch_size, num_heads, num_channels, num_channels)
scores_p = tf.math.reduce_mean(scores, axis=3) # (batch_size, num_heads, num_channels)
scores_p = self.Wp(scores_p) # (batch_size, num_heads, num_channels)
scores_p = tf.expand_dims(scores_p, axis=-1) # (batch_size, num_heads, num_channels, 1)
norm_scores = tf.math.divide(scores, tf.math.pow(tf.dtypes.cast(key.shape[3], tf.float32), self.norm_c + scores_p)) # (batch_size, num_heads, num_channels, num_channels)
weights = tf.nn.softmax(norm_scores, axis=3) # (batch_size, num_heads, num_channels, num_channels)
attentions = tf.linalg.matmul(weights, value) # (batch_size, num_heads, num_channels, head_res_dim)
return attentions
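    # Reading of the method above (inferred from the code, not from documentation):
    # it is scaled dot-product attention with a learned, per-channel scaling exponent,
    #   attention = softmax( (Q K^T) / d ** (norm_c + Wp(mean_rows(Q K^T))) ) V
    # where d = head_emb_dim and Wp is the sigmoid-activated Dense layer defined in __init__.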
def call(self, x, weight_att=0):
batch_size = tf.shape(x)[0]
num_channels = self.num_channels
resolution = self.resolution
head_res_dim = (resolution * resolution) // self.num_heads
query = x # (batch_size, resolution, resolution, num_channels)
query = self.Poolq(query) # (batch_size, resolution, resolution, num_channels)
query = tf.reshape(query, shape=(batch_size, resolution * resolution, num_channels)) # (batch_size, resolution^2, num_channels)
query = tf.transpose(query, perm=[0, 2, 1]) # (batch_size, num_channels, resolution^2)
query = tf.reshape(query, shape=(batch_size, num_channels, self.num_heads, head_res_dim)) # (batch_size, num_channels, num_heads, head_res_dim)
query = tf.transpose(query, perm=[0, 2, 1, 3]) # (batch_size, num_heads, num_channels, head_res_dim)
q = [None] * self.num_heads
for i in range(self.num_heads):
q[i] = self.Wqk[i](query[:, i, :, :]) # (batch_size, num_channels, head_emb_dim)
q[i] = tf.expand_dims(q[i], axis=1) # (batch_size, 1, num_channels, head_emb_dim)
query = tf.concat(q, axis=1) # (batch_size, num_heads, num_channels, head_emb_dim)
key = x # (batch_size, resolution, resolution, num_channels)
key = self.Poolk(key) # (batch_size, resolution, resolution, num_channels)
key = tf.reshape(key, shape=(batch_size, resolution * resolution, num_channels)) # (batch_size, resolution^2, num_channels)
key = tf.transpose(key, perm=[0, 2, 1]) # (batch_size, num_channels, resolution^2)
key = tf.reshape(key, shape=(batch_size, num_channels, self.num_heads, head_res_dim)) # (batch_size, num_channels, num_heads, head_res_dim)
key = tf.transpose(key, perm=[0, 2, 1, 3]) # (batch_size, num_heads, num_channels, head_res_dim)
k = [None] * self.num_heads
for i in range(self.num_heads):
k[i] = self.Wqk[i](key[:, i, :, :]) # (batch_size, num_channels, head_emb_dim)
k[i] = tf.expand_dims(k[i], axis=1) # (batch_size, 1, num_channels, head_emb_dim)
key = tf.concat(k, axis=1) # (batch_size, num_heads, num_channels, head_emb_dim)
value = self.Wv(x) # (batch_size, resolution, resolution, num_channels)
value = self.Poolv(value) # (batch_size, resolution, resolution, num_channels)
value = tf.reshape(value, shape=(batch_size, resolution * resolution, num_channels)) # (batch_size, resolution^2, num_channels)
value = tf.transpose(value, perm=[0, 2, 1]) # (batch_size, num_channels, resolution^2)
value = tf.reshape(value, shape=(batch_size, num_channels, self.num_heads, head_res_dim)) # (batch_size, num_channels, num_heads, head_res_dim)
value = tf.transpose(value, perm=[0, 2, 1, 3]) # (batch_size, num_heads, num_channels, head_res_dim)
attentions = self.VectScaledDotProdAttention(query, key, value) # (batch_size, num_heads, num_channels, head_res_dim)
attentions = tf.transpose(attentions, perm=[0, 2, 1, 3]) # (batch_size, num_channels, num_heads, head_res_dim)
attention = tf.reshape(attentions, shape=(batch_size, num_channels, resolution * resolution)) # (batch_size, num_channels, resolution^2)
attention = tf.transpose(attention, perm=[0, 2, 1]) # (batch_size, resolution^2, num_channels)
attention = tf.reshape(attention, shape=(batch_size, resolution, resolution, num_channels)) # (batch_size, resolution, resolution, num_channels)
out = self.sum([x, attention*(1 + weight_att)]) # (batch_size, resolution, resolution, num_channels)
return out
class LHCResBlockSmall(tf.keras.Model, ABC):
def __init__(self, filters, kernels, strides, identity, resolution, att_num_channel, num_heads, att_embed_dim, att_kernel_size, pool_size, norm_c, name):
super(LHCResBlockSmall, self).__init__(name='LHCResBlockSmall')
self.Identity = identity
self.bn1 = tf.keras.layers.BatchNormalization(epsilon=2e-05, name=name+"_BN1")
self.relu1 = tf.keras.layers.Activation(activation='relu', name=name+"_Relu1")
self.pad1 = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name+'_Padding1')
self.conv1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernels[0], strides=strides[0], padding='valid', activation='linear', use_bias=False, name=name+'_Conv1')
self.bn2 = tf.keras.layers.BatchNormalization(epsilon=2e-05, name=name+"_BN2")
self.relu2 = tf.keras.layers.Activation(activation='relu', name=name+"_Relu2")
self.pad2 = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name+'_Padding2')
self.conv2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernels[1], strides=strides[1], padding='valid', activation='linear', use_bias=False, name=name+'_Conv2')
if self.Identity:
self.convId = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernels[2], strides=strides[2], padding='valid', activation='linear', use_bias=False, name=name+'_ConvId')
self.LHC_Module = LHC_Module(pool_size=pool_size,
resolution=resolution,
num_channels=att_num_channel,
num_heads=num_heads,
head_emb_dim=att_embed_dim,
kernel_size=att_kernel_size,
norm_c=norm_c,
name=name)
self.add = tf.keras.layers.Add()
def call(self, x, weight_att):
if self.Identity:
y = self.bn1(x)
y = self.relu1(y)
xb = y
y = self.pad1(y)
y = self.conv1(y)
y = self.bn2(y)
y = self.relu2(y)
y = self.pad2(y)
y = self.conv2(y)
y2 = self.convId(xb)
y = self.add([y, y2])
y = self.LHC_Module(y, weight_att)
return y
else:
y = self.bn1(x)
y = self.relu1(y)
y = self.pad1(y)
y = self.conv1(y)
y = self.bn2(y)
y = self.relu2(y)
y = self.pad2(y)
y = self.conv2(y)
y = self.add([y, x])
y = self.LHC_Module(y, weight_att)
return y
def import_w(self, layers):
for i in range(len(layers)):
for j in range(len(layers[i].weights)):
self.layers[i].weights[j].assign(layers[i].weights[j])
class LHCResBlockSmall0(tf.keras.Model, ABC):
def __init__(self, input_shape, resolution, att_num_channel, num_heads, att_embed_dim, att_kernel_size, pool_size, norm_c):
super(LHCResBlockSmall0, self).__init__(name='LHCResBlockSmall0')
self.Input = tf.keras.layers.InputLayer(input_shape=input_shape)
self.bn1 = tf.keras.layers.BatchNormalization(epsilon=2e-05, scale=False, name='Block0_BN1')
self.pad1 = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='Block0_Padding1')
self.conv1 = tf.keras.layers.Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2), padding='valid', activation='linear', use_bias=False, name='Block0_Conv1')
self.bn2 = tf.keras.layers.BatchNormalization(epsilon=2e-05, name='Block0_BN2')
self.relu1 = tf.keras.layers.Activation(activation='relu', name='Block0_Relu1')
self.pad2 = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='Block0_Padding2')
self.pool1 = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='Block0_MaxPool1')
self.LHC_Module = LHC_Module(pool_size=pool_size,
resolution=resolution,
num_channels=att_num_channel,
num_heads=num_heads,
head_emb_dim=att_embed_dim,
kernel_size=att_kernel_size,
norm_c=norm_c,
name='Module_0')
def call(self, x, weight_att):
x1 = self.Input(x)
x1 = self.bn1(x1)
x1 = self.pad1(x1)
x1 = self.conv1(x1)
x1 = self.bn2(x1)
x1 = self.relu1(x1)
x1 = self.pad2(x1)
x1 = self.pool1(x1)
x1 = self.LHC_Module(x1, weight_att)
return x1
def import_w(self, layers):
for i in range(len(layers)):
for j in range(len(layers[i].weights)):
self.layers[i].weights[j].assign(layers[i].weights[j])
class ResBlockSmall(tf.keras.Model, ABC):
def __init__(self, filters, kernels, strides, identity, name):
super(ResBlockSmall, self).__init__(name='ResBlockSmall')
self.Identity = identity
self.bn1 = tf.keras.layers.BatchNormalization(epsilon=2e-05, name=name+"_BN1")
self.relu1 = tf.keras.layers.Activation(activation='relu', name=name+"_Relu1")
self.pad1 = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name+'_Padding1')
self.conv1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernels[0], strides=strides[0], padding='valid', activation='linear', use_bias=False, name=name+'_Conv1')
self.bn2 = tf.keras.layers.BatchNormalization(epsilon=2e-05, name=name+"_BN2")
self.relu2 = tf.keras.layers.Activation(activation='relu', name=name+"_Relu2")
self.pad2 = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name+'_Padding2')
self.conv2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernels[1], strides=strides[1], padding='valid', activation='linear', use_bias=False, name=name+'_Conv2')
if self.Identity:
self.convId = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernels[2], strides=strides[2], padding='valid', activation='linear', use_bias=False, name=name+'_ConvId')
self.add = tf.keras.layers.Add()
def call(self, x):
if self.Identity:
y = self.bn1(x)
y = self.relu1(y)
xb = y
y = self.pad1(y)
y = self.conv1(y)
y = self.bn2(y)
y = self.relu2(y)
y = self.pad2(y)
y = self.conv2(y)
y2 = self.convId(xb)
y = self.add([y, y2])
return y
else:
y = self.bn1(x)
y = self.relu1(y)
y = self.pad1(y)
y = self.conv1(y)
y = self.bn2(y)
y = self.relu2(y)
y = self.pad2(y)
y = self.conv2(y)
y = self.add([y, x])
return y
def import_w(self, layers):
for i in range(len(layers)):
for j in range(len(layers[i].weights)):
self.layers[i].weights[j].assign(layers[i].weights[j])
class LHC_ResNet34(tf.keras.Model, ABC):
def __init__(self, input_shape, num_classes, att_params, controller_init):
super(LHC_ResNet34, self).__init__(name='LHC_ResNet34')
self.bypass_controller = False
self.Input = tf.keras.layers.InputLayer(input_shape=input_shape)
self.conv1 = LHCResBlockSmall0(input_shape=input_shape,
resolution=56,
att_num_channel=64,
num_heads=att_params['num_heads'][0],
att_embed_dim=att_params['att_embed_dim'][0],
att_kernel_size=att_params['kernel_size'][0],
pool_size=att_params['pool_size'][0],
norm_c=att_params['norm_c'][0])
self.conv2_1 = ResBlockSmall(filters=64, kernels=((3, 3), (3, 3), (1, 1)), strides=((1, 1), (1, 1), (1, 1)), identity=True, name='stage1_unit1')
self.conv2_2 = ResBlockSmall(filters=64, kernels=((3, 3), (3, 3), (1, 1)), strides=((1, 1), (1, 1), (1, 1)), identity=False, name='stage1_unit2')
self.conv2_3 = LHCResBlockSmall(filters=64,
kernels=((3, 3), (3, 3), (1, 1)),
strides=((1, 1), (1, 1), (1, 1)),
identity=False,
resolution=56,
att_num_channel=64,
num_heads=att_params['num_heads'][1],
att_embed_dim=att_params['att_embed_dim'][1],
att_kernel_size=att_params['kernel_size'][1],
pool_size=att_params['pool_size'][1],
norm_c=att_params['norm_c'][1],
name='stage1_unit3')
self.conv3_1 = ResBlockSmall(filters=128, kernels=((3, 3), (3, 3), (1, 1)), strides=((2, 2), (1, 1), (2, 2)), identity=True, name='stage2_unit1')
self.conv3_2 = ResBlockSmall(filters=128, kernels=((3, 3), (3, 3), (1, 1)), strides=((1, 1), (1, 1), (1, 1)), identity=False, name='stage2_unit2')
self.conv3_3 = ResBlockSmall(filters=128, kernels=((3, 3), (3, 3), (1, 1)), strides=((1, 1), (1, 1), (1, 1)), identity=False, name='stage2_unit3')
self.conv3_4 = LHCResBlockSmall(filters=128,
kernels=((3, 3), (3, 3), (1, 1)),
strides=((1, 1), (1, 1), (1, 1)),
identity=False,
resolution=28,
att_num_channel=128,
num_heads=att_params['num_heads'][2],
att_embed_dim=att_params['att_embed_dim'][2],
att_kernel_size=att_params['kernel_size'][2],
pool_size=att_params['pool_size'][2],
norm_c=att_params['norm_c'][2],
name='stage2_unit4')
self.conv4_1 = ResBlockSmall(filters=256, kernels=((3, 3), (3, 3), (1, 1)), strides=((2, 2), (1, 1), (2, 2)), identity=True, name='stage3_unit1')
self.conv4_2 = ResBlockSmall(filters=256, kernels=((3, 3), (3, 3), (1, 1)), strides=((1, 1), (1, 1), (1, 1)), identity=False, name='stage3_unit2')
self.conv4_3 = ResBlockSmall(filters=256, kernels=((3, 3), (3, 3), (1, 1)), strides=((1, 1), (1, 1), (1, 1)), identity=False, name='stage3_unit3')
self.conv4_4 = ResBlockSmall(filters=256, kernels=((3, 3), (3, 3), (1, 1)), strides=((1, 1), (1, 1), (1, 1)), identity=False, name='stage3_unit4')
self.conv4_5 = ResBlockSmall(filters=256, kernels=((3, 3), (3, 3), (1, 1)), strides=((1, 1), (1, 1), (1, 1)), identity=False, name='stage3_unit5')
self.conv4_6 = LHCResBlockSmall(filters=256,
kernels=((3, 3), (3, 3), (1, 1)),
strides=((1, 1), (1, 1), (1, 1)),
identity=False,
resolution=14,
att_num_channel=256,
num_heads=att_params['num_heads'][3],
att_embed_dim=att_params['att_embed_dim'][3],
att_kernel_size=att_params['kernel_size'][3],
pool_size=att_params['pool_size'][3],
norm_c=att_params['norm_c'][3],
name='stage3_unit6')
self.conv5_1 = ResBlockSmall(filters=512, kernels=((3, 3), (3, 3), (1, 1)), strides=((2, 2), (1, 1), (2, 2)), identity=True, name='stage4_unit1')
self.conv5_2 = ResBlockSmall(filters=512, kernels=((3, 3), (3, 3), (1, 1)), strides=((1, 1), (1, 1), (1, 1)), identity=False, name='stage4_unit2')
self.conv5_3 = LHCResBlockSmall(filters=512,
kernels=((3, 3), (3, 3), (1, 1)),
strides=((1, 1), (1, 1), (1, 1)),
identity=False,
resolution=7,
att_num_channel=512,
num_heads=att_params['num_heads'][4],
att_embed_dim=att_params['att_embed_dim'][4],
att_kernel_size=att_params['kernel_size'][4],
pool_size=att_params['pool_size'][4],
norm_c=att_params['norm_c'][4],
name='stage4_unit3')
self.bn = tf.keras.layers.BatchNormalization(epsilon=2e-05, name='bn')
self.relu = tf.keras.layers.Activation(activation='relu', name='relu')
self.pool = tf.keras.layers.GlobalAveragePooling2D()
self.fc1 = tf.keras.layers.Dense(units=4096, activation='relu')
self.dp1 = tf.keras.layers.Dropout(0.4)
self.fc2 = tf.keras.layers.Dense(units=1024, activation='relu')
self.dp2 = tf.keras.layers.Dropout(0.4)
self.fc3 = tf.keras.layers.Dense(units=num_classes, activation='softmax')
# a = [0, -0.2, -0.4, -0.4, -1]
# a = [0, -0.2, -0.4, -0.4, -0.8]
# a = [0, -0.2, -0.4, -0.4, 0.2]
# a = [0, 0, 0, -1, 0]
self.controller1 = Controller(init=controller_init[0])
self.controller2 = Controller(init=controller_init[1])
self.controller3 = Controller(init=controller_init[2])
self.controller4 = Controller(init=controller_init[3])
self.controller5 = Controller(init=controller_init[4])
def import_w(self, model):
self.conv1.import_w(model.layers[1].layers[0:8])
self.conv2_1.import_w(model.layers[1].layers[8:18-1])
self.conv2_2.import_w(model.layers[1].layers[18:27-1])
self.conv2_3.import_w(model.layers[1].layers[27:36-1])
self.conv3_1.import_w(model.layers[1].layers[36:46-1])
self.conv3_2.import_w(model.layers[1].layers[46:55-1])
self.conv3_3.import_w(model.layers[1].layers[55:64-1])
self.conv3_4.import_w(model.layers[1].layers[64:73-1])
self.conv4_1.import_w(model.layers[1].layers[73:83-1])
self.conv4_2.import_w(model.layers[1].layers[83:92-1])
self.conv4_3.import_w(model.layers[1].layers[92:101-1])
self.conv4_4.import_w(model.layers[1].layers[101:110-1])
self.conv4_5.import_w(model.layers[1].layers[110:119-1])
self.conv4_6.import_w(model.layers[1].layers[119:128-1])
self.conv5_1.import_w(model.layers[1].layers[128:138-1])
self.conv5_2.import_w(model.layers[1].layers[138:147-1])
self.conv5_3.import_w(model.layers[1].layers[147:156-1])
for i in range(len(self.bn.weights)):
self.bn.weights[i].assign(model.layers[1].layers[156].weights[i])
for i in range(len(self.relu.weights)):
self.relu.weights[i].assign(model.layers[1].layers[157].weights[i])
for i in range(len(self.pool.weights)):
self.pool.weights[i].assign(model.layers[2].weights[i])
for i in range(len(self.fc1.weights)):
self.fc1.weights[i].assign(model.layers[3].weights[i])
for i in range(len(self.dp1.weights)):
self.dp1.weights[i].assign(model.layers[4].weights[i])
for i in range(len(self.fc2.weights)):
self.fc2.weights[i].assign(model.layers[5].weights[i])
for i in range(len(self.dp2.weights)):
self.dp2.weights[i].assign(model.layers[6].weights[i])
for i in range(len(self.fc3.weights)):
self.fc3.weights[i].assign(model.layers[7].weights[i])
def import_weights_from_lhc(self, model):
self.conv1.set_weights(model.conv1.get_weights())
self.conv2_1.set_weights(model.conv2_1.get_weights())
self.conv2_2.set_weights(model.conv2_2.get_weights())
self.conv2_3.set_weights(model.conv2_3.get_weights())
self.conv3_1.set_weights(model.conv3_1.get_weights())
self.conv3_2.set_weights(model.conv3_2.get_weights())
self.conv3_3.set_weights(model.conv3_3.get_weights())
self.conv3_4.set_weights(model.conv3_4.get_weights())
self.conv4_1.set_weights(model.conv4_1.get_weights())
self.conv4_2.set_weights(model.conv4_2.get_weights())
self.conv4_3.set_weights(model.conv4_3.get_weights())
self.conv4_4.set_weights(model.conv4_4.get_weights())
self.conv4_5.set_weights(model.conv4_5.get_weights())
self.conv4_6.set_weights(model.conv4_6.get_weights())
self.conv5_1.set_weights(model.conv5_1.get_weights())
self.conv5_2.set_weights(model.conv5_2.get_weights())
self.conv5_3.set_weights(model.conv5_3.get_weights())
self.bn.set_weights(model.bn.get_weights())
self.relu.set_weights(model.relu.get_weights())
self.pool.set_weights(model.pool.get_weights())
self.fc1.set_weights(model.fc1.get_weights())
self.dp1.set_weights(model.dp1.get_weights())
self.fc2.set_weights(model.fc2.get_weights())
self.dp2.set_weights(model.dp2.get_weights())
self.fc3.set_weights(model.fc3.get_weights())
def freeze_lhc(self):
self.conv1.trainable = False
self.conv2_1.trainable = False
self.conv2_2.trainable = False
self.conv2_3.trainable = False
self.conv3_1.trainable = False
self.conv3_2.trainable = False
self.conv3_3.trainable = False
self.conv3_4.trainable = False
self.conv4_1.trainable = False
self.conv4_2.trainable = False
self.conv4_3.trainable = False
self.conv4_4.trainable = False
self.conv4_5.trainable = False
self.conv4_6.trainable = False
self.conv5_1.trainable = False
self.conv5_2.trainable = False
self.conv5_3.trainable = False
self.bn.trainable = False
self.relu.trainable = False
self.pool.trainable = False
self.fc1.trainable = False
self.dp1.trainable = False
self.fc2.trainable = False
self.dp2.trainable = False
self.fc3.trainable = False
self.controller1.trainable = True
self.controller2.trainable = True
self.controller3.trainable = True
self.controller4.trainable = True
self.controller5.trainable = True
def call(self, x):
x = self.Input(x)
x = self.controller1(self.conv1, x)
x = self.conv2_1(x)
x = self.conv2_2(x)
x = self.controller2(self.conv2_3, x)
x = self.conv3_1(x)
x = self.conv3_2(x)
x = self.conv3_3(x)
x = self.controller3(self.conv3_4, x)
x = self.conv4_1(x)
x = self.conv4_2(x)
x = self.conv4_3(x)
x = self.conv4_4(x)
x = self.conv4_5(x)
x = self.controller4(self.conv4_6, x)
x = self.conv5_1(x)
x = self.conv5_2(x)
x = self.controller5(self.conv5_3, x)
x = self.bn(x)
x = self.relu(x)
x = self.pool(x)
x = self.fc1(x)
x = self.dp1(x)
x = self.fc2(x)
x = self.dp2(x)
x = self.fc3(x)
return x
|
the-stack_106_31844 | from collections.abc import Iterable
def as_iterable(object):
if isinstance(object, Iterable):
return object
else:
return [object]
class Logger:
def __init__(self, source, processors, sink, timer):
self.source = source
self.processors = as_iterable(processors)
self.sink = sink
self.timer = timer
def run(self):
self.timer.reset()
success = True
while self.timer(success):
data = self.source.read()
for proc in self.processors:
data = proc(data)
success = self.sink.write(data)
def abort(self):
self.timer.abort()
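# A minimal usage sketch (illustrative only): InMemorySource, PrintSink, CountdownTimer and the
# doubling processor below are hypothetical stand-ins, not part of this module. The sketch shows
# the flow of Logger.run(): reset the timer, then on every tick read from the source, apply each
# processor in order, and hand the result to the sink until the timer stops the loop.
if __name__ == "__main__":
    class InMemorySource:
        def __init__(self, values):
            self._values = iter(values)
        def read(self):
            return next(self._values)
    class PrintSink:
        def write(self, data):
            print(data)
            return True  # report success back to the timer
    class CountdownTimer:
        def __init__(self, ticks):
            self._ticks = ticks
            self._remaining = ticks
        def reset(self):
            self._remaining = self._ticks
        def __call__(self, success):
            self._remaining -= 1
            return success and self._remaining >= 0
        def abort(self):
            self._remaining = 0
    logger = Logger(source=InMemorySource([1, 2, 3]),
                    processors=lambda x: 2 * x,  # a single callable is wrapped by as_iterable
                    sink=PrintSink(),
                    timer=CountdownTimer(ticks=3))
    logger.run()  # prints 2, 4, 6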
|
the-stack_106_31846 | # -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
import sys
import os
import datetime
import logging
import math
import pickle
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import LAC
import numpy as np
from paddle import fluid
from paddle.fluid import dygraph
from paddle.fluid import layers
from ddparser.parser import epoch_train
from ddparser.parser import epoch_evaluate
from ddparser.parser import epoch_predict
from ddparser.parser import save
from ddparser.parser import load
from ddparser.parser import decode
from ddparser.parser import ArgConfig
from ddparser.parser import Environment
from ddparser.parser import Model
from ddparser.parser.data_struct import Corpus
from ddparser.parser.data_struct import TextDataset
from ddparser.parser.data_struct import batchify
from ddparser.parser.data_struct import Field
from ddparser.parser.data_struct import utils
from ddparser.parser.data_struct import Metric
"""
程序入口,定义了训练,评估,预测等函数
"""
def train(env):
"""Train"""
args = env.args
logging.info("loading data.")
train = Corpus.load(args.train_data_path, env.fields)
dev = Corpus.load(args.valid_data_path, env.fields)
test = Corpus.load(args.test_data_path, env.fields)
logging.info("init dataset.")
train = TextDataset(train, env.fields, args.buckets)
dev = TextDataset(dev, env.fields, args.buckets)
test = TextDataset(test, env.fields, args.buckets)
logging.info("set the data loaders.")
train.loader = batchify(train, args.batch_size, args.use_data_parallel,
True)
dev.loader = batchify(dev, args.batch_size)
test.loader = batchify(test, args.batch_size)
logging.info(f"{'train:':6} {len(train):5} sentences, "
f"{len(train.loader):3} batches, "
f"{len(train.buckets)} buckets")
    logging.info(f"{'dev:':6} {len(dev):5} sentences, "
                 f"{len(dev.loader):3} batches, "
                 f"{len(dev.buckets)} buckets")
    logging.info(f"{'test:':6} {len(test):5} sentences, "
                 f"{len(test.loader):3} batches, "
                 f"{len(test.buckets)} buckets")
logging.info("Create the model")
model = Model(args, env.WORD.embed)
# init parallel strategy
if args.use_data_parallel:
strategy = dygraph.parallel.prepare_context()
model = dygraph.parallel.DataParallel(model, strategy)
if args.use_cuda:
grad_clip = fluid.clip.GradientClipByNorm(clip_norm=args.clip)
else:
grad_clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=args.clip)
decay = dygraph.ExponentialDecay(learning_rate=args.lr,
decay_steps=args.decay_steps,
decay_rate=args.decay)
optimizer = fluid.optimizer.AdamOptimizer(
learning_rate=decay,
beta1=args.mu,
beta2=args.nu,
epsilon=args.epsilon,
parameter_list=model.parameters(),
grad_clip=grad_clip)
total_time = datetime.timedelta()
best_e, best_metric = 1, Metric()
puncts = dygraph.to_variable(env.puncts)
logging.info("start training.")
for epoch in range(1, args.epochs + 1):
start = datetime.datetime.now()
        # train one epoch and update the parameters
logging.info(f"Epoch {epoch} / {args.epochs}:")
epoch_train(args, model, optimizer, train.loader, epoch)
if args.local_rank == 0:
loss, dev_metric = epoch_evaluate(args, model, dev.loader, puncts)
logging.info(f"{'dev:':6} Loss: {loss:.4f} {dev_metric}")
loss, test_metric = epoch_evaluate(args, model, test.loader,
puncts)
logging.info(f"{'test:':6} Loss: {loss:.4f} {test_metric}")
t = datetime.datetime.now() - start
# save the model if it is the best so far
if dev_metric > best_metric and epoch > args.patience // 10:
best_e, best_metric = epoch, dev_metric
save(args.model_path, args, model, optimizer)
logging.info(f"{t}s elapsed (saved)\n")
else:
logging.info(f"{t}s elapsed\n")
total_time += t
if epoch - best_e >= args.patience:
break
if args.local_rank == 0:
model = load(args.model_path)
loss, metric = epoch_evaluate(args, model, test.loader, puncts)
logging.info(
f"max score of dev is {best_metric.score:.2%} at epoch {best_e}")
logging.info(
f"the score of test at epoch {best_e} is {metric.score:.2%}")
logging.info(f"average time of each epoch is {total_time / epoch}s")
logging.info(f"{total_time}s elapsed")
def evaluate(env):
"""Evaluate"""
args = env.args
puncts = dygraph.to_variable(env.puncts)
logging.info("Load the dataset")
evaluates = Corpus.load(args.test_data_path, env.fields)
dataset = TextDataset(evaluates, env.fields, args.buckets)
# set the data loader
dataset.loader = batchify(dataset, args.batch_size)
logging.info(f"{len(dataset)} sentences, "
f"{len(dataset.loader)} batches, "
f"{len(dataset.buckets)} buckets")
logging.info("Load the model")
model = load(args.model_path)
logging.info("Evaluate the dataset")
start = datetime.datetime.now()
loss, metric = epoch_evaluate(args, model, dataset.loader, puncts)
total_time = datetime.datetime.now() - start
logging.info(f"Loss: {loss:.4f} {metric}")
logging.info(f"{total_time}s elapsed, "
f"{len(dataset) / total_time.total_seconds():.2f} Sents/s")
def predict(env):
"""Predict"""
args = env.args
logging.info("Load the dataset")
if args.prob:
env.fields = env.fields._replace(PHEAD=Field('prob'))
predicts = Corpus.load(args.infer_data_path, env.fields)
dataset = TextDataset(predicts, [env.WORD, env.FEAT], args.buckets)
# set the data loader
dataset.loader = batchify(dataset, args.batch_size)
logging.info(f"{len(dataset)} sentences, "
f"{len(dataset.loader)} batches")
logging.info("Load the model")
model = load(args.model_path)
model.args = args
logging.info("Make predictions on the dataset")
start = datetime.datetime.now()
pred_arcs, pred_rels, pred_probs = epoch_predict(env, args, model,
dataset.loader)
total_time = datetime.datetime.now() - start
# restore the order of sentences in the buckets
indices = np.argsort(
np.array([i for bucket in dataset.buckets.values() for i in bucket]))
predicts.head = [pred_arcs[i] for i in indices]
predicts.deprel = [pred_rels[i] for i in indices]
if args.prob:
predicts.prob = [pred_probs[i] for i in indices]
logging.info(f"Save the predicted result to {args.infer_result_path}")
predicts.save(args.infer_result_path)
logging.info(f"{total_time}s elapsed, "
f"{len(dataset) / total_time.total_seconds():.2f} Sents/s")
def predict_query(env):
"""Predict one query"""
args = env.args
logging.info("Load the model")
model = load(args.model_path)
lac_mode = "seg" if args.feat != "pos" else "lac"
lac = LAC.LAC(mode=lac_mode)
if args.prob:
env.fields = env.fields._replace(PHEAD=Field('prob'))
while True:
query = input()
if not query:
logging.info("quit!")
return
if len(query) > 200:
logging.info("The length of the query should be less than 200!")
continue
start = datetime.datetime.now()
lac_results = lac.run([query])
predicts = Corpus.load_lac_results(lac_results, env.fields)
dataset = TextDataset(predicts, [env.WORD, env.FEAT])
# set the data loader
dataset.loader = batchify(dataset,
args.batch_size,
use_multiprocess=False,
sequential_sampler=True)
pred_arcs, pred_rels, pred_probs = epoch_predict(
env, args, model, dataset.loader)
predicts.head = pred_arcs
predicts.deprel = pred_rels
if args.prob:
predicts.prob = pred_probs
predicts.print()
total_time = datetime.datetime.now() - start
logging.info(
f"{total_time}s elapsed, "
f"{len(dataset) / total_time.total_seconds():.2f} Sents/s, {total_time.total_seconds() / len(dataset) * 1000:.2f} ms/Sents"
)
class DDParser(object):
"""
DDParser
Args:
use_cuda: BOOL, 是否使用gpu
tree: BOOL, 是否返回树结构
prob: BOOL, 是否返回弧的概率
use_pos: BOOL, 是否返回词性标签(仅parse函数生效)
model_files_path: str, 模型地址, 为None时下载默认模型
buckets: BOOL, 是否对样本分桶. 若buckets=True,则会对inputs按长度分桶,处理长度不均匀的输入速度更新快,default=False
batch_size: INT, 批尺寸, 当buckets为False时,每个batch大小均等于batch_size; 当buckets为True时,每个batch的大小约为'batch_size / 当前桶句子的平均长度'。
当default=None时,分桶batch_size默认等于1000,不分桶默认等于50。
"""
def __init__(self,
use_cuda=False,
tree=True,
prob=False,
use_pos=False,
model_files_path=None,
buckets=False,
batch_size=None):
if model_files_path is None:
model_files_path = self._get_abs_path('./model_files/baidu')
if not os.path.exists(model_files_path):
try:
utils.download_model_from_url(model_files_path)
except Exception as e:
logging.error("Failed to download model, please try again")
logging.error(f"error: {e}")
return
args = [
f"--model_files={model_files_path}",
f"--config_path={self._get_abs_path('config.ini')}"
]
if use_cuda:
args.append("--use_cuda")
if tree:
args.append("--tree")
if prob:
args.append("--prob")
if batch_size:
args.append(f"--batch_size={batch_size}")
args = ArgConfig(args)
# Don't instantiate the log handle
args.log_path = None
self.env = Environment(args)
self.args = self.env.args
fluid.enable_imperative(self.env.place)
self.model = load(self.args.model_path)
self.lac = None
self.use_pos = use_pos
        # disable bucketing when buckets is False
if not buckets:
self.args.buckets = None
if args.prob:
self.env.fields = self.env.fields._replace(PHEAD=Field('prob'))
if self.use_pos:
self.env.fields = self.env.fields._replace(CPOS=Field('postag'))
# set default batch size if batch_size is None and not buckets
if batch_size is None and not buckets:
self.args.batch_size = 50
def parse(self, inputs):
"""
        Parse sentences that have not been word-segmented.
        Args:
            x: list(str) | str, unsegmented sentence(s), of type str or list
        Returns:
            outputs: list, dependency parsing results
Example:
>>> ddp = DDParser()
>>> inputs = "百度是一家高科技公司"
>>> ddp.parse(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'head': [2, 0, 5, 5, 2], 'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB']}]
>>> inputs = ["百度是一家高科技公司", "他送了一本书"]
>>> ddp.parse(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'head': [2, 0, 5, 5, 2], 'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB']},
{'word': ['他', '送', '了', '一本', '书'], 'head': [2, 0, 2, 5, 2], 'deprel': ['SBV', 'HED', 'MT', 'ATT', 'VOB']}]
>>> ddp = DDParser(prob=True, use_pos=True)
>>> inputs = "百度是一家高科技公司"
>>> ddp.parse(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'postag': ['ORG', 'v', 'm', 'n', 'n'],
'head': [2, 0, 5, 5, 2], 'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB'], 'prob': [1.0, 1.0, 1.0, 1.0, 1.0]}]
"""
if not self.lac:
self.lac = LAC.LAC(mode='lac' if self.use_pos else "seg",
use_cuda=self.args.use_cuda)
if not inputs:
return
if isinstance(inputs, str):
inputs = [inputs]
if all([isinstance(i, str) and i for i in inputs]):
lac_results = []
position = 0
while position < len(inputs):
lac_results += self.lac.run(inputs[position:position +
self.args.batch_size])
position += self.args.batch_size
predicts = Corpus.load_lac_results(lac_results, self.env.fields)
else:
            logging.warning("please check the format of your inputs.")
return
dataset = TextDataset(predicts, [self.env.WORD, self.env.FEAT],
self.args.buckets)
# set the data loader
dataset.loader = batchify(
dataset,
self.args.batch_size,
use_multiprocess=False,
sequential_sampler=True if not self.args.buckets else False)
pred_arcs, pred_rels, pred_probs = epoch_predict(
self.env, self.args, self.model, dataset.loader)
if self.args.buckets:
indices = np.argsort(
np.array([
i for bucket in dataset.buckets.values() for i in bucket
]))
else:
indices = range(len(pred_arcs))
predicts.head = [pred_arcs[i] for i in indices]
predicts.deprel = [pred_rels[i] for i in indices]
if self.args.prob:
predicts.prob = [pred_probs[i] for i in indices]
outputs = predicts.get_result()
return outputs
def parse_seg(self, inputs: list):
"""
        Parse sentences that are already word-segmented.
        Args:
            x: list(list(str)), word-segmented sentences, of type list
        Returns:
            outputs: list, dependency parsing results
Example:
>>> ddp = DDParser()
>>> inputs = [['百度', '是', '一家', '高科技', '公司'], ['他', '送', '了', '一本', '书']]
>>> ddp.parse_seg(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'head': [2, 0, 5, 5, 2], 'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB']},
{'word': ['他', '送', '了', '一本', '书'], 'head': [2, 0, 2, 5, 2], 'deprel': ['SBV', 'HED', 'MT', 'ATT', 'VOB']}]
>>> ddp = DDParser(prob=True)
>>> inputs = [['百度', '是', '一家', '高科技', '公司']]
>>> ddp.parse_seg(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'head': [2, 0, 5, 5, 2],
'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB'], 'prob': [1.0, 1.0, 1.0, 1.0, 1.0]}]
"""
if not inputs:
return
if all([isinstance(i, list) and i for i in inputs]):
predicts = Corpus.load_word_segments(inputs, self.env.fields)
else:
            logging.warning("please check the format of your inputs.")
return
dataset = TextDataset(predicts, [self.env.WORD, self.env.FEAT],
self.args.buckets)
# set the data loader
dataset.loader = batchify(
dataset,
self.args.batch_size,
use_multiprocess=False,
sequential_sampler=True if not self.args.buckets else False)
pred_arcs, pred_rels, pred_probs = epoch_predict(
self.env, self.args, self.model, dataset.loader)
if self.args.buckets:
indices = np.argsort(
np.array([
i for bucket in dataset.buckets.values() for i in bucket
]))
else:
indices = range(len(pred_arcs))
predicts.head = [pred_arcs[i] for i in indices]
predicts.deprel = [pred_rels[i] for i in indices]
if self.args.prob:
predicts.prob = [pred_probs[i] for i in indices]
outputs = predicts.get_result()
if outputs[0].get("postag", None):
for output in outputs:
del output["postag"]
return outputs
def _get_abs_path(self, path):
return os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), path))
if __name__ == '__main__':
logging.info("init arguments.")
args = ArgConfig()
logging.info("init environment.")
env = Environment(args)
logging.info(f"Override the default configs\n{env.args}")
logging.info(f"{env.WORD}\n{env.FEAT}\n{env.ARC}\n{env.REL}")
logging.info(f"Set the max num of threads to {env.args.threads}")
logging.info(
f"Set the seed for generating random numbers to {env.args.seed}")
logging.info(f"Run the subcommand in mode {env.args.mode}")
fluid.enable_imperative(env.place)
mode = env.args.mode
if mode == "train":
train(env)
elif mode == "evaluate":
evaluate(env)
elif mode == "predict":
predict(env)
elif mode == "predict_q":
predict_query(env)
else:
logging.error(f"Unknown task mode: {mode}.")
|
the-stack_106_31848 | #!/usr/bin/python3
"""ThreadMessage object representation.
Documentation example:
.. code-block:: javascript
{
"id": 67,
"type": "message",
"attributes": {
"message": "I choose you!",
"message_html": "I choose you!",
"posted_at": "2019-04-03T09:33:05+03:00",
"attachments": [],
"participants": {
"from": {
"id": 38444,
"type": "employer",
"login": "jeweller",
"first_name": "Oleg",
"last_name": "V.",
"avatar": {
"small": {
"url": "https://content.freelancehunt.com/profile/photo/50/jeweller.png",
"width": 50,
"height": 50
},
"large": {
"url": "https://content.freelancehunt.com/profile/photo/225/jeweller.png",
"width": 255,
"height": 255
}
},
"self": "https://api.freelancehunt.com/v2/employers/38444"
},
"to": {
"id": 5725,
"type": "freelancer",
"login": "alex_yar",
"first_name": "Andrey",
"last_name": "Y.",
"avatar": {
"small": {
"url": "https://content.freelancehunt.com/profile/photo/50/alex_yar.png",
"width": 50,
"height": 50
},
"large": {
"url": "https://content.freelancehunt.com/profile/photo/225/alex_yar.png",
"width": 255,
"height": 255
}
},
"self": "https://api.freelancehunt.com/v2/freelancers/5725"
}
}
}
}
"""
from datetime import datetime
from typing import Type, Optional
from ..core import FreelancehuntObject
from ..utils.errors import BadRequestError
from .user import Profile
__all__ = ('ThreadMessage',)
class ThreadMessage(FreelancehuntObject):
"""Provide operations with ThreadMessage.
    :param int id: message unique identifier
:param str message: message text
:param str message_html: message text in html
:param datetime posted_at: the message post date
:param dict attachments: message's attachments
:param Profile sender: message creator information
:param Profile recipient: message recipient information
"""
def __init__(self,
id: int,
posted_at: str,
message: str,
message_html: str,
sender: Profile,
recipient: Profile,
attachments: Optional[list] = None,
thread: Optional[dict] = None,
**kwargs):
"""Create object to provide operations with ThreadMessage.
:param int id: thread unique identifier
:param str message: message text
:param str message_html: message text in html
:param str posted_at: string representation of the message post date
:param dict attachments: message's attachments
:param Profile sender: message creator information
:param Profile recipient: message recipient information
"""
super().__init__(**kwargs)
self.id = id
self.posted_at = datetime.fromisoformat(posted_at)
self.message = message
self.message_html = message_html
self.attachments = attachments # TODO: Implement attachments parsing
# Framework objects
self.sender = sender
self.recipient = recipient
# Will be parsed to objects
self._thread = thread
# Custom attributes
self._create_msg_url = f"/threads/{self._thread.get('id')}" if self._thread else None
def answer(self, message_html: str) -> Type["ThreadMessage"]:
"""Answer to this message in current thread.
:param str message_html: message text to send
:raises ValueError: Thread not linked to this message!
        :raises BadRequestError: Message not sent!
:return: new message object
"""
if not self._create_msg_url:
raise ValueError('Thread not linked to this message!')
message = self._post(self._create_msg_url, {"message_html": message_html})
if not message:
            raise BadRequestError(
                f"Message not sent to '{self._create_msg_url}' with text '{message_html}'!"
            )
message.update({"thread": self._thread})
return ThreadMessage.de_json(**message)
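    # Usage sketch (illustrative only; assumes ``message`` is a ThreadMessage built with its
    # parent thread attached, for example via de_json on an API response that includes the
    # thread meta information):
    #     reply = message.answer("<p>Thanks, I will send the draft today.</p>")
    #     print(reply.id, reply.posted_at)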
@classmethod
def de_json(cls, **data) -> Type["ThreadMessage"]:
"""Parse json data from API responce and make object of this class.
:return: object of this class.
:rtype: `ThreadMessage`
"""
if not data:
return None
participants = data["participants"]
sender = participants.get("from")
if sender:
data["sender"] = Profile.de_json(**sender)
recipient = participants.get("to")
if recipient:
data["recipient"] = Profile.de_json(**recipient)
meta = data.get("meta")
if meta:
data["thread"] = meta["thread"]
return cls(**data)
|
the-stack_106_31851 | # -*- coding: utf-8 -*-
"""
This module defines images used by image reader, image properties
are set by user or read from image header.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from abc import ABCMeta, abstractmethod
import nibabel as nib
import numpy as np
import tensorflow as tf
from six import with_metaclass, string_types
import niftynet.io.misc_io as misc
from niftynet.io.misc_io import resolve_file_name
from niftynet.utilities.niftynet_global_config import NiftyNetGlobalConfig
class Loadable(with_metaclass(ABCMeta, object)):
"""
interface of loadable data
"""
@abstractmethod
def get_data(self):
"""
loads a numpy array from the image object
if the array has less than 5 dimensions
it extends the array to 5d
(corresponding to 3 spatial dimensions, temporal dim, modalities)
ndims > 5 not currently supported
"""
raise NotImplementedError
class DataFromFile(Loadable):
"""
Data from file should have a valid file path
(are files on hard drive) and a name.
"""
def __init__(self, file_path, name='loadable_data'):
self._name = None
self._file_path = None
# assigning using property setters
self.file_path = file_path
self.name = name
self._dtype = None
@property
def dtype(self):
"""
data type property of the input images.
:return: a tuple of input image data types
``len(self.dtype) == len(self.file_path)``
"""
if not self._dtype:
try:
self._dtype = tuple(
misc.load_image(_file).header.get_data_dtype()
for _file in self.file_path)
except (IOError, TypeError, AttributeError):
                tf.logging.warning('could not determine image data type')
self._dtype = (np.dtype(np.float32),) * len(self.file_path)
return self._dtype
@property
def file_path(self):
"""
A tuple of valid image filenames, this property always returns
a tuple, length of the tuple is one for single image,
length of the tuple is larger than one for single image from
multiple files.
:return: a tuple of file paths
"""
return self._file_path
@file_path.setter
def file_path(self, path_array):
if isinstance(path_array, string_types):
path_array = (path_array,)
home_folder = NiftyNetGlobalConfig().get_niftynet_home_folder()
try:
self._file_path = tuple(resolve_file_name(path, ('.', home_folder))
for path in path_array)
except (TypeError, AssertionError, AttributeError, IOError):
tf.logging.fatal(
"unrecognised file path format, should be a valid filename,"
"or a sequence of filenames %s", path_array)
raise IOError
@property
def name(self):
"""
A tuple of image names, this property always returns
a tuple, length of the tuple is one for single image,
length of the tuple is larger than one for single image from
multiple files.
:return: a tuple of image name tags
"""
return self._name
@name.setter
def name(self, name_array):
try:
if len(self.file_path) == len(name_array):
self._name = name_array
return
except (TypeError, AssertionError):
pass
self._name = (name_array,)
def get_data(self):
raise NotImplementedError
class SpatialImage2D(DataFromFile):
"""
2D images, axcodes specifications are ignored when
loading. (Resampling to new pixdims is currently not supported).
"""
def __init__(self,
file_path,
name,
interp_order,
output_pixdim,
output_axcodes):
DataFromFile.__init__(self, file_path=file_path, name=name)
self._original_affine = None
self._original_pixdim = None
self._original_shape = None
self._interp_order = None
self._output_pixdim = None
self._output_axcodes = None
# assigning with property setters
self.interp_order = interp_order
self.output_pixdim = output_pixdim
self.output_axcodes = output_axcodes
self._load_header()
@property
def shape(self):
"""
This function read image shape info from the headers
The lengths in the fifth dim of multiple images are summed
as a multi-mod representation.
The fourth dim corresponding to different time sequences
is ignored.
:return: a tuple of integers as image shape
"""
if self._original_shape is None:
try:
self._original_shape = tuple(
misc.load_image(_file).header['dim'][1:6]
for _file in self.file_path)
except (IOError, KeyError, AttributeError, IndexError):
tf.logging.fatal(
'unknown image shape from header %s', self.file_path)
raise ValueError
try:
non_modality_shapes = set(
[tuple(shape[:4].tolist())
for shape in self._original_shape])
assert len(non_modality_shapes) == 1
except (TypeError, IndexError, AssertionError):
            tf.logging.fatal("could not combine multimodal images: "
"shapes not consistent %s -- %s",
self.file_path, self._original_shape)
raise ValueError
n_modalities = \
np.sum([int(shape[4]) for shape in self._original_shape])
self._original_shape = non_modality_shapes.pop() + (n_modalities,)
return self._original_shape
def _load_header(self):
"""
read original header for pixdim and affine info
:return:
"""
self._original_pixdim = []
self._original_affine = []
for file_i in self.file_path:
_obj = misc.load_image(file_i)
try:
misc.correct_image_if_necessary(_obj)
self._original_pixdim.append(_obj.header.get_zooms()[:3])
self._original_affine.append(_obj.affine)
except (TypeError, IndexError, AttributeError):
tf.logging.fatal('could not read header from %s', file_i)
raise ValueError
# self._original_pixdim = tuple(self._original_pixdim)
# self._original_affine = tuple(self._original_affine)
@property
def original_pixdim(self):
"""
pixdim info from the image header.
:return: a tuple of pixdims, with each element as pixdims
of an image file
"""
try:
assert self._original_pixdim[0] is not None
except (IndexError, AssertionError):
self._load_header()
return self._original_pixdim
@property
def original_affine(self):
"""
affine info from the image header.
:return: a tuple of affine, with each element as an affine
matrix of an image file
"""
try:
assert self._original_affine[0] is not None
except (IndexError, AssertionError):
self._load_header()
return self._original_affine
@property
def original_axcodes(self):
"""
axcodes info from the image header
more info: http://nipy.org/nibabel/image_orientation.html
:return: a tuple of axcodes, with each element as axcodes
of an image file
"""
try:
return tuple(nib.aff2axcodes(affine)
for affine in self.original_affine)
except IndexError:
tf.logging.fatal('unknown affine in header %s: %s',
self.file_path, self.original_affine)
raise
@property
def interp_order(self):
"""
interpolation order specified by user.
:return: a tuple of integers, with each element as an
interpolation order of an image file
"""
return self._interp_order
@interp_order.setter
def interp_order(self, interp_order):
try:
if len(interp_order) == len(self.file_path):
self._interp_order = tuple(int(order) for order in interp_order)
return
except (TypeError, ValueError):
pass
try:
interp_order = int(interp_order)
self._interp_order = (int(interp_order),) * len(self.file_path)
except (TypeError, ValueError):
tf.logging.fatal(
"output interp_order should be an integer or"
"a sequence of integers that matches len(self.file_path)")
raise ValueError
@property
def output_pixdim(self):
"""
output pixdim info specified by user
set to None for using the original pixdim in image header
otherwise get_data() transforms image array according to this value.
:return: a tuple of pixdims, with each element as pixdims
of an image file
"""
tf.logging.warning("resampling 2D images not implemented")
return (None,) * len(self.file_path)
@output_pixdim.setter
def output_pixdim(self, output_pixdim):
try:
if len(output_pixdim) == len(self.file_path):
self._output_pixdim = []
for i, _ in enumerate(self.file_path):
if output_pixdim[i] is None:
self._output_pixdim.append(None)
else:
self._output_pixdim.append(
tuple(float(pixdim) for pixdim in output_pixdim[i]))
# self._output_pixdim = tuple(self._output_pixdim)
return
except (TypeError, ValueError):
pass
try:
if output_pixdim is not None:
output_pixdim = tuple(float(pixdim) for pixdim in output_pixdim)
self._output_pixdim = (output_pixdim,) * len(self.file_path)
except (TypeError, ValueError):
tf.logging.fatal(
'could not set output pixdim '
'%s for %s', output_pixdim, self.file_path)
raise
@property
def output_axcodes(self):
"""
output axcodes info specified by user
set to None for using the original axcodes in image header,
otherwise get_data() change axes of the image array
according to this value.
:return: a tuple of pixdims, with each element as pixdims
of an image file
"""
tf.logging.warning("reorienting 2D images not implemented")
return (None,) * len(self.file_path)
@output_axcodes.setter
def output_axcodes(self, output_axcodes):
try:
if len(output_axcodes) == len(self.file_path):
self._output_axcodes = []
for i, _ in enumerate(self.file_path):
if output_axcodes[i] is None:
self._output_axcodes.append(None)
else:
self._output_axcodes.append(
tuple(output_axcodes[i]))
# self._output_axcodes = tuple(self._output_axcodes)
return
except (TypeError, ValueError):
pass
try:
if output_axcodes is None:
output_axcodes = (None,)
else:
output_axcodes = (output_axcodes,)
self._output_axcodes = output_axcodes * len(self.file_path)
except (TypeError, ValueError):
tf.logging.fatal(
                'could not set output axcodes '
'%s for %s', output_axcodes, self.file_path)
raise
def get_data(self):
if len(self._file_path) > 1:
# 2D image from multiple files
raise NotImplementedError
image_obj = misc.load_image(self.file_path[0])
image_data = image_obj.get_data()
image_data = misc.expand_to_5d(image_data)
return image_data
class SpatialImage3D(SpatialImage2D):
"""
3D image from a single, supports resampling and reorientation
(3D image from a set of 2D slices is currently not supported).
"""
def __init__(self,
file_path,
name,
interp_order,
output_pixdim,
output_axcodes):
SpatialImage2D.__init__(self,
file_path=file_path,
name=name,
interp_order=interp_order,
output_pixdim=output_pixdim,
output_axcodes=output_axcodes)
self._load_header()
# pylint: disable=no-member
@SpatialImage2D.output_pixdim.getter
def output_pixdim(self):
if self._output_pixdim is None:
self.output_pixdim = None
return self._output_pixdim
# pylint: disable=no-member
@SpatialImage2D.output_axcodes.getter
def output_axcodes(self):
if self._output_axcodes is None:
self.output_axcodes = None
return self._output_axcodes
@property
def shape(self):
image_shape = super(SpatialImage3D, self).shape
spatial_shape = image_shape[:3]
rest_shape = image_shape[3:]
if self.original_affine[0] is not None and self.output_axcodes[0]:
src_ornt = nib.orientations.axcodes2ornt(self.original_axcodes[0])
dst_ornt = nib.orientations.axcodes2ornt(self.output_axcodes[0])
if np.any(np.isnan(dst_ornt)) or np.any(np.isnan(src_ornt)):
tf.logging.fatal(
'unknown output axcodes %s for %s',
self.output_axcodes, self.original_axcodes)
raise ValueError
transf = nib.orientations.ornt_transform(src_ornt, dst_ornt)
spatial_transf = transf[:, 0].astype(np.int).tolist()
new_shape = [0, 0, 0]
for i, k in enumerate(spatial_transf):
new_shape[k] = spatial_shape[i]
spatial_shape = tuple(new_shape)
if self.original_pixdim[0] and self.output_pixdim[0]:
try:
zoom_ratio = np.divide(self.original_pixdim[0][:3],
self.output_pixdim[0][:3])
spatial_shape = tuple(int(round(ii * jj)) for ii, jj in
zip(spatial_shape, zoom_ratio))
except (ValueError, IndexError):
tf.logging.fatal(
'unknown pixdim %s: %s',
self.original_pixdim, self.output_pixdim)
raise ValueError
return spatial_shape + rest_shape
def get_data(self):
if len(self._file_path) > 1:
# 3D image from multiple 2d files
mod_list = []
for mod in range(len(self.file_path)):
mod_2d = SpatialImage2D(
file_path=(self.file_path[mod],),
name=(self.name[mod],),
interp_order=(self.interp_order[mod],),
output_pixdim=(self.output_pixdim[mod],),
output_axcodes=(self.output_axcodes[mod],))
mod_data_5d = mod_2d.get_data()
mod_list.append(mod_data_5d)
try:
image_data = np.concatenate(mod_list, axis=4)
except ValueError:
tf.logging.fatal(
"multi-modal data shapes not consistent -- trying to "
"concat {}.".format([mod.shape for mod in mod_list]))
raise
return image_data
# assuming len(self._file_path) == 1
image_obj = misc.load_image(self.file_path[0])
image_data = image_obj.get_data()
image_data = misc.expand_to_5d(image_data)
if self.original_axcodes[0] and self.output_axcodes[0]:
image_data = misc.do_reorientation(
image_data, self.original_axcodes[0], self.output_axcodes[0])
if self.original_pixdim[0] and self.output_pixdim[0]:
# verbose: warning when interpolate_order>1 for integers
image_data = misc.do_resampling(image_data,
self.original_pixdim[0],
self.output_pixdim[0],
self.interp_order[0])
return image_data
class SpatialImage4D(SpatialImage3D):
"""
4D image from a set of 3D volumes,
supports resampling and reorientation.
The 3D volumes are concatenated in the fifth dim (modality dim)
(4D image from a single file is currently not supported)
"""
def __init__(self,
file_path,
name,
interp_order,
output_pixdim,
output_axcodes):
SpatialImage3D.__init__(self,
file_path=file_path,
name=name,
interp_order=interp_order,
output_pixdim=output_pixdim,
output_axcodes=output_axcodes)
def get_data(self):
if len(self.file_path) == 1:
# 4D image from a single file
raise NotImplementedError(
"loading 4D image (time sequence) is not supported")
# assuming len(self._file_path) > 1
mod_list = []
for mod in range(len(self.file_path)):
mod_3d = SpatialImage3D(file_path=(self.file_path[mod],),
name=(self.name[mod],),
interp_order=(self.interp_order[mod],),
output_pixdim=(self.output_pixdim[mod],),
output_axcodes=(self.output_axcodes[mod],))
mod_data_5d = mod_3d.get_data()
mod_list.append(mod_data_5d)
try:
image_data = np.concatenate(mod_list, axis=4)
except ValueError:
tf.logging.fatal(
"multi-modal data shapes not consistent -- trying to "
"concatenate {}.".format([mod.shape for mod in mod_list]))
raise
return image_data
class SpatialImage5D(SpatialImage3D):
"""
5D image from a single file,
resampling and reorientation are implemented as
operations on each 3D slice individually.
(5D image from a set of 4D files is currently not supported)
"""
def __init__(self,
file_path,
name,
interp_order,
output_pixdim,
output_axcodes):
SpatialImage3D.__init__(self,
file_path=file_path,
name=name,
interp_order=interp_order,
output_pixdim=output_pixdim,
output_axcodes=output_axcodes)
def _load_single_5d(self, idx=0):
if len(self._file_path) > 1:
# 3D image from multiple 2d files
raise NotImplementedError
# assuming len(self._file_path) == 1
image_obj = misc.load_image(self.file_path[idx])
image_data = image_obj.get_data()
image_data = misc.expand_to_5d(image_data)
assert image_data.shape[3] == 1, "time sequences not supported"
if self.original_axcodes[idx] and self.output_axcodes[idx]:
output_image = []
for t_pt in range(image_data.shape[3]):
mod_list = []
for mod in range(image_data.shape[4]):
spatial_slice = image_data[..., t_pt:t_pt + 1, mod:mod + 1]
spatial_slice = misc.do_reorientation(
spatial_slice,
self.original_axcodes[idx],
self.output_axcodes[idx])
mod_list.append(spatial_slice)
output_image.append(np.concatenate(mod_list, axis=4))
image_data = np.concatenate(output_image, axis=3)
if self.original_pixdim[idx] and self.output_pixdim[idx]:
assert len(self._original_pixdim[idx]) == \
len(self.output_pixdim[idx]), \
"wrong pixdim format original {} output {}".format(
self._original_pixdim[idx], self.output_pixdim[idx])
# verbose: warning when interpolate_order>1 for integers
output_image = []
for t_pt in range(image_data.shape[3]):
mod_list = []
for mod in range(image_data.shape[4]):
spatial_slice = image_data[..., t_pt:t_pt + 1, mod:mod + 1]
spatial_slice = misc.do_resampling(
spatial_slice,
self.original_pixdim[idx],
self.output_pixdim[idx],
self.interp_order[idx])
mod_list.append(spatial_slice)
output_image.append(np.concatenate(mod_list, axis=4))
image_data = np.concatenate(output_image, axis=3)
return image_data
def get_data(self):
if len(self._file_path) == 1:
return self._load_single_5d()
else:
raise NotImplementedError('concatenating 5D images not supported.')
# image_data = []
# for idx in range(len(self._file_path)):
# image_data.append(self._load_single_5d(idx))
# image_data = np.concatenate(image_data, axis=4)
# return image_data
class ImageFactory(object):
"""
Create image instance according to number of dimensions
specified in image headers.
"""
INSTANCE_DICT = {2: SpatialImage2D,
3: SpatialImage3D,
4: SpatialImage4D,
5: SpatialImage5D}
@classmethod
def create_instance(cls, file_path, **kwargs):
"""
Read image headers and create image instance.
:param file_path: a file path or a sequence of file paths
:param kwargs: output properties for transforming the image data
array into a desired format
:return: an image instance
"""
if file_path is None:
tf.logging.fatal('No file_path provided, '
'please check input sources in config file')
raise ValueError
image_type = None
try:
if os.path.isfile(file_path):
ndims = misc.infer_ndims_from_file(file_path)
image_type = cls.INSTANCE_DICT.get(ndims, None)
except TypeError:
pass
if image_type is None:
try:
home_folder = NiftyNetGlobalConfig().get_niftynet_home_folder()
file_path = [resolve_file_name(path, ('.', home_folder))
for path in file_path]
ndims = misc.infer_ndims_from_file(file_path[0])
ndims = ndims + (1 if len(file_path) > 1 else 0)
image_type = cls.INSTANCE_DICT.get(ndims, None)
except AssertionError:
tf.logging.fatal('Could not load file: %s', file_path)
raise IOError
if image_type is None:
tf.logging.fatal('Not supported image type: %s', file_path)
raise NotImplementedError
return image_type(file_path, **kwargs)
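# Usage sketch (illustrative only; the file name and output properties below are hypothetical,
# not taken from this module):
#     image = ImageFactory.create_instance(
#         file_path='subject01_T1.nii.gz',
#         name=('T1',),
#         interp_order=(3,),
#         output_pixdim=((1.0, 1.0, 1.0),),
#         output_axcodes=(('R', 'A', 'S'),))
#     array_5d = image.get_data()  # 5D array: three spatial dims, time, modality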
|
the-stack_106_31852 | import argparse
import collections
import torch
import numpy as np
import data_loader.data_loaders as module_data
import model.loss as module_loss
import model.metric as module_metric
import model.model as module_arch
from parse_config import ConfigParser
from trainer import Trainer
from utils import prepare_device
# fix random seeds for reproducibility
SEED = 123
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
def main(config):
logger = config.get_logger('train')
# setup data_loader instances
data_loader = config.init_obj('data_loader', module_data)
valid_data_loader = data_loader.split_validation()
    # build model architecture, then print to console
model = config.init_obj('arch', module_arch)
logger.info(model)
# model load state dict
state_dict = torch.load(config.resume)
model.load_state_dict(state_dict)
# prepare for (multi-device) GPU training
device, device_ids = prepare_device(config['n_gpu'])
model = model.to(device)
if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids)
# get function handles of loss and metrics
criterion = getattr(module_loss, config['loss'])
metrics = [getattr(module_metric, met) for met in config['metrics']]
    # build optimizer and learning rate scheduler. Delete every line containing lr_scheduler to disable the scheduler.
# freeze some layers for transfer learning
# for name, param in model.named_parameters():
# if not ('output' in name):
# param.requires_grad = False
    # pass only the parameters that require gradients to the optimizer
trainable_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = config.init_obj('optimizer', torch.optim, trainable_params)
lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler, optimizer)
trainer = Trainer(model, criterion, metrics, optimizer,
config=config,
device=device,
data_loader=data_loader,
valid_data_loader=None,
lr_scheduler=lr_scheduler)
trainer.train()
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str,
help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
# custom cli options to modify configuration from default values given in json file.
CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
options = [
CustomArgs(['--lr', '--learning_rate'], type=float, target='optimizer;args;lr'),
CustomArgs(['--bs', '--batch_size'], type=int, target='data_loader;args;batch_size')
]
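    # Example invocation (a sketch; the script name and checkpoint path are hypothetical):
    #     python train.py -c config.json -r saved/models/model_best.pth --lr 1e-4 --bs 128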
config = ConfigParser.from_args(args, options)
main(config)
|
the-stack_106_31853 | """Downloading data from the M4 competition
"""
import os
import requests
def download(datapath, url, name, split=None):
os.makedirs(datapath, exist_ok=True)
if split is not None:
namesplit = split + "/" + name
else:
namesplit = name
url = url.format(namesplit)
file_path = os.path.join(datapath, name) + ".csv"
if os.path.exists(file_path):
print(name+" already exists")
return
print('Downloading ' + url)
r = requests.get(url, stream=True)
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return
if __name__ == "__main__":
data_frequencies = ['Yearly', 'Quarterly', 'Monthly', 'Weekly', 'Daily', 'Hourly']
datapath = "./dataset/"
url = "https://github.com/Mcompetitions/M4-methods/raw/master/Dataset/{}.csv"
download(datapath, url, 'M4-info')
for freq in data_frequencies:
for split in ['train', 'test']:
download(datapath+split, url, '{}-{}'.format(freq, split), split.capitalize())
|
the-stack_106_31856 | # -*- coding: utf-8 -*-
# Copyright (C) 2020-2021 by SCICO Developers
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
r"""Extensions of numpy ndarray class.
.. testsetup::
>>> import scico.numpy as snp
>>> from scico.blockarray import BlockArray
>>> import numpy as np
>>> import jax.numpy
The class :class:`.BlockArray` is a `jagged array
<https://en.wikipedia.org/wiki/Jagged_array>`_ that aims to mimic the :class:`numpy.ndarray`
interface where appropriate.
A :class:`.BlockArray` object consists of a tuple of `DeviceArray` objects that share their memory
buffers with non-overlapping, contiguous regions of a common one-dimensional `DeviceArray`.
A :class:`.BlockArray` contains the following size attributes:
* `shape`: A tuple of tuples containing component dimensions.
* `size`: The sum of the size of each component block; this is the length of the underlying
one-dimensional `DeviceArray`.
* `num_blocks`: The number of components (blocks) that comprise the :class:`.BlockArray`.
Motivating Example
------------------
Consider a two dimensional array :math:`\mb{x} \in \mathbb{R}^{n \times m}`.
We compute the discrete differences of :math:`\mb{x}` in the horizontal and vertical directions,
generating two new arrays: :math:`\mb{x}_h \in \mathbb{R}^{n \times (m-1)}` and :math:`\mb{x}_v \in
\mathbb{R}^{(n-1) \times m}`.
As these arrays are of different sizes, we cannot combine them into a single `ndarray`. Instead,
we might vectorize each array and concatenate the resulting vectors, leading to :math:`\mb{\bar{x}}
\in \mathbb{R}^{n(m-1) + m(n-1)}`, which can be stored as a one-dimensional `ndarray`.
Unfortunately, this makes it hard to access the individual components :math:`\mb{x}_h` and
:math:`\mb{x}_v`.
Instead, we can form a :class:`.BlockArray`: :math:`\mb{x}_B = [\mb{x}_h, \mb{x}_v]`
::
>>> n = 32
>>> m = 16
>>> x_h = np.random.randn(n, m-1)
>>> x_v = np.random.randn(n-1, m)
# Form the blockarray
>>> x_B = BlockArray.array([x_h, x_v])
# The blockarray shape is a tuple of tuples
>>> x_B.shape
((32, 15), (31, 16))
# Each block component can be easily accessed
>>> x_B[0].shape
(32, 15)
>>> x_B[1].shape
(31, 16)
Constructing a BlockArray
-------------------------
Construct from a tuple of arrays (either `ndarray` or `DeviceArray`)
####################################################################
.. doctest::
>>> from scico.blockarray import BlockArray
>>> import numpy as np
>>> x0 = np.random.randn(32, 32)
>>> x1 = np.random.randn(16)
>>> X = BlockArray.array((x0, x1))
>>> X.shape
((32, 32), (16,))
>>> X.size
1040
>>> X.num_blocks
2
| While :func:`.BlockArray.array` will accept either `ndarray` or `DeviceArray` as input, the
resulting :class:`.BlockArray` will be backed by a `DeviceArray` memory buffer.
| **Note**: constructing a :class:`.BlockArray` always involves a copy to a new `DeviceArray` memory buffer.
| **Note**: by default, the resulting :class:`.BlockArray` is cast to single precision and will have
dtype `float32` or `complex64`.
Construct from a single vector and tuple of shapes
##################################################
::
>>> x_flat = np.random.randn(1040)
>>> shape_tuple = ((32, 32), (16,))
>>> X = BlockArray.array_from_flattened(x_flat, shape_tuple=shape_tuple)
>>> X.shape
((32, 32), (16,))
Operating on a BlockArray
-------------------------
Indexed Updating
----------------
BlockArrays support the JAX DeviceArray `indexed update syntax
<https://jax.readthedocs.io/en/latest/jax.ops.html#indexed-update-operators>`_
The index must be of the form [ibk] or [ibk,idx],
where `ibk` is the index of the block to be updated, and `idx` is a
general index of the elements to be updated in that block. In particular, `ibk`
cannot be a `slice`. The general index `idx` can be omitted, in which case
an entire block is updated.
============================== ==============================================
Alternate syntax Equivalent in-place expression
============================== ==============================================
``x.at[ibk, idx].set(y)`` ``x[ibk, idx] = y``
``x.at[ibk, idx].add(y)`` ``x[ibk, idx] += y``
``x.at[ibk, idx].multiply(y)`` ``x[ibk, idx] *= y``
``x.at[ibk, idx].divide(y)`` ``x[ibk, idx] /= y``
``x.at[ibk, idx].power(y)`` ``x[ibk, idx] **= y``
``x.at[ibk, idx].min(y)``      ``x[ibk, idx] = np.minimum(x[ibk, idx], y)``
``x.at[ibk, idx].max(y)``      ``x[ibk, idx] = np.maximum(x[ibk, idx], y)``
============================== ==============================================
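As an illustrative sketch of this syntax (the arrays and values below are chosen only for
illustration and are not part of the library's own examples)::

    >>> u = BlockArray.array((np.zeros((2, 2)), np.zeros(3)))
    >>> u1 = u.at[0].set(1.0)      # update every element of block 0
    >>> u2 = u.at[1, 2].add(5.0)   # update element 2 of block 1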
Arithmetic and Broadcasting
###########################
Suppose :math:`\mb{x}` is a BlockArray with shape :math:`((n, n), (m,))`.
::
>>> x1 = np.random.randn(4, 4)
>>> x2 = np.random.randn(5)
>>> x = BlockArray.array( (x1, x2) )
>>> x.shape
((4, 4), (5,))
>>> x.num_blocks
2
>>> x.size # 4*4 + 5
21
Illustrated for the operation ``+``, but equally valid for operators
``+, -, *, /, //, **, <, <=, >, >=, ==``
Operations with BlockArrays with same number of blocks
******************************************************
Let :math:`\mb{y}` be a BlockArray with the same number of blocks as :math:`\mb{x}`.
.. math::
\mb{x} + \mb{y}
=
\begin{bmatrix}
\mb{x}[0] + \mb{y}[0] \\
\mb{x}[1] + \mb{y}[1] \\
\end{bmatrix}
This operation depends on each pair of blocks from :math:`\mb{x}` and :math:`\mb{y}`
being broadcastable against each other.
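A sketch of this case, reusing the ``x`` constructed above (the ``y`` here is arbitrary and
chosen only for illustration)::

    >>> y = BlockArray.array((np.random.randn(4, 4), np.random.randn(5)))
    >>> z = x + y
    >>> np.testing.assert_allclose(z[0], x[0] + y[0])
    >>> np.testing.assert_allclose(z[1], x[1] + y[1])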
Operations with a scalar
************************
The scalar is added to each element of the :class:`.BlockArray`:
.. math::
\mb{x} + 1
=
\begin{bmatrix}
\mb{x}[0] + 1 \\
\mb{x}[1] + 1\\
\end{bmatrix}
::
>>> y = x + 1
>>> np.testing.assert_allclose(y[0], x[0] + 1)
>>> np.testing.assert_allclose(y[1], x[1] + 1)
Operations with a 1D `ndarray` of size equal to `num_blocks`
************************************************************
The *i*\th scalar is added to the *i*\th block of the :class:`.BlockArray`:
.. math::
\mb{x}
+
\begin{bmatrix}
1 \\
2
\end{bmatrix}
=
\begin{bmatrix}
\mb{x}[0] + 1 \\
\mb{x}[1] + 2\\
\end{bmatrix}
::
>>> y = x + np.array([1, 2])
>>> np.testing.assert_allclose(y[0], x[0] + 1)
>>> np.testing.assert_allclose(y[1], x[1] + 2)
Operations with an ndarray of `size` equal to :class:`.BlockArray` size
***********************************************************************
We first cast the `ndarray` to a BlockArray with same shape as :math:`\mb{x}`, then apply the
operation on the resulting BlockArrays. With ``y.size = x.size``, we have:
.. math::
\mb{x}
+
\mb{y}
=
\begin{bmatrix}
\mb{x}[0] + \mb{y}[0] \\
\mb{x}[1] + \mb{y}[1]\\
\end{bmatrix}
Equivalently, the BlockArray is first flattened, then added to the flattened `ndarray`, and the
result is reformed into a BlockArray with the same shape as :math:`\mb{x}`.
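A sketch of this case (``x.size == 21`` for the ``x`` defined above; the flat array is
illustrative only)::

    >>> y_flat = np.random.randn(21)
    >>> z = x + y_flat
    >>> np.testing.assert_allclose(z.ravel(), x.ravel() + y_flat)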
MatMul
######
Between two BlockArrays
***********************
The matmul is computed between each block of the two BlockArrays.
The BlockArrays must have the same number of blocks, and each pair of blocks
must be broadcastable.
.. math::
\mb{x} @ \mb{y}
=
\begin{bmatrix}
\mb{x}[0] @ \mb{y}[0] \\
\mb{x}[1] @ \mb{y}[1]\\
\end{bmatrix}
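A sketch with block shapes chosen so that each pair of blocks is matmul-compatible (the
shapes here are illustrative only)::

    >>> a = BlockArray.array((np.random.randn(3, 4), np.random.randn(2, 2)))
    >>> b = BlockArray.array((np.random.randn(4, 5), np.random.randn(2, 2)))
    >>> c = a @ b
    >>> c[0].shape
    (3, 5)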
Between BlockArray and Ndarray/DeviceArray
******************************************
This operation is not defined.
Between BlockArray and :class:`.LinearOperator`
***********************************************
.. todo::
Improve this
The :class:`.Operator` and :class:`.LinearOperator` classes are designed to work on :class:`.BlockArray`. The shapes must conform:
::
x = BlockArray.array((np.random.randn(32, 32), np.random.randn(16)))
A # LinearOperator with A.shape[1] == x.shape
A @ x
NumPy ufuncs
############
`NumPy universal functions (ufuncs) <https://numpy.org/doc/stable/reference/ufuncs.html>`_ are
functions that operate on an `ndarray` in an element-by-element fashion and support array
broadcasting. Examples of ufuncs are ``abs``, ``sign``, ``conj``, and ``exp``.
The JAX library implements most NumPy ufuncs in the :mod:`jax.numpy` module.
However, as JAX does not support subclassing of `DeviceArray`, the JAX ufuncs
cannot be used on :class:`.BlockArray`. As a workaround, we have wrapped several
JAX ufuncs for use on :class:`.BlockArray`; these are located in the
:mod:`scico.numpy` module.
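As a sketch, assuming the wrapped :func:`scico.numpy.abs` ufunc (shapes are illustrative)::

    >>> w = BlockArray.array((np.random.randn(4, 4), np.random.randn(5)))
    >>> snp.abs(w).shape  # doctest: +SKIP
    ((4, 4), (5,))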
Reductions
##########
Reductions are functions that take an array-like as an input and return an array of lower
dimension. Examples include ``mean``, ``sum``, ``norm``. BlockArray reductions are located in the
:mod:`scico.numpy` module.
:class:`.BlockArray` tries to mirror `ndarray` reduction semantics where possible, but
cannot provide a one-to-one match as the block components may be of different size.
Consider the example BlockArray
.. math::
\mb{x} = \begin{bmatrix}
\begin{bmatrix}
1 & 1 \\
1 & 1
\end{bmatrix} \\
\begin{bmatrix}
2 \\
2
\end{bmatrix}
\end{bmatrix}.
We have
.. doctest::
>>> import scico.numpy as snp
>>> x = BlockArray.array((np.ones((2,2)), 2*np.ones((2))))
>>> x.shape
((2, 2), (2,))
>>> x.size
6
>>> x.num_blocks
2
If no axis is specified, the reduction is applied to the flattened array:
.. doctest::
>>> snp.sum(x, axis=None).item()
8.0
Reducing along the 0-th axis crushes the `BlockArray` down into a single `DeviceArray`,
and requires all blocks to have the same shape; otherwise, an error is raised.
.. doctest::
>>> snp.sum(x, axis=0)
Traceback (most recent call last):
ValueError: Evaluating sum of BlockArray along axis=0 requires all blocks to be same shape; got ((2, 2), (2,))
>>> y = BlockArray.array((np.ones((2,2)), 2*np.ones((2, 2))))
>>> snp.sum(y, axis=0)
DeviceArray([[3., 3.],
[3., 3.]], dtype=float32)
Reducing along axis :math:`n` is equivalent to reducing each component along axis :math:`n-1`:
.. math::
\text{sum}(x, axis=1) = \begin{bmatrix}
\begin{bmatrix}
2 \\
2
\end{bmatrix} \\
\begin{bmatrix}
4 \\
\end{bmatrix}
\end{bmatrix}
If a component does not have axis :math:`n-1`, the reduction is not applied to that component. In this example,
``x[1].ndim == 1``, so no reduction is applied to block ``x[1]``.
.. math::
\text{sum}(x, axis=2) = \begin{bmatrix}
\begin{bmatrix}
2 \\
2
\end{bmatrix} \\
\begin{bmatrix}
2 \\
2
\end{bmatrix}
\end{bmatrix}
Code version
.. doctest::
>>> snp.sum(x, axis=1) # doctest: +SKIP
BlockArray([[2, 2],
[4,] ])
>>> snp.sum(x, axis=2) # doctest: +SKIP
BlockArray([ [2, 2],
[2,] ])
"""
from __future__ import annotations
from functools import wraps
from typing import Iterator, List, Optional, Tuple, Union
import numpy as np
import jax
import jax.numpy as jnp
from jax import core
from jax.interpreters import xla
from jax.interpreters.xla import DeviceArray
from jax.tree_util import register_pytree_node, tree_flatten
from jaxlib.xla_extension import Buffer
from scico.typing import Axes, BlockShape, DType, JaxArray, Shape
from scico.util import is_nested
_arraylikes = (Buffer, DeviceArray, np.ndarray)
__author__ = """\n""".join(["Luke Pfister <[email protected]>", "Brendt Wohlberg <[email protected]>"])
def atleast_1d(*arys):
"""Convert inputs to arrays with at least one dimension.
BlockArrays are returned unmodified.
LAX-backend implementation of :func:`atleast_1d`.
The JAX version of this function will return a copy rather than a view of the input.
*Original docstring below.*
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Args:
arys1, arys2, ... : One or more input arrays (array_like).
Returns:
An array, or list of arrays, each with ``a.ndim >= 1``. Copies are made only if
necessary.
"""
if len(arys) == 1:
arr = arys[0]
return arr if isinstance(arr, BlockArray) else jnp.atleast_1d(arr)
else:
out = []
for arr in arys:
if isinstance(arr, BlockArray):
out.append(arr)
else:
out.append(jnp.atleast_1d(arr))
return out
def reshape(
a: Union[JaxArray, BlockArray], newshape: Union[Shape, BlockShape]
) -> Union[JaxArray, BlockArray]:
"""Gives a new shape to an array without changing its data.
Args:
a : Array to be reshaped.
newshape: The new shape should be compatible with the original shape. If an integer,
then the result will be a 1-D array of that length. One shape dimension can be -1.
In this case, the value is inferred from the length of the array and remaining
            dimensions. If a tuple of tuples of ints, a :class:`.BlockArray` is returned.
Returns:
The reshaped array. Unlike :func:`numpy.reshape`, a copy is always returned.
"""
if is_nested(newshape):
# x is a blockarray
return BlockArray.array_from_flattened(a, newshape)
else:
return jnp.reshape(a, newshape)
def block_sizes(shape: Union[Shape, BlockShape]) -> Axes:
r"""Computes the 'sizes' of (possibly nested) block shapes.
This function computes ``block_sizes(z.shape) == (_.size for _ in z)``
Args:
shape: A shape tuple; possibly containing nested tuples.
Examples:
.. doctest::
>>> import scico.numpy as snp
>>> x = BlockArray.ones( ( (4, 4), (2,)))
>>> x.size
18
>>> y = snp.ones((3, 3))
>>> y.size
9
>>> z = BlockArray.array([x, y])
>>> block_sizes(z.shape)
(18, 9)
>>> zz = BlockArray.array([z, z])
>>> block_sizes(zz.shape)
(27, 27)
"""
if isinstance(shape, BlockArray):
raise TypeError(
"Expected a `shape` (possibly nested tuple of ints); got :class:`.BlockArray`."
)
out = []
if is_nested(shape):
# shape is nested -> at least one element came from a blockarray
for y in shape:
if is_nested(y):
# recursively calculate the block size until we arrive at
# a tuple (shape of a non-block array)
while is_nested(y):
y = block_sizes(y)
out.append(np.sum(y)) # adjacent block sizes are added together
else:
# this is a tuple; size given by product of elements
out.append(np.prod(y))
return tuple(out)
else:
# shape is a non-nested tuple; return the product
return np.prod(shape)
def _flatten_blockarrays(inp, *args, **kwargs):
"""Flattens any blockarrays present in inp, args, or kwargs"""
def _flatten_if_blockarray(inp):
if isinstance(inp, BlockArray):
return inp._data
else:
return inp
inp_ = _flatten_if_blockarray(inp)
args_ = (_flatten_if_blockarray(_) for _ in args)
kwargs_ = {key: _flatten_if_blockarray(val) for key, val in kwargs.items()}
return inp_, args_, kwargs_
def _block_array_ufunc_wrapper(func):
"""Wraps a "ufunc" to allow for joint operation on `DeviceArray` and `BlockArray`"""
@wraps(func)
def wrapper(inp, *args, **kwargs):
all_args = (inp,) + args + tuple(kwargs.items())
if any([isinstance(_, BlockArray) for _ in all_args]):
# If 'inp' is a BlockArray, call func on inp._data
# Then return a BlockArray of the same shape as inp
inp_, args_, kwargs_ = _flatten_blockarrays(inp, *args, **kwargs)
flat_out = func(inp_, *args_, **kwargs_)
return BlockArray.array_from_flattened(flat_out, inp.shape)
else:
# Otherwise call the function normally
return func(inp, *args, **kwargs)
if not hasattr(func, "__doc__") or func.__doc__ is None:
return wrapper
else:
wrapper.__doc__ = (
f":func:`{func.__name__}` wrapped to operate on :class:`BlockArray`"
+ "\n\n"
+ func.__doc__
)
return wrapper
def _block_array_reduction_wrapper(func):
"""Wraps a reduction (eg sum, norm) to allow for joint operation on `DeviceArray` and
`BlockArray`"""
@wraps(func)
def wrapper(inp, *args, axis=None, **kwargs):
all_args = (inp,) + args + tuple(kwargs.items())
if any([isinstance(_, BlockArray) for _ in all_args]):
if axis is None:
# Treat as a single long vector
inp_, args_, kwargs_ = _flatten_blockarrays(inp, *args, **kwargs)
return func(inp_, *args_, **kwargs_)
elif type(axis) == tuple:
raise Exception(
f"""Evaluating {func.__name__} on a BlockArray with a tuple argument to
axis is not currently supported"""
)
else:
if axis == 0: # reduction along block axis
# reduction along axis=0 only makes sense if all blocks are the same shape
# so we can convert to a standard DeviceArray of shape (inp.num_blocks, ...)
# and reduce along axis = 0
if all([bk_shape == inp.shape[0] for bk_shape in inp.shape]):
view_shape = (inp.num_blocks,) + inp.shape[0]
return func(inp._data.reshape(view_shape), *args, axis=0, **kwargs)
else:
raise ValueError(
f"Evaluating {func.__name__} of BlockArray along axis=0 requires "
f"all blocks to be same shape; got {inp.shape}"
)
else:
# Reduce each block individually along axis-1
out = []
for bk in inp:
if isinstance(bk, BlockArray):
# This block is itself a blockarray, so call this wrapped reduction
# on axis-1
tmp = _block_array_reduction_wrapper(func)(
bk, *args, axis=axis - 1, **kwargs
)
else:
if axis - 1 >= bk.ndim:
# Trying to reduce along a dim that doesn't exist for this block,
# so just return the block.
# ie broadcast to shape (..., 1) and reduce along axis=-1
tmp = bk
else:
tmp = func(bk, *args, axis=axis - 1, **kwargs)
out.append(atleast_1d(tmp))
return BlockArray.array(out)
elif axis is None:
# 'axis' might not be a valid kwarg (eg dot, vdot), so don't pass it
return func(inp, *args, **kwargs)
else:
return func(inp, *args, axis=axis, **kwargs)
if not hasattr(func, "__doc__") or func.__doc__ is None:
return wrapper
else:
wrapper.__doc__ = (
f":func:`{func.__name__}` wrapped to operate on :class:`BlockArray`"
+ "\n\n"
+ func.__doc__
)
return wrapper
def _block_array_matmul_wrapper(func):
@wraps(func)
def wrapper(self, other):
if isinstance(self, BlockArray):
if isinstance(other, BlockArray):
# Both blockarrays, work block by block
return BlockArray.array([func(x, y) for x, y in zip(self, other)])
else:
raise TypeError(
f"Operation {func.__name__} not implemented between {type(self)} and {type(other)}"
)
else:
return func(self, other)
if not hasattr(func, "__doc__") or func.__doc__ is None:
return wrapper
else:
wrapper.__doc__ = (
f":func:`{func.__name__}` wrapped to operate on :class:`BlockArray`"
+ "\n\n"
+ func.__doc__
)
return wrapper
def _block_array_binary_op_wrapper(func):
"""Returns a decorator that performs type and shape checking for :class:`.BlockArray`
arithmetic
"""
@wraps(func)
def wrapper(self, other):
if isinstance(other, BlockArray):
if other.shape == self.shape:
# Same shape blocks, can operate on flattened arrays
return BlockArray.array_from_flattened(func(self._data, other._data), self.shape)
elif other.num_blocks == self.num_blocks:
# Will work as long as the shapes are broadcastable
return BlockArray.array([func(x, y) for x, y in zip(self, other)])
else:
raise ValueError(
f"operation not valid on operands with shapes {self.shape} {other.shape}"
)
elif any([isinstance(other, _) for _ in _arraylikes]):
if other.size == 1:
# Same as operating on a scalar
return BlockArray.array_from_flattened(func(self._data, other), self.shape)
elif other.size == self.size:
# A little fast and loose, treat the block array as a length self.size vector
return BlockArray.array_from_flattened(func(self._data, other), self.shape)
elif other.size == self.num_blocks:
return BlockArray.array([func(blk, other_) for blk, other_ in zip(self, other)])
else:
raise ValueError(
f"operation not valid on operands with shapes {self.shape} {other.shape}"
)
elif jnp.isscalar(other) or isinstance(other, core.Tracer):
return BlockArray.array_from_flattened(func(self._data, other), self.shape)
else:
raise TypeError(
f"Operation {func.__name__} not implemented between {type(self)} and {type(other)}"
)
if not hasattr(func, "__doc__") or func.__doc__ is None:
return wrapper
else:
wrapper.__doc__ = (
f":func:`{func.__name__}` wrapped to operate on :class:`BlockArray`"
+ "\n\n"
+ func.__doc__
)
return wrapper
class _AbstractBlockArray(core.ShapedArray):
"""Abstract BlockArray class for JAX tracing.
See https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html
"""
array_abstraction_level = 0 # Same as jax.core.ConcreteArray
def __init__(self, shapes, dtype):
sizes = block_sizes(shapes)
size = np.sum(sizes)
super(_AbstractBlockArray, self).__init__((size,), dtype)
#: Abstract data value
self._data_aval: core.ShapedArray = core.ShapedArray((size,), dtype)
#: Array dtype
self.dtype: DType = dtype
#: Shape of each block
self.shapes: BlockShape = shapes
#: Size of each block
self.sizes: Shape = sizes
#: Array specifying boundaries of components as indices in base array
self.bndpos: np.ndarray = np.r_[0, np.cumsum(sizes)]
@core.aval_property
def data(self):
return bk_data_p.bind(self)
# Register BlockArray._data as a primitive
bk_data_p = core.Primitive("bk_data")
@bk_data_p.def_impl
def _bk_data_impl(mat):
return mat._data
# The Jax class is heavily inspired by SparseArray/AbstractSparseArray here:
# https://github.com/google/jax/blob/7724322d1c08c13008815bfb52759a29c2a6823b/tests/custom_object_test.py
class BlockArray:
"""A tuple of :class:`jax.interpreters.xla.DeviceArray` objects.
A tuple of `DeviceArray` objects that all share their memory buffers with
non-overlapping, contiguous regions of a common one-dimensional `DeviceArray`.
    It can be used as the common one-dimensional array via the :func:`BlockArray.ravel`
    method, or the component arrays can be accessed individually.
"""
# Ensure we use BlockArray.__radd__,__rmul__, etc for binary operations of the form
# op(np.ndarray, BlockArray)
# See https://docs.scipy.org/doc/numpy-1.10.1/user/c-info.beyond-basics.html#ndarray.__array_priority__
__array_priority__ = 1
def __init__(self, aval: _AbstractBlockArray, data: JaxArray):
"""BlockArray init method.
Args:
aval: `Abstract value`_ associated to this array (shape+dtype+weak_type)
data: The underlying contiguous, flattened `DeviceArray`.
.. _Abstract value: https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html
"""
self._aval = aval
self._data = data
def __repr__(self):
return "scico.blockarray.BlockArray: \n" + self._data.__repr__()
def __getitem__(self, idx: Union[int, Ellipsis]) -> JaxArray:
if isinstance(idx, slice):
            raise TypeError("Slicing not supported on block index")
elif idx == Ellipsis:
return reshape(self._data, self.shape)
elif idx < 0:
idx = self.num_blocks + idx
return reshape(self._data[self.bndpos[idx] : self.bndpos[idx + 1]], self.shape[idx])
@_block_array_matmul_wrapper
def __matmul__(self, other: Union[np.ndarray, BlockArray, JaxArray]) -> JaxArray:
return self @ other
@_block_array_matmul_wrapper
def __rmatmul__(self, other: Union[np.ndarray, BlockArray, JaxArray]) -> JaxArray:
return other @ self
@_block_array_binary_op_wrapper
def __sub__(a, b):
return a - b
@_block_array_binary_op_wrapper
def __rsub__(a, b):
return b - a
@_block_array_binary_op_wrapper
def __mul__(a, b):
return a * b
@_block_array_binary_op_wrapper
def __rmul__(a, b):
return a * b
@_block_array_binary_op_wrapper
def __add__(a, b):
return a + b
@_block_array_binary_op_wrapper
def __radd__(a, b):
return a + b
@_block_array_binary_op_wrapper
def __truediv__(a, b):
return a / b
@_block_array_binary_op_wrapper
def __rtruediv__(a, b):
return b / a
@_block_array_binary_op_wrapper
def __floordiv__(a, b):
return a // b
@_block_array_binary_op_wrapper
def __rfloordiv__(a, b):
return b // a
@_block_array_binary_op_wrapper
def __pow__(a, b):
return a ** b
@_block_array_binary_op_wrapper
def __rpow__(a, b):
return b ** a
@_block_array_binary_op_wrapper
def __gt__(a, b):
return a > b
@_block_array_binary_op_wrapper
def __ge__(a, b):
return a >= b
@_block_array_binary_op_wrapper
def __lt__(a, b):
return a < b
@_block_array_binary_op_wrapper
def __le__(a, b):
return a <= b
@_block_array_binary_op_wrapper
def __eq__(a, b):
return a == b
@_block_array_binary_op_wrapper
def __ne__(a, b):
return a != b
def __iter__(self) -> Iterator[int]:
for i in range(self.num_blocks):
yield self[i]
@property
def blocks(self) -> Iterator[int]:
"""Returns an iterator yielding component blocks."""
return self.__iter__()
@property
def bndpos(self) -> np.ndarray:
"""Array specifying boundaries of components as indices in base array."""
return self._aval.bndpos
@property
def dtype(self) -> DType:
"""Array dtype"""
return self._data.dtype
@property
def device_buffer(self) -> Buffer:
"""The :class:`jaxlib.xla_extension.Buffer` that backs the underlying data array"""
return self._data.device_buffer
@property
def size(self) -> int:
"""Total number of elements in the array."""
return self._aval.size
@property
def num_blocks(self) -> int:
"""Number of :class:`.BlockArray` components."""
return len(self.shape)
@property
def ndim(self) -> Shape:
"""Tuple of component ndims."""
return tuple(len(c) for c in self.shape)
@property
def shape(self) -> BlockShape:
"""Tuple of component shapes."""
return self._aval.shapes
@property
def split(self) -> Tuple[JaxArray, ...]:
"""Tuple of component arrays."""
return tuple(self[k] for k in range(self.num_blocks))
def conj(self) -> BlockArray:
"""Returns a :class:`.BlockArray` with complex-conjugated elements."""
# Much faster than BlockArray.array([_.conj() for _ in self.blocks])
return BlockArray.array_from_flattened(self.ravel().conj(), self.shape)
@property
def real(self) -> BlockArray:
"""Returns a :class:`.BlockArray` with the real part of this array."""
return BlockArray.array_from_flattened(self.ravel().real, self.shape)
@property
def imag(self) -> BlockArray:
"""Returns a :class:`.BlockArray` with the imaginary part of this array."""
return BlockArray.array_from_flattened(self.ravel().imag, self.shape)
@classmethod
def array(
cls, alst: List[Union[np.ndarray, JaxArray]], dtype: Optional[np.dtype] = None
) -> BlockArray:
"""Construct a :class:`.BlockArray` from a list or tuple of existing array-like.
Args:
alst : Initializers for array components.
Can be :class:`numpy.ndarray` or :class:`jax.interpreters.xla.DeviceArray`
dtype : Data type of array. If none, dtype is derived from dtype of initializers
Returns:
:class:`.BlockArray` initialized from `alst` tuple
"""
        if not isinstance(alst, (tuple, list)):
raise TypeError("Input to `array` must be a list or tuple of existing arrays")
if dtype is None:
present_types = jax.tree_flatten(jax.tree_map(lambda x: x.dtype, alst))[0]
dtype = np.find_common_type(present_types, [])
# alst can be a list/tuple of arrays, or a list/tuple containing list/tuples of arrays
# consider alst to be a tree where leaves are arrays (possibly abstract arrays)
# use tree_map to find the shape of each leaf
# `shapes` will be a tuple of ints and tuples containing ints (possibly nested further)
# ensure any scalar leaves are converted to (1,) arrays
def shape_atleast_1d(x):
return x.shape if x.shape != () else (1,)
shapes = tuple(
jax.tree_map(shape_atleast_1d, alst, is_leaf=lambda x: not isinstance(x, (list, tuple)))
)
_aval = _AbstractBlockArray(shapes, dtype)
data_ravel = jnp.hstack(jax.tree_map(lambda x: x.ravel(), jax.tree_flatten(alst)[0]))
return cls(_aval, data_ravel)
@classmethod
def array_from_flattened(
cls, data_ravel: Union[np.ndarray, JaxArray], shape_tuple: BlockShape
) -> BlockArray:
"""Construct a :class:`.BlockArray` from a flattened array and tuple of shapes.
Args:
data_ravel: Flattened data array
shape_tuple: Tuple of tuples containing desired block shapes.
Returns:
:class:`.BlockArray` initialized from `data_ravel` and `shape_tuple`
"""
if not isinstance(data_ravel, DeviceArray):
data_ravel = jax.device_put(data_ravel)
shape_tuple_size = np.sum(block_sizes(shape_tuple))
if shape_tuple_size != data_ravel.size:
raise ValueError(
f"""The specified shape_tuple is incompatible with provided data_ravel
shape_tuple = {shape_tuple}
shape_tuple_size = {shape_tuple_size}
len(data_ravel) = {len(data_ravel)}
"""
)
_aval = _AbstractBlockArray(shape_tuple, dtype=data_ravel.dtype)
return cls(_aval, data_ravel)
@classmethod
def ones(cls, shape_tuple: BlockShape, dtype: DType = np.float32) -> BlockArray:
"""
Return a new :class:`.BlockArray` with given block shapes and type, filled with ones.
Args:
shape_tuple: Tuple of shapes for component blocks
dtype: Desired data-type for the :class:`.BlockArray`. Default is `numpy.float32`.
Returns:
:class:`.BlockArray` of ones with the given component shapes and dtype
"""
_aval = _AbstractBlockArray(shape_tuple, dtype=dtype)
data_ravel = jnp.ones(_aval.size, dtype=dtype)
return cls(_aval, data_ravel)
@classmethod
def zeros(cls, shape_tuple: BlockShape, dtype: DType = np.float32) -> BlockArray:
"""
Return a new :class:`.BlockArray` with given block shapes and type, filled with zeros.
Args:
shape_tuple: Tuple of shapes for component blocks
dtype: Desired data-type for the :class:`.BlockArray`. Default is `numpy.float32`.
Returns:
:class:`.BlockArray` of zeros with the given component shapes and dtype
"""
_aval = _AbstractBlockArray(shape_tuple, dtype=dtype)
data_ravel = jnp.zeros(_aval.size, dtype=dtype)
return cls(_aval, data_ravel)
@classmethod
def empty(cls, shape_tuple: BlockShape, dtype: DType = np.float32) -> BlockArray:
"""
Return a new :class:`.BlockArray` with given block shapes and type, filled with zeros.
Note: like :func:`jax.numpy.empty`, this does not return an uninitalized array.
Args:
shape_tuple: Tuple of shapes for component blocks
dtype: Desired data-type for the :class:`.BlockArray`. Default is `numpy.float32`.
Returns:
:class:`.BlockArray` of zeros with the given component shapes and dtype.
"""
_aval = _AbstractBlockArray(shape_tuple, dtype=dtype)
data_ravel = jnp.empty(_aval.size, dtype=dtype)
return cls(_aval, data_ravel)
@classmethod
def full(
cls,
shape_tuple: BlockShape,
fill_value: Union[float, complex, int],
dtype: DType = np.float32,
) -> BlockArray:
"""
Return a new :class:`.BlockArray` with given block shapes and type, filled with
`fill_value`.
Args:
shape_tuple: Tuple of shapes for component blocks.
fill_value: Fill value
dtype: Desired data-type for the BlockArray. The default,
None, means `np.array(fill_value).dtype`.
Returns:
:class:`.BlockArray` with the given component shapes and dtype and all entries
equal to `fill_value`.
"""
if dtype is None:
dtype = np.asarray(fill_value).dtype
_aval = _AbstractBlockArray(shape_tuple, dtype=dtype)
data_ravel = jnp.full(_aval.size, fill_value=fill_value, dtype=dtype)
return cls(_aval, data_ravel)
def copy(self) -> BlockArray:
"""Returns a copy of this :class:`.BlockArray`.
This method is not implemented for BlockArray.
See Also:
:meth:`.to_numpy`: Converts a :class:`.BlockArray` into a flattened NumPy array.
"""
# jax DeviceArray copies return a NumPy ndarray. This blockarray class must be backed
# by a DeviceArray, so cannot be converted to a NumPy-backed BlockArray. The BlockArray
# .to_numpy() method returns a flattened ndarray.
#
# This method may be implemented in the future if jax DeviceArray .copy() is modified to
# returns another DeviceArray.
raise NotImplementedError
def to_numpy(self) -> np.ndarray:
"""Returns a :class:`numpy.ndarray` containing the flattened form of this
:class:`.BlockArray`."""
if isinstance(self._data, DeviceArray):
host_arr = jax.device_get(self._data.copy())
else:
host_arr = self._data.copy()
return host_arr
def blockidx(self, idx: int) -> jax._src.ops.scatter._Indexable:
"""Returns :class:`jax.ops.index` for a given component block.
Args:
idx: Desired block index
Returns:
:class:`jax.ops.index` pointing to desired block
"""
return jax.ops.index[self.bndpos[idx] : self.bndpos[idx + 1]]
def ravel(self) -> JaxArray:
"""Return a copy of ``self._data`` as a contiguous, flattened `DeviceArray`.
Note that a copy, rather than a view, of the underlying array is returned.
This is consistent with :func:`jax.numpy.ravel`.
Returns:
Copy of underlying flattened array
"""
return self._data[:]
def flatten(self) -> JaxArray:
"""Return a copy of ``self._data`` as a contiguous, flattened `DeviceArray`.
Note that a copy, rather than a view, of the underlying array is returned.
This is consistent with :func:`jax.numpy.ravel`.
Returns:
Copy of underlying flattened array
"""
return self._data[:]
def sum(self, axis=None, keepdims=False):
"""Return the sum of the blockarray elements over the given axis.
Refer to :func:`scico.numpy.sum` for full documentation.
"""
# Can't just call scico.numpy.sum due to pesky circular import...
return _block_array_reduction_wrapper(jnp.sum)(self, axis=axis, keepdims=keepdims)
## Register BlockArray as a Jax type
# Our BlockArray is just a single large vector with some extra sugar
class _ConcreteBlockArray(_AbstractBlockArray):
pass
def _block_array_result_handler(device, _aval):
def build_block_array(data_buf):
data = xla.DeviceArray(_aval._data_aval, device, None, data_buf)
return BlockArray(_aval, data)
return build_block_array
def _block_array_shape_handler(a):
return (xla.xc.Shape.array_shape(a._data_aval.dtype, a._data_aval.shape),)
def _block_array_device_put_handler(a, device):
return (xla.xb.get_device_backend(device).buffer_from_pyval(a._data, device),)
core.pytype_aval_mappings[BlockArray] = lambda x: x._aval
core.raise_to_shaped_mappings[_AbstractBlockArray] = lambda _aval, _: _aval
xla.pytype_aval_mappings[BlockArray] = lambda x: x._aval
xla.canonicalize_dtype_handlers[BlockArray] = lambda x: x
xla.device_put_handlers[BlockArray] = _block_array_device_put_handler
xla.xla_result_handlers[_AbstractBlockArray] = _block_array_result_handler
xla.xla_shape_handlers[_AbstractBlockArray] = _block_array_shape_handler
## Handlers to use jax.device_put on BlockArray
def _block_array_tree_flatten(block_arr):
"""Flattens a :class:`.BlockArray` pytree.
See :func:`jax.tree_util.tree_flatten`.
Args:
block_arr (:class:`.BlockArray`): :class:`.BlockArray` to flatten
Returns:
children (tuple): :class:`.BlockArray` leaves
aux_data (tuple): Extra metadata used to reconstruct BlockArray.
"""
data_children, data_aux_data = tree_flatten(block_arr._data)
return (data_children, block_arr._aval)
def _block_array_tree_unflatten(aux_data, children):
"""Constructs a :class:`.BlockArray` from a flattened pytree.
See jax.tree_utils.tree_unflatten
Args:
aux_data (tuple): Metadata needed to construct block array
children (tuple): Contains block array elements
Returns:
block_arr: Constructed :class:`.BlockArray`
"""
return BlockArray(aux_data, children[0])
register_pytree_node(BlockArray, _block_array_tree_flatten, _block_array_tree_unflatten)
# Syntactic sugar for the .at operations
# see https://github.com/google/jax/blob/56e9f7cb92e3a099adaaca161cc14153f024047c/jax/_src/numpy/lax_numpy.py#L5900
class _BlockArrayIndexUpdateHelper:
"""The helper class for the `at` property to call indexed update functions.
The `at` property is syntactic sugar for calling the indexed update
functions as is done in jax. The index must be of the form [ibk] or [ibk,idx],
where `ibk` is the index of the block to be updated, and `idx` is a
general index of the elements to be updated in that block.
In particular:
- ``x = x.at[ibk].set(y)`` is an equivalent of ``x[ibk] = y``.
- ``x = x.at[ibk,idx].set(y)`` is an equivalent of ``x[ibk,idx] = y``.
The methods ``set, add, multiply, divide, power, maximum, minimum`` are supported.
"""
__slots__ = ("_block_array",)
def __init__(self, block_array):
self._block_array = block_array
def __getitem__(self, index):
if isinstance(index, tuple):
if isinstance(index[0], slice):
                raise TypeError("Slicing not supported along block index")
return _BlockArrayIndexUpdateRef(self._block_array, index)
def __repr__(self):
        return f"_BlockArrayIndexUpdateHelper({repr(self._block_array)})"
class _BlockArrayIndexUpdateRef:
"""Helper object to call indexed update functions for an (advanced) index.
This object references a source block array and a specific indexer, with the
first integer specifying the block being updated, and rest being the indexer
into the array of that block.
Methods on this object return copies of the source block array that have
been modified at the positions specified by the indexer in the given block.
"""
__slots__ = ("_block_array", "bk_index", "index")
def __init__(self, block_array, index):
self._block_array = block_array
if isinstance(index, int):
self.bk_index = index
self.index = Ellipsis
elif index == Ellipsis:
self.bk_index = Ellipsis
self.index = Ellipsis
else:
self.bk_index = index[0]
self.index = index[1:]
def __repr__(self):
return f"_BlockArrayIndexUpdateRef({repr(self._block_array)}, {repr(self.bk_index)}, {repr(self.index)})"
def _index_wrapper(self, func_str, values):
bk_index = self.bk_index
index = self.index
arr_tuple = self._block_array.split
if bk_index == Ellipsis:
# TODO does this result in multiple copies? One per sub-blockarray,
# then one to combine into a nested BA?
retval = BlockArray.array([getattr(_.at[index], func_str)(values) for _ in arr_tuple])
else:
retval = BlockArray.array(
arr_tuple[:bk_index]
+ (getattr(arr_tuple[bk_index].at[index], func_str)(values),)
+ arr_tuple[bk_index + 1 :]
)
return retval
def set(self, values):
"""Pure equivalent of ``x[idx] = y``.
Returns the value of ``x`` that would result from the NumPy-style
        :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] = y``.
See :mod:`jax.ops` for details.
"""
return self._index_wrapper("set", values)
def add(self, values):
"""Pure equivalent of ``x[idx] += y``.
Returns the value of ``x`` that would result from the NumPy-style
        :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] += y``.
See :mod:`jax.ops` for details.
"""
return self._index_wrapper("add", values)
def multiply(self, values):
"""Pure equivalent of ``x[idx] *= y``.
Returns the value of ``x`` that would result from the NumPy-style
        :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] *= y``.
See :mod:`jax.ops` for details.
"""
return self._index_wrapper("multiply", values)
def divide(self, values):
"""Pure equivalent of ``x[idx] /= y``.
Returns the value of ``x`` that would result from the NumPy-style
        :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] /= y``.
See :mod:`jax.ops` for details.
"""
return self._index_wrapper("divide", values)
def power(self, values):
"""Pure equivalent of ``x[idx] **= y``.
Returns the value of ``x`` that would result from the NumPy-style
        :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] **= y``.
See :mod:`jax.ops` for details.
"""
return self._index_wrapper("power", values)
def min(self, values):
"""Pure equivalent of ``x[idx] = minimum(x[idx], y)``.
Returns the value of ``x`` that would result from the NumPy-style
        :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] = minimum(x[idx], y)``.
See :mod:`jax.ops` for details.
"""
return self._index_wrapper("min", values)
def max(self, values):
"""Pure equivalent of ``x[idx] = maximum(x[idx], y)``.
Returns the value of ``x`` that would result from the NumPy-style
        :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] = maximum(x[idx], y)``.
See :mod:`jax.ops` for details.
"""
return self._index_wrapper("max", values)
setattr(BlockArray, "at", property(_BlockArrayIndexUpdateHelper))
|
the-stack_106_31857 | # -*- coding: utf-8 -*-
from django.db import models, migrations
def add_cache(apps, schema_editor):
Article = apps.get_model("news", "Article")
for article in Article.objects.all():
version_count = article.version_set.count()
if version_count == 0:
article.delete()
continue
article._latest_date = \
article.version_set.latest('date').date
article._sum_versions = version_count
article.save()
class Migration(migrations.Migration):
dependencies = [
('news', '0016_auto_20160407_1442'),
]
operations = [
migrations.RunPython(add_cache),
]
|
the-stack_106_31858 | #name: KNN
#description: Imputes (numerical) missing values using the kNN algorithm
#reference: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
#language: python
#tags: demo, hide-suggestions
#sample: demog.csv
#input: dataframe data [Input data table with NA elements]
#input: column_list imputeColumns {type:numerical} [Impute data table columns]
#input: column_list dataColumns [Input data table columns]
#input: int neighbours = 5 [Number of Nearest Neighbours used]
#output: dataframe data_out {action:replace(data)} [Output data table without NA elements]
import numpy as np
import pandas as pd
from fancyimpute import KNN
# Convert categories into numbers
for col in data.columns:
if (data[col].dtype == 'object'):
data[col]= data[col].astype('category')
data[col] = data[col].cat.codes
# Convert to numpy array
columns = data.columns
data = data.values
data_out = KNN(k=neighbours).fit_transform(data)
# Convert back to Pandas DataFrame
data_out = pd.DataFrame(data_out, columns=columns)
data_out = data_out[imputeColumns]
|
the-stack_106_31859 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from functools import lru_cache
from typing import Optional, Set
import jinja2
from docutils import nodes
from docutils.nodes import Element
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import StringList
from provider_yaml_utils import ( # pylint: disable=no-name-in-module
get_provider_yaml_paths,
load_package_data,
)
from sphinx.util import nested_parse_with_titles
from sphinx.util.docutils import switch_source_input
CMD_OPERATORS_AND_HOOKS = "operators-and-hooks"
CMD_TRANSFERS = 'transfers'
"""
Directives for rendering tables with operators.
To test the template rendering process, you can also run this script as a standalone program.
PYTHONPATH=$PWD/../ python exts/operators_and_hooks_ref.py --help
"""
DEFAULT_HEADER_SEPARATOR = "="
CURRENT_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir, os.pardir))
DOCS_DIR = os.path.join(ROOT_DIR, 'docs')
@lru_cache(maxsize=None)
def _get_jinja_env():
loader = jinja2.FileSystemLoader(CURRENT_DIR, followlinks=True)
env = jinja2.Environment(loader=loader, undefined=jinja2.StrictUndefined)
return env
def _render_template(template_name, **kwargs):
return _get_jinja_env().get_template(template_name).render(**kwargs)
def _docs_path(filepath: str):
if not filepath.startswith("/docs/"):
        raise Exception(f"The path must start with '/docs/'. Current value: {filepath}")
    if not filepath.endswith(".rst"):
        raise Exception(f"The path must end with '.rst'. Current value: {filepath}")
len_rst = len(".rst")
filepath = filepath[:-len_rst]
filepath = os.path.join(ROOT_DIR, filepath.lstrip('/'))
return os.path.relpath(filepath, DOCS_DIR)
def _prepare_resource_index(package_data, resource_type):
return {
integration["integration-name"]: {**integration, 'package-name': provider['package-name']}
for provider in package_data
for integration in provider.get(resource_type, [])
}
def _prepare_operators_data(tags: Optional[Set[str]]):
package_data = load_package_data()
all_integrations = _prepare_resource_index(package_data, "integrations")
if tags is None:
        to_display_integration = all_integrations.values()
else:
to_display_integration = [
integration for integration in all_integrations.values() if tags.intersection(integration["tags"])
]
all_operators_by_integration = _prepare_resource_index(package_data, "operators")
all_hooks_by_integration = _prepare_resource_index(package_data, "hooks")
    all_sensors_by_integration = _prepare_resource_index(package_data, "sensors")
results = []
for integration in to_display_integration:
item = {
"integration": integration,
}
operators = all_operators_by_integration.get(integration['integration-name'])
sensors = all_sensors_by_integration.get(integration['integration-name'])
hooks = all_hooks_by_integration.get(integration['integration-name'])
if 'how-to-guide' in item['integration']:
item['integration']['how-to-guide'] = [_docs_path(d) for d in item['integration']['how-to-guide']]
if operators:
item['operators'] = operators
        if sensors:
            item['sensors'] = sensors
if hooks:
item['hooks'] = hooks
if operators or sensors or hooks:
results.append(item)
return sorted(results, key=lambda d: d["integration"]["integration-name"].lower())
def _render_operator_content(*, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
tabular_data = _prepare_operators_data(tags)
return _render_template(
"operators_and_hooks_ref.rst.jinja2", items=tabular_data, header_separator=header_separator
)
def _prepare_transfer_data(tags: Optional[Set[str]]):
package_data = load_package_data()
all_operators_by_integration = _prepare_resource_index(package_data, "integrations")
# Add edge case
for name in ["SQL", "Local"]:
all_operators_by_integration[name] = {"integration-name": name}
all_transfers = [
{
**transfer,
'package-name': provider['package-name'],
'source-integration': all_operators_by_integration[transfer['source-integration-name']],
'target-integration': all_operators_by_integration[transfer['target-integration-name']],
}
for provider in package_data
for transfer in provider.get("transfers", [])
]
if tags is None:
to_display_transfers = all_transfers
else:
to_display_transfers = [
transfer
for transfer in all_transfers
if tags.intersection(transfer['source-integration'].get('tags', set()))
or tags.intersection(transfer['target-integration'].get('tags', set()))
]
for transfer in to_display_transfers:
if 'how-to-guide' not in transfer:
continue
transfer['how-to-guide'] = _docs_path(transfer['how-to-guide'])
return to_display_transfers
def _render_transfer_content(*, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
tabular_data = _prepare_transfer_data(tags)
return _render_template(
"operators_and_hooks_ref-transfers.rst.jinja2", items=tabular_data, header_separator=header_separator
)
class BaseJinjaReferenceDirective(Directive):
"""The base directive for OperatorsHooksReferenceDirective and TransfersReferenceDirective"""
optional_arguments = 1
option_spec = {"tags": directives.unchanged, 'header-separator': directives.unchanged_required}
def run(self):
tags_arg = self.options.get("tags")
tags = {t.strip() for t in tags_arg.split(",")} if tags_arg else None
header_separator = self.options.get('header-separator')
new_content = self.render_content(tags=tags, header_separator=header_separator)
with switch_source_input(self.state, self.content):
new_content = StringList(new_content.splitlines(), source='')
node = nodes.section() # type: Element
# necessary so that the child nodes get the right source/line set
node.document = self.state.document
nested_parse_with_titles(self.state, new_content, node)
# record all filenames as dependencies -- this will at least
# partially make automatic invalidation possible
for filepath in get_provider_yaml_paths():
self.state.document.settings.record_dependencies.add(filepath)
return node.children
def render_content(self, *, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
"""Return content in RST format"""
raise NotImplementedError("Tou need to override render_content method.")
class OperatorsHooksReferenceDirective(BaseJinjaReferenceDirective):
"""Generates a list of operators, sensors, hooks"""
def render_content(self, *, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
return _render_operator_content(
tags=tags,
header_separator=header_separator,
)
class TransfersReferenceDirective(BaseJinjaReferenceDirective):
"""Generate a list of transfer operators"""
def render_content(self, *, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
return _render_transfer_content(
tags=tags,
header_separator=header_separator,
)
def setup(app):
"""Setup plugin"""
app.add_directive('operators-hooks-ref', OperatorsHooksReferenceDirective)
app.add_directive('transfers-ref', TransfersReferenceDirective)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Render tables with integrations.')
parser.add_argument(
'--tag',
dest='tags',
action="append",
help='If passed, displays integrations that have a matching tag.',
)
parser.add_argument('--header-separator', default=DEFAULT_HEADER_SEPARATOR)
subparsers = parser.add_subparsers(help='sub-command help', metavar="COMMAND")
subparsers.required = True
parser_a = subparsers.add_parser(CMD_OPERATORS_AND_HOOKS)
parser_a.set_defaults(cmd=CMD_OPERATORS_AND_HOOKS)
parser_b = subparsers.add_parser(CMD_TRANSFERS)
parser_b.set_defaults(cmd=CMD_TRANSFERS)
args = parser.parse_args()
print(args)
if args.cmd == CMD_OPERATORS_AND_HOOKS:
content = _render_operator_content(
tags=set(args.tags) if args.tags else None, header_separator=args.header_separator
)
else:
content = _render_transfer_content(
tags=set(args.tags) if args.tags else None, header_separator=args.header_separator
)
print(content)
|
the-stack_106_31860 | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/1/26 10:58
Desc: financial options data
http://www.sse.com.cn/assortment/options/price/
"""
import pandas as pd
import requests
from akshare.option.cons import (
SH_OPTION_URL_50,
SH_OPTION_PAYLOAD,
SH_OPTION_PAYLOAD_OTHER,
SH_OPTION_URL_KING_50,
SH_OPTION_URL_300,
SH_OPTION_URL_KING_300,
CFFEX_OPTION_URL_300,
)
def option_finance_underlying(symbol: str = "50ETF") -> pd.DataFrame:
"""
    Daily quotes for the option underlying assets; currently only two products are covered: 华夏上证50ETF and 华泰柏瑞沪深300ETF
http://www.sse.com.cn/assortment/options/price/
    :param symbol: 50ETF or 300ETF
:type symbol: str
    :return: daily quotes of the option underlying
:rtype: pandas.DataFrame
"""
if symbol == "50ETF":
res = requests.get(SH_OPTION_URL_50, params=SH_OPTION_PAYLOAD)
data_json = res.json()
raw_data = pd.DataFrame(data_json["list"])
raw_data.at[0, 0] = "510050"
raw_data.at[0, 8] = pd.to_datetime(
str(data_json["date"]) + str(data_json["time"]), format="%Y%m%d%H%M%S"
)
raw_data.columns = [
"代码",
"名称",
"当前价",
"涨跌",
"涨跌幅",
"振幅",
"成交量(手)",
"成交额(万元)",
"更新日期",
]
return raw_data
else:
res = requests.get(SH_OPTION_URL_300, params=SH_OPTION_PAYLOAD)
data_json = res.json()
raw_data = pd.DataFrame(data_json["list"])
raw_data.at[0, 0] = "510300"
raw_data.at[0, 8] = pd.to_datetime(
str(data_json["date"]) + str(data_json["time"]), format="%Y%m%d%H%M%S"
)
raw_data.columns = [
"代码",
"名称",
"当前价",
"涨跌",
"涨跌幅",
"振幅",
"成交量(手)",
"成交额(万元)",
"更新日期",
]
return raw_data
def option_finance_board(symbol: str = "嘉实沪深300ETF期权", end_month: str = "2103") -> pd.DataFrame:
"""
    Detailed daily option quotes, covering: 华夏上证50ETF期权, 华泰柏瑞沪深300ETF期权, 嘉实沪深300ETF期权, 沪深300股指期权
http://www.sse.com.cn/assortment/options/price/
http://www.szse.cn/market/product/option/index.html
http://www.cffex.com.cn/hs300gzqq/
:param symbol: 华夏上证50ETF期权 or 华泰柏瑞沪深300ETF期权 or 嘉实沪深300ETF期权 or 沪深300股指期权
:type symbol: str
    :param end_month: 2003; options expiring in March 2020
:type end_month: str
    :return: daily quotes
:rtype: pandas.DataFrame
"""
end_month = end_month[-2:]
if symbol == "华夏上证50ETF期权":
res = requests.get(
SH_OPTION_URL_KING_50.format(end_month), params=SH_OPTION_PAYLOAD_OTHER
)
data_json = res.json()
raw_data = pd.DataFrame(data_json["list"])
raw_data.index = [str(data_json["date"]) + str(data_json["time"])] * data_json[
"total"
]
raw_data.columns = ["合约交易代码", "当前价", "涨跌幅", "前结价", "行权价"]
raw_data["数量"] = [data_json["total"]] * data_json["total"]
return raw_data
elif symbol == "华泰柏瑞沪深300ETF期权":
res = requests.get(
SH_OPTION_URL_KING_300.format(end_month), params=SH_OPTION_PAYLOAD_OTHER
)
data_json = res.json()
raw_data = pd.DataFrame(data_json["list"])
raw_data.index = [str(data_json["date"]) + str(data_json["time"])] * data_json[
"total"
]
raw_data.columns = ["合约交易代码", "当前价", "涨跌幅", "前结价", "行权价"]
raw_data["数量"] = [data_json["total"]] * data_json["total"]
return raw_data
elif symbol == "嘉实沪深300ETF期权":
url = "http://www.szse.cn/api/report/ShowReport/data"
params = {
"SHOWTYPE": "JSON",
"CATALOGID": "ysplbrb",
"TABKEY": "tab1",
"PAGENO": "1",
"random": "0.10642298535346595",
}
r = requests.get(url, params=params)
data_json = r.json()
page_num = data_json[0]["metadata"]["pagecount"]
big_df = pd.DataFrame()
for page in range(1, page_num+1):
params = {
"SHOWTYPE": "JSON",
"CATALOGID": "ysplbrb",
"TABKEY": "tab1",
"PAGENO": page,
"random": "0.10642298535346595",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json[0]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"合约编码",
"合约简称",
"标的名称",
"类型",
"行权价",
"合约单位",
"期权行权日",
"行权交收日",
]
big_df["期权行权日"] = pd.to_datetime(big_df["期权行权日"])
big_df["end_month"] = big_df["期权行权日"].dt.month.astype(str).str.zfill(2)
big_df = big_df[big_df["end_month"] == end_month]
del big_df["end_month"]
return big_df
elif symbol == "沪深300股指期权":
raw_df = pd.read_table(CFFEX_OPTION_URL_300, sep=",")
raw_df["end_month"] = (
raw_df["instrument"].str.split("-", expand=True).iloc[:, 0].str.slice(4,)
)
raw_df = raw_df[raw_df["end_month"] == end_month]
del raw_df["end_month"]
return raw_df
if __name__ == "__main__":
option_finance_underlying_df = option_finance_underlying(symbol="300ETF")
print(option_finance_underlying_df)
option_finance_board_df = option_finance_board(symbol="华夏上证50ETF期权", end_month="2003")
print(option_finance_board_df)
option_finance_board_df = option_finance_board(symbol="嘉实沪深300ETF期权", end_month="2103")
print(option_finance_board_df)
option_finance_board_df = option_finance_board(symbol="华泰柏瑞沪深300ETF期权", end_month="2103")
print(option_finance_board_df)
option_finance_board_df = option_finance_board(symbol="沪深300股指期权", end_month="2003")
print(option_finance_board_df)
|
the-stack_106_31861 | """
CircuitPython Touch Input Example - Blinking an LED using a capacitive touch pad.
This example is meant for boards that have capacitive touch pads, and no simple way to wire up
a button. If there is a simple way to wire up a button, or a button built into the board, use
the standard Digital Input template and example.
Update TOUCH_PAD_PIN to the pin for the capacitive touch pad you wish you use.
For example:
If you are using a BLM Badge and plan to use the first pad, change TOUCH_PAD_PIN to CAP1.
"""
import board
import digitalio
import touchio
led = digitalio.DigitalInOut(board.D13)
led.direction = digitalio.Direction.OUTPUT
touch = touchio.TouchIn(board.TOUCH_PAD_PIN)
while True:
if touch.value:
led.value = True
else:
led.value = False
|
the-stack_106_31863 | """Implementation of the Perdomo et. al model of strategic classification.
The data is from the Kaggle Give Me Some Credit dataset:
https://www.kaggle.com/c/GiveMeSomeCredit/data,
and the dynamics are taken from:
Perdomo, Juan C., Tijana Zrnic, Celestine Mendler-Dünner, and Moritz Hardt.
"Performative Prediction." arXiv preprint arXiv:2002.06673 (2020).
"""
import copy
import dataclasses
from typing import Callable, List
import whynot as wn
import whynot.traceable_numpy as np
from whynot.dynamics import BaseConfig, BaseIntervention, BaseState
from whynot.simulators.credit.dataloader import CreditData
@dataclasses.dataclass
class Config(BaseConfig):
# pylint: disable-msg=too-few-public-methods
"""Parameterization of Credit simulator dynamics.
Examples
--------
>>> # Configure simulator for run for 10 iterations
>>> config = Config(start_time=0, end_time=10, delta_t=1)
"""
# Dynamics parameters
#: Subset of the features that can be manipulated by the agent
changeable_features: np.ndarray = np.array([0, 5, 7])
#: Model how much the agent adapt her features in response to a classifier
epsilon: float = 0.1
#: Parameters for logistic regression classifier used by the institution
theta: np.ndarray = np.ones((11, 1))
#: L2 penalty on the logistic regression loss
l2_penalty: float = 0.0
#: Whether or not dynamics have memory
memory: bool = False
# Simulator book-keeping
#: Start time of the simulator
start_time: int = 0
#: End time of the simulator
end_time: int = 5
#: Spacing of the evaluation grid
delta_t: int = 1
@dataclasses.dataclass
class State(BaseState):
# pylint: disable-msg=too-few-public-methods
"""State of the Credit model."""
#: Matrix of agent features (see https://www.kaggle.com/c/GiveMeSomeCredit/data)
features: np.ndarray = np.ones((13, 11))
#: Vector indicating whether or not the agent experiences financial distress
labels: np.ndarray = np.zeros((13, 1))
def values(self):
"""Return the state as a dictionary of numpy arrays."""
return {name: getattr(self, name) for name in self.variable_names()}
class Intervention(BaseIntervention):
# pylint: disable-msg=too-few-public-methods
"""Parameterization of an intervention in the Credit model.
An intervention changes a subset of the configuration variables in the
specified year. The remaining variables are unchanged.
Examples
--------
>>> # Starting at time 25, update the classifier to random chance.
>>> config = Config()
>>> Intervention(time=25, theta=np.zeros_like(config.theta))
"""
def __init__(self, time=30, **kwargs):
"""Specify an intervention in credit.
Parameters
----------
time: int
Time of intervention in simulator dynamics.
kwargs: dict
Only valid keyword arguments are parameters of Config.
"""
super(Intervention, self).__init__(Config, time, **kwargs)
def strategic_logistic_loss(config, features, labels, theta):
"""Evaluate the performative loss for logistic regression classifier."""
config = config.update(Intervention(theta=theta))
# Compute adjusted data
strategic_features = agent_model(features, config)
# compute log likelihood
num_samples = strategic_features.shape[0]
logits = strategic_features @ config.theta
log_likelihood = (1.0 / num_samples) * np.sum(
-1.0 * np.multiply(labels, logits) + np.log(1 + np.exp(logits))
)
# Add regularization (without considering the bias)
regularization = (config.l2_penalty / 2.0) * np.linalg.norm(config.theta[:-1]) ** 2
return log_likelihood + regularization
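# Illustrative usage sketch (not part of the original module): evaluate the performative
# loss on the bundled Kaggle data with the default classifier parameters.
#
#   config = Config()
#   loss = strategic_logistic_loss(
#       config, CreditData.features, CreditData.labels, config.theta
#   )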
def agent_model(features, config):
"""Compute agent reponse to the classifier and adapt features accordingly.
TODO: For now, the best-response model corresponds to best-response with
linear utility and quadratic costs. We should expand this to cover a rich
set of agent models beyond linear/quadratic, and potentially beyond
best-response.
"""
# Move everything by epsilon in the direction towards better classification
strategic_features = np.copy(features)
theta_strat = config.theta[config.changeable_features].flatten()
strategic_features[:, config.changeable_features] -= config.epsilon * theta_strat
return strategic_features
def dynamics(state, time, config, intervention=None):
"""Perform one round of interaction between the agents and the credit scorer.
Parameters
----------
state: whynot.simulators.credit.State
Agent state at time TIME
time: int
Current round of interaction
config: whynot.simulators.credit.Config
Configuration object controlling the interaction, e.g. classifier
and agent model
intervention: whynot.simulators.credit.Intervention
Intervention object specifying when and how to update the dynamics.
Returns
-------
state: whynot.simulators.credit.State
Agent state after one step of strategic interaction.
"""
if intervention and time >= intervention.time:
config = config.update(intervention)
if config.memory:
features, labels = state
else:
features, labels = CreditData.features, CreditData.labels
# Update features in response to classifier. Labels are fixed.
strategic_features = agent_model(features, config)
return strategic_features, labels
def simulate(initial_state, config, intervention=None, seed=None):
"""Simulate a run of the Credit model.
Parameters
----------
initial_state: whynot.credit.State
config: whynot.credit.Config
Base parameters for the simulator run
intervention: whynot.credit.Intervention
(Optional) Parameters specifying a change in dynamics
seed: int
Unused since the simulator is deterministic.
Returns
-------
run: whynot.dynamics.Run
Simulator rollout
"""
# Iterate the discrete dynamics
times = [config.start_time]
states = [initial_state]
state = copy.deepcopy(initial_state)
for step in range(config.start_time, config.end_time):
next_state = dynamics(state.values(), step, config, intervention)
state = State(*next_state)
states.append(state)
times.append(step + 1)
return wn.dynamics.Run(states=states, times=times)
if __name__ == "__main__":
print(simulate(State(), Config(end_time=2)))
|
the-stack_106_31864 | from typing import Dict, List, Optional, Any, Union
from pydantic import BaseModel, validator
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Documentation, PortDoc
from tracardi.service.plugin.domain.result import Result
from tracardi.service.plugin.runner import ActionRunner
from tracardi.domain.event import Event
from tracardi.domain.profile import Profile
from tracardi.domain.session import Session
class Configuration(BaseModel):
append: Optional[Dict[str, Any]] = {}
remove: Optional[Dict[str, Union[Any, List[Any]]]] = {}
@validator("remove")
def validate_remove(cls, value, values):
if 'append' not in values and 'remove' not in values:
raise ValueError("Please define `append` or `remove` in config section.")
return value
def validate(config: dict):
return Configuration(**config)
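# Example plugin configuration (paths and values are illustrative only):
#   {
#       "append": {"traits.public.tags": "event@properties.tag"},
#       "remove": {"traits.public.tags": ["obsolete-tag"]}
#   }
# `append` copies the value found at the source dot-path into the target key
# (promoting an existing scalar to a list), while `remove` deletes the listed
# values from the target list.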
class AppendTraitAction(ActionRunner):
def __init__(self, **kwargs):
self.config = validate(kwargs)
async def run(self, payload: dict):
dot = self._get_dot_accessor(payload if isinstance(payload, dict) else None)
for destination, value in self.config.append.items():
value = dot[value]
if destination in dot:
if not isinstance(dot[destination], list):
# Make it a list with original value
dot[destination] = [dot[destination]]
if value not in dot[destination]:
dot[destination].append(value)
else:
dot[destination] = value
for destination, value in self.config.remove.items():
value = dot[value]
if destination in dot:
if not isinstance(dot[destination], list):
raise ValueError("Can not remove from non-list data.")
if isinstance(value, list):
for v in value:
if v in dot[destination]:
dot[destination].remove(v)
elif value in dot[destination]:
dot[destination].remove(value)
if self.event.metadata.profile_less is False:
if not isinstance(dot.profile['traits']['private'], dict):
raise ValueError("Error when appending [email protected] to value `{}`. "
"Private must have key:value pair. "
"E.g. `name`: `{}`".format(dot.profile['traits']['private'],
dot.profile['traits']['private']))
if not isinstance(dot.profile['traits']['public'], dict):
raise ValueError(
"Error when appending [email protected] to value `{}`. Public must have key:value pair. "
"E.g. `name`: `{}`".format(dot.profile['traits']['public'], dot.profile['traits']['public']))
profile = Profile(**dot.profile)
self.profile.replace(profile)
else:
if dot.profile:
self.console.warning("Profile changes were discarded in node `Append/Remove Trait`. "
"This event is profile less so there is no profile.")
event = Event(**dot.event)
self.event.replace(event)
if 'id' in dot.session:
session = Session(**dot.session)
self.session.replace(session)
self.update_profile()
return Result(port="payload", value=payload)
def register() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module='tracardi.process_engine.action.v1.traits.append_trait_action',
className='AppendTraitAction',
inputs=['payload'],
outputs=["payload"],
init={
"append": {
"target1": "source1",
"target2": "source2",
},
"remove": {
"target": ["item1", "item2"]
}
},
version='0.1',
license="MIT",
author="Risto Kowaczewski"
),
metadata=MetaData(
name='Append/Remove Trait',
desc='Appends/Removes trait to/from existing profile trait.',
icon='append',
group=["Data processing"],
documentation=Documentation(
inputs={
"payload": PortDoc(desc="This port takes any JSON-like object.")
},
outputs={
"payload": PortDoc(desc="This port returns given payload with traits appended or removed according"
" to configuration.")
}
)
)
)
|
the-stack_106_31865 | import cv2
import os.path as op
import numpy as np
import pandas as pd
from PIL import Image
import torch
import torch.nn as nn
from torchvision import transforms, datasets
from torch.utils.data import Dataset
from torch.utils.data.sampler import BatchSampler
from torchvision.datasets import ImageFolder
import random
from .align import Alignment
from universal_utils import read_lines_into_list
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
class myImageFolder(ImageFolder):
@property
def train_labels(self):
warnings.warn("train_labels has been renamed targets")
return self.targets
def __init__(self, root, transform=None, target_transform=None):
super(myImageFolder, self).__init__(root, transform, target_transform)
class SiameseDFWImageFolder(Dataset):
"""
Train: For each sample creates randomly a positive or a negative pair
Test: Creates fixed pairs for testing
"""
def __init__(self, imgs_folder_dir, transform, dataset_type="training"):
assert dataset_type in ["training", "testing"]
print('>>> In SIFolder, imgfolderdir=', imgs_folder_dir)
self.root = imgs_folder_dir
self.dataset_type = dataset_type
matrix_txt_path = os.path.join(self.root, 'Mask_matrices', dataset_type,
f"{dataset_type}_data_mask_matrix.txt")
self.data_mask_matrix = np.loadtxt(matrix_txt_path)
img_path_list_path = os.path.join(self.root, f"{dataset_type.capitalize()}_data_face_name.txt")
self.img_path_list = read_lines_into_list(img_path_list_path)
self.img_label_list, self.name2label = self.img_path_to_label_list(self.img_path_list)
self.transform = transform
# ############################################
# self.wFace_dataset = ImageFolder(imgs_folder_dir, transform)
self.class_num = len(self.name2label)
# ##################################
# # self.memoryAll = False
# self.train_labels = np.array(self.wFace_dataset.targets, dtype=int)
# print('>>> self.train_labels:', self.train_labels[1000:1010])
# self.train_data = self.wFace_dataset
# self.labels_set = set(self.train_labels)
# self.label_to_indices = {label:
# np.where(self.train_labels
# == label)[0]
# for label in self.labels_set}
# print('>>> Init SiameseDFWImageFolder done!')
def __getitem__(self, idx):
        '''
        Return (img1, img2, label1, label2): two transformed image tensors and
        their identity labels, sampled as a positive or a negative pair.
        '''
# print('>>> In getItem, idx = ', idx)
# Sample the 1-st image
img1_path = os.path.join(self.root, self.img_path_list[idx])
img1 = self.load_transformed_img_tensor(img1_path)
label1 = self.img_label_list[idx]
# Sample the 2-nd image
# is_the_same_id is a bool that determines whether returning one pair with the same identity.
is_the_same_id = np.random.randint(0, 2)
############
img2_path = self.get_siamese_path(idx, is_the_same_id)
img2_path = os.path.join(self.root, img2_path)
# print("In getitem, img2_path: ", img2_path)
# print("In getitem, img1_path: ", img1_path)
img2 = self.load_transformed_img_tensor(img2_path)
label2 = self.img_path_to_label(img2_path)
###################################
# img1, label1 = self.train_data[index] # , self.train_labels[index].item()
# if target == 1:
# siamese_index = index
# while siamese_index == index:
# siamese_index = np.random.choice(self.label_to_indices[label1])
# else:
# siamese_label = np.random.choice(
# list(self.labels_set - set([label1])))
# siamese_index = np.random.choice(
# self.label_to_indices[siamese_label])
# img2, label2 = self.train_data[siamese_index]
return img1, img2, label1, label2
def __len__(self):
return len(self.img_path_list)
def img_path_to_label_list(self, path_list):
label_list = []
name_list = []
name2label = {}
for path in path_list:
# path e.g. Training_data/Matthew_McConaughey/Matthew_McConaughey_h_002.jpg
# Assume that Imposter Impersonator is one unique identity
if "_I_" in path:
name = path.split('/')[-1][:-8]
else:
name = path.split('/')[1]
if not name in name_list:
name_list.append(name)
name2label[name] = len(name_list) - 1
label = name2label[name]
label_list.append(label)
return label_list, name2label
def img_path_to_label(self, path):
# path e.g. data/dfw/Training_data/Matthew_McConaughey/Matthew_McConaughey_h_003.jpg
if "_I_" in path:
name = path.split('/')[-1][:-8]
else:
name = path.split('/')[3]
return self.name2label[name]
def load_transformed_img_tensor(self, path):
img = datasets.folder.default_loader(path)
# XXX
t = transforms.Resize([112, 112])
img = t(img)
# print(img)
# print('>>>>> In load_tr, img.size =', img.size())
if self.transform is not None:
img = self.transform(img)
else:
raise NotImplementedError
return img
def get_siamese_path(self, idx, is_the_same_id):
        '''
        Input:
            idx: index of the anchor image in self.img_path_list.
            is_the_same_id: if truthy, sample a path sharing the anchor's identity;
                otherwise sample a path from a different identity.
        Returns the relative path of the sampled pair image.
        '''
candidate = self.data_mask_matrix[idx]
positions = []
# print(">>>> Is the same", is_the_same_id)
if is_the_same_id:
targets = [1, 2]
for target in targets:
pos = np.where(candidate == target)[0]
pos = list(pos)
# print(">>>> candidate=", candidate)
# print(">>>> pos= ", pos)
positions += pos
# _I.jpg case (no identical id)
if len(positions) == 0:
pos3 = np.where(candidate == 3)[0]
pos3 = list(pos3)
positions += pos3
else:
pos3 = np.where(candidate == 3)[0]
pos4 = np.where(candidate == 4)[0]
pos3 = list(pos3)
pos4 = list(pos4)
# print(">>>> candidate=", candidate)
# print(">>>> pos3= ", pos3)
# print(">>>> pos4= ", pos4)
# _I.jpg case
if len(pos4) > 0:
pos4 = random.sample(pos4, max(len(pos3), 1)) # at least take 1 sample
positions += pos4
positions += pos3
assert len(positions) > 0
siamese_idx = random.choice(positions)
return self.img_path_list[siamese_idx]
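# Minimal usage sketch (assumes the DFW folder layout and mask matrices
# described above are present under `data/dfw`):
#
#   from torchvision import transforms
#   from torch.utils.data import DataLoader
#
#   tfm = transforms.Compose([transforms.ToTensor(),
#                             transforms.Normalize([.5, .5, .5], [.5, .5, .5])])
#   train_set = SiameseDFWImageFolder('data/dfw', tfm, dataset_type="training")
#   loader = DataLoader(train_set, batch_size=32, shuffle=True)
#   img1, img2, label1, label2 = next(iter(loader))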
class SiameseImageFolder(Dataset):
"""
Train: For each sample creates randomly a positive or a negative pair
Test: Creates fixed pairs for testing
"""
def __init__(self, imgs_folder_dir, transform):
print('>>> In SIFolder, imgfolderdir=', imgs_folder_dir)
self.root = imgs_folder_dir
self.wFace_dataset = ImageFolder(imgs_folder_dir, transform)
self.class_num = len(self.wFace_dataset.classes)
print('>>> self.class_num = ', self.class_num)
# self.memoryAll = False
self.train_labels = np.array(self.wFace_dataset.targets, dtype=int)
print('>>> self.train_labels:', self.train_labels[1000:1010])
self.train_data = self.wFace_dataset
# XXX
# if self.memoryAll:
# self.train_data = self.wFace_dataset.train_data
self.labels_set = set(self.train_labels)
self.label_to_indices = {label:
np.where(self.train_labels
== label)[0]
for label in self.labels_set}
print('>>> Init SiameseImageFolder done!')
def __getitem__(self, index):
        '''
        Return (img1, img2, label1, label2) for a randomly sampled positive or
        negative pair drawn from the underlying ImageFolder.
        '''
target = np.random.randint(0, 2)
img1, label1 = self.train_data[index] # , self.train_labels[index].item()
if target == 1:
siamese_index = index
while siamese_index == index:
siamese_index = np.random.choice(self.label_to_indices[label1])
else:
siamese_label = np.random.choice(
list(self.labels_set - set([label1])))
siamese_index = np.random.choice(
self.label_to_indices[siamese_label])
img2, label2 = self.train_data[siamese_index]
# XXX stack
# stack (img1, img2), (label1, label2), cos_gt
return img1, img2, label1, label2
def __len__(self):
return len(self.wFace_dataset)
class SiameseWholeFace(Dataset):
"""
Train: For each sample creates randomly a positive or a negative pair
Test: Creates fixed pairs for testing
"""
# @property
# def train_data(self):
# warnings.warn("train_data has been renamed data")
# return self.wFace_dataset
# @property
# def test_data(self):
# warnings.warn("test_data has been renamed data")
# return self.wFace_dataset
def __init__(self, wFace_dataset):
self.wFace_dataset = wFace_dataset
self.train = self.wFace_dataset.train
self.memoryAll = self.wFace_dataset.memoryAll
if self.train:
self.train_labels = self.wFace_dataset.train_labels
self.train_data = self.wFace_dataset
if self.memoryAll:
self.train_data = self.wFace_dataset.train_data
self.labels_set = set(self.train_labels.numpy())
self.label_to_indices = {label:
np.where(self.train_labels.numpy()
== label)[0]
for label in self.labels_set}
else:
# generate fixed pairs for testing
# TODO: @property like MNIST
self.test_labels = self.wFace_dataset.test_labels
self.test_data = self.wFace_dataset
if self.memoryAll:
self.test_data = self.wFace_dataset.test_data
self.labels_set = set(self.test_labels.numpy())
self.label_to_indices = {label:
np.where(self.test_labels.numpy()
== label)[0]
for label in self.labels_set}
random_state = np.random.RandomState(29)
positive_pairs = [[i,
random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
1]
for i in range(0, len(self.test_data), 2)]
negative_pairs = [[i,
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.test_labels[i].item()]))
)
]),
0]
for i in range(1, len(self.test_data), 2)]
self.test_pairs = positive_pairs + negative_pairs
print('>>> Init SiameseWholeFace done!')
def __getitem__(self, index):
        '''
        img1 = (feat_grid, feat_fc), matching the unpacking below.
        '''
if self.train:
target = np.random.randint(0, 2)
img1, label1 = self.train_data[index], self.train_labels[index].item()
if target == 1:
siamese_index = index
while siamese_index == index:
siamese_index = np.random.choice(self.label_to_indices[label1])
else:
siamese_label = np.random.choice(list(self.labels_set - set([label1])))
siamese_index = np.random.choice(self.label_to_indices[siamese_label])
img2 = self.train_data[siamese_index]
else:
img1 = self.test_data[self.test_pairs[index][0]]
img2 = self.test_data[self.test_pairs[index][1]]
target = self.test_pairs[index][2]
        # [Deprecated] feat1 is of size [21504]
# feat1, feat2 = img1.view(-1), img2.view(-1)
# cosine = cos(feat1, feat2).numpy()
# target = cosine
feat_grid_1 , feat_fc_1 = img1
feat_grid_2 , feat_fc_2 = img2
return (feat_grid_1, feat_fc_1, feat_grid_2, feat_fc_2), target
def __len__(self):
return len(self.wFace_dataset)
class SiameseENM(Dataset):
"""
Train: For each sample creates randomly a positive or a negative pair
Test: Creates fixed pairs for testing
"""
def __init__(self, ENM_dataset):
self.ENM_dataset = ENM_dataset
self.train = self.ENM_dataset.train
# self.train = False
if self.train:
self.train_labels = self.ENM_dataset.train_labels
self.train_data = self.ENM_dataset.train_data
self.labels_set = set(self.train_labels.numpy())
self.label_to_indices = {label:
np.where(self.train_labels.numpy()
== label)[0]
for label in self.labels_set}
else:
# generate fixed pairs for testing
# TODO: @property like MNIST
self.test_labels = self.ENM_dataset.test_labels
self.test_data = self.ENM_dataset.test_data
self.labels_set = set(self.test_labels.numpy())
self.label_to_indices = {label:
np.where(self.test_labels.numpy()
== label)[0]
for label in self.labels_set}
random_state = np.random.RandomState(29)
positive_pairs = [[i,
random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
1]
for i in range(0, len(self.test_data), 2)]
negative_pairs = [[i,
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.test_labels[i].item()]))
)
]),
0]
for i in range(1, len(self.test_data), 2)]
self.test_pairs = positive_pairs + negative_pairs
def __getitem__(self, index):
if self.train:
target = np.random.randint(0, 2)
img1, label1 = self.train_data[index], self.train_labels[index].item()
if target == 1:
siamese_index = index
while siamese_index == index:
siamese_index = np.random.choice(self.label_to_indices[label1])
else:
siamese_label = np.random.choice(list(self.labels_set - set([label1])))
siamese_index = np.random.choice(self.label_to_indices[siamese_label])
img2 = self.train_data[siamese_index]
else:
img1 = self.test_data[self.test_pairs[index][0]]
img2 = self.test_data[self.test_pairs[index][1]]
target = self.test_pairs[index][2]
return (img1, img2), target
def __len__(self):
return len(self.ENM_dataset)
class TripletENM(Dataset):
"""
Train: For each sample (anchor) randomly chooses a positive and negative samples
Test: Creates fixed triplets for testing
"""
def __init__(self, ENM_dataset):
self.ENM_dataset = ENM_dataset
self.train = self.ENM_dataset.train
if self.train:
self.train_labels = self.ENM_dataset.train_labels
self.train_data = self.ENM_dataset.train_data
self.labels_set = set(self.train_labels.numpy())
self.label_to_indices = {label: np.where(self.train_labels.numpy() == label)[0]
for label in self.labels_set}
else:
self.test_labels = self.ENM_dataset.test_labels
self.test_data = self.ENM_dataset.test_data
# generate fixed triplets for testing
self.labels_set = set(self.test_labels.numpy())
self.label_to_indices = {label: np.where(self.test_labels.numpy() == label)[0]
for label in self.labels_set}
random_state = np.random.RandomState(29)
triplets = [[i,
random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.test_labels[i].item()]))
)
])
]
for i in range(len(self.test_data))]
self.test_triplets = triplets
def __getitem__(self, index):
if self.train:
img1, label1 = self.train_data[index], self.train_labels[index].item()
positive_index = index
while positive_index == index:
positive_index = np.random.choice(self.label_to_indices[label1])
negative_label = np.random.choice(list(self.labels_set - set([label1])))
negative_index = np.random.choice(self.label_to_indices[negative_label])
img2 = self.train_data[positive_index]
img3 = self.train_data[negative_index]
else:
img1 = self.test_data[self.test_triplets[index][0]]
img2 = self.test_data[self.test_triplets[index][1]]
img3 = self.test_data[self.test_triplets[index][2]]
return (img1, img2, img3), []
def __len__(self):
return len(self.ENM_dataset)
class SiameseMNIST(Dataset):
"""
Train: For each sample creates randomly a positive or a negative pair
Test: Creates fixed pairs for testing
"""
def __init__(self, mnist_dataset):
self.mnist_dataset = mnist_dataset
self.train = self.mnist_dataset.train
self.transform = self.mnist_dataset.transform
if self.train:
self.train_labels = self.mnist_dataset.train_labels
self.train_data = self.mnist_dataset.train_data
self.labels_set = set(self.train_labels.numpy())
self.label_to_indices = {label: np.where(self.train_labels.numpy() == label)[0]
for label in self.labels_set}
else:
# generate fixed pairs for testing
self.test_labels = self.mnist_dataset.test_labels
self.test_data = self.mnist_dataset.test_data
self.labels_set = set(self.test_labels.numpy())
self.label_to_indices = {label: np.where(self.test_labels.numpy() == label)[0]
for label in self.labels_set}
random_state = np.random.RandomState(29)
positive_pairs = [[i,
random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
1]
for i in range(0, len(self.test_data), 2)]
negative_pairs = [[i,
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.test_labels[i].item()]))
)
]),
0]
for i in range(1, len(self.test_data), 2)]
self.test_pairs = positive_pairs + negative_pairs
def __getitem__(self, index):
if self.train:
target = np.random.randint(0, 2)
img1, label1 = self.train_data[index], self.train_labels[index].item()
if target == 1:
siamese_index = index
while siamese_index == index:
siamese_index = np.random.choice(self.label_to_indices[label1])
else:
siamese_label = np.random.choice(list(self.labels_set - set([label1])))
siamese_index = np.random.choice(self.label_to_indices[siamese_label])
img2 = self.train_data[siamese_index]
else:
img1 = self.test_data[self.test_pairs[index][0]]
img2 = self.test_data[self.test_pairs[index][1]]
target = self.test_pairs[index][2]
img1 = Image.fromarray(img1.numpy(), mode='L')
img2 = Image.fromarray(img2.numpy(), mode='L')
if self.transform is not None:
img1 = self.transform(img1)
img2 = self.transform(img2)
return (img1, img2), target
def __len__(self):
return len(self.mnist_dataset)
class TripletMNIST(Dataset):
"""
Train: For each sample (anchor) randomly chooses a positive and negative samples
Test: Creates fixed triplets for testing
"""
def __init__(self, mnist_dataset):
self.mnist_dataset = mnist_dataset
self.train = self.mnist_dataset.train
self.transform = self.mnist_dataset.transform
if self.train:
self.train_labels = self.mnist_dataset.train_labels
self.train_data = self.mnist_dataset.train_data
self.labels_set = set(self.train_labels.numpy())
self.label_to_indices = {label: np.where(self.train_labels.numpy() == label)[0]
for label in self.labels_set}
else:
self.test_labels = self.mnist_dataset.test_labels
self.test_data = self.mnist_dataset.test_data
# generate fixed triplets for testing
self.labels_set = set(self.test_labels.numpy())
self.label_to_indices = {label: np.where(self.test_labels.numpy() == label)[0]
for label in self.labels_set}
random_state = np.random.RandomState(29)
triplets = [[i,
random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
random_state.choice(self.label_to_indices[
np.random.choice(
list(self.labels_set - set([self.test_labels[i].item()]))
)
])
]
for i in range(len(self.test_data))]
self.test_triplets = triplets
def __getitem__(self, index):
if self.train:
img1, label1 = self.train_data[index], self.train_labels[index].item()
positive_index = index
while positive_index == index:
positive_index = np.random.choice(self.label_to_indices[label1])
negative_label = np.random.choice(list(self.labels_set - set([label1])))
negative_index = np.random.choice(self.label_to_indices[negative_label])
img2 = self.train_data[positive_index]
img3 = self.train_data[negative_index]
else:
img1 = self.test_data[self.test_triplets[index][0]]
img2 = self.test_data[self.test_triplets[index][1]]
img3 = self.test_data[self.test_triplets[index][2]]
img1 = Image.fromarray(img1.numpy(), mode='L')
img2 = Image.fromarray(img2.numpy(), mode='L')
img3 = Image.fromarray(img3.numpy(), mode='L')
if self.transform is not None:
img1 = self.transform(img1)
img2 = self.transform(img2)
img3 = self.transform(img3)
return (img1, img2, img3), []
def __len__(self):
return len(self.mnist_dataset)
class BalancedBatchSampler(BatchSampler):
"""
BatchSampler - from a MNIST-like dataset, samples n_classes and within these classes samples n_samples.
Returns batches of size n_classes * n_samples
"""
def __init__(self, labels, n_classes, n_samples):
self.labels = labels
self.labels_set = list(set(self.labels.numpy()))
self.label_to_indices = {label: np.where(self.labels.numpy() == label)[0]
for label in self.labels_set}
for l in self.labels_set:
np.random.shuffle(self.label_to_indices[l])
self.used_label_indices_count = {label: 0 for label in self.labels_set}
self.count = 0
self.n_classes = n_classes
self.n_samples = n_samples
self.n_dataset = len(self.labels)
self.batch_size = self.n_samples * self.n_classes
def __iter__(self):
self.count = 0
while self.count + self.batch_size < self.n_dataset:
classes = np.random.choice(self.labels_set, self.n_classes, replace=False)
indices = []
for class_ in classes:
indices.extend(self.label_to_indices[class_][
self.used_label_indices_count[class_]:self.used_label_indices_count[
class_] + self.n_samples])
self.used_label_indices_count[class_] += self.n_samples
if self.used_label_indices_count[class_] + self.n_samples > len(self.label_to_indices[class_]):
np.random.shuffle(self.label_to_indices[class_])
self.used_label_indices_count[class_] = 0
yield indices
self.count += self.n_classes * self.n_samples
def __len__(self):
return self.n_dataset // self.batch_size
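# Usage sketch (hypothetical dataset exposing integer targets):
#
#   labels = torch.as_tensor(dataset.targets)
#   sampler = BalancedBatchSampler(labels, n_classes=8, n_samples=4)
#   loader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler)
#   # each yielded batch holds 8 identities x 4 samples = 32 items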
import os
import config  # assumed: project-local config module providing FEAT_DIR and WORKERS used below
import pickle
import torch
import warnings
from glob import glob
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from joblib import Parallel, delayed
class ENMDataset:
@property
def train_labels(self):
warnings.warn("train_labels has been renamed targets")
return self.targets
@property
def test_labels(self):
warnings.warn("test_labels has been renamed targets")
return self.targets
@property
def train_data(self):
warnings.warn("train_data has been renamed data")
return self.data
@property
def test_data(self):
warnings.warn("test_data has been renamed data")
return self.data
def __init__(self, root_dir, featType='nose', train=True, valSize=0.2):
print('>>> Init ENM dataset for', featType,
root_dir, 'isTrain:', train)
self.featType = featType
self.id_paths = glob(os.path.join(root_dir, '*/'))
dataPaths = [glob(d + '*.pkl') for d in self.id_paths]
dataPaths = [item for sublist in dataPaths for item in sublist]
train_ps, val_ps = train_test_split(dataPaths,
test_size=valSize,
random_state=42)
self.train_id_paths, self.val_id_paths = train_ps, val_ps
# print(len(self.train_id_paths), len(self.val_id_paths))
# print(self.train_id_paths[:4])
# print(self.val_id_paths[:4])
self.train = train
if self.train:
dataPaths = self.train_id_paths
else:
dataPaths = self.val_id_paths
# print(len(dataPaths), dataPaths[:4])
trainFlag = 'train' if self.train else 'val'
self.pkl_fname = os.path.join(config.FEAT_DIR, self.featType + '_' + trainFlag + '.pkl')
if os.path.isfile(self.pkl_fname):
print('>>> Loading:', self.pkl_fname)
with open(self.pkl_fname, 'rb') as h:
self.data, self.targets = pickle.load(h)
else:
self.data, self.targets = self.loadData(dataPaths)
def loadData(self, paths):
# XXX train set: 12 min; val set: 3 min
data = []
targets = []
def loadPkl(p):
with open(p, 'rb') as h:
feat = pickle.load(h)
data.append(feat[self.featType])
targets.append(feat['label'])
Parallel(n_jobs=config.WORKERS, require='sharedmem', prefer="threads")(delayed(loadPkl)(p) for p in tqdm(paths))
# for p in tqdm(paths):
# with open(p, 'rb') as h:
# feat = pickle.load(h)
# data.append(feat[self.featType])
# targets.append(feat['label'])
# print(data[0])
# print(targets[0])
data = torch.stack(data, 0)
targets = torch.stack(targets, 0)
# print(data.size())
# print(targets.size())
# Save pkl
with open(self.pkl_fname, 'wb') as h:
pickle.dump((data, targets), h, protocol=pickle.HIGHEST_PROTOCOL)
return data, targets
def __getitem__(self, key): # XXX It seems useless?
return self.data[key], self.targets[key]
def __len__(self):
return len(self.data)
class FaceFeatDataset:
# XXX: self.target contain all labels;
# while _getitem_ can only get image feat
@property
def train_labels(self):
warnings.warn("train_labels has been renamed targets")
return self.targets
@property
def test_labels(self):
warnings.warn("test_labels has been renamed targets")
return self.targets
@property
def train_data(self):
warnings.warn("train_data has been renamed data")
return self.data_mix
@property
def test_data(self):
warnings.warn("test_data has been renamed data")
return self.data_mix
def __init__(self, root_dir, train=True, valSize=0.2, memoryAll=False):
print('>>> Init FaceFeat dataset for', root_dir, 'isTrain:', train, 'memoryAll:', memoryAll)
self.memoryAll = memoryAll
self.id_paths = glob(os.path.join(root_dir, '*/'))
dataPaths = [glob(d + '*.pkl') for d in self.id_paths]
dataPaths = [item for sublist in dataPaths for item in sublist]
train_ps, val_ps = train_test_split(dataPaths,
test_size=valSize,
random_state=42)
self.train_id_paths, self.val_id_paths = train_ps, val_ps
# print(len(self.train_id_paths), len(self.val_id_paths))
# print(self.train_id_paths[:4])
# print(self.val_id_paths[:4])
self.train = train
if self.train:
self.dataPaths = self.train_id_paths
else:
self.dataPaths = self.val_id_paths
trainFlag = 'train' if self.train else 'val'
# Load target labels
if not self.memoryAll:
self.pkl_fname = os.path.join(config.FEAT_DIR, 'target_' + trainFlag + '.pkl')
if os.path.isfile(self.pkl_fname):
print('>>> Loading:', self.pkl_fname)
with open(self.pkl_fname, 'rb') as h:
self.targets = pickle.load(h)
else:
self.targets = self.loadTargets(self.dataPaths)
else:
self.pkl_fname = os.path.join(config.FEAT_DIR, 'targetAndData_' + trainFlag + '.pkl')
if os.path.isfile(self.pkl_fname):
print('>>> Loading:', self.pkl_fname)
with open(self.pkl_fname, 'rb') as h:
self.data, self.data_fc, self.targets = pickle.load(h)
self.data_mix = DataMix(self.data, self.data_fc)
else:
self.data, self.data_fc, self.targets = self.loadData(self.dataPaths)
self.data_mix = DataMix(self.data, self.data_fc)
print('>>> Init done!')
def loadData(self, paths):
# XXX train set: 12 min; val set: 3 min
data = []
data_fc = []
targets = []
def loadPkl(p):
with open(p, 'rb') as h:
feat = pickle.load(h)
data.append(feat['wholeFaceFeat_42grid'])
data_fc.append(feat['wholeFaceFeat'])
targets.append(feat['label'])
del feat
print('>>> In faceFeatDataset, Loading ALL datas')
Parallel(n_jobs=6, require='sharedmem', prefer="threads")(delayed(loadPkl)(p) for p in tqdm(paths))
# for p in tqdm(paths):
# with open(p, 'rb') as h:
# feat = pickle.load(h)
# data.append(feat[self.featType])
# targets.append(feat['label'])
# print(data[0])
# print(targets[0])
data = torch.stack(data, 0)
data_fc = torch.stack(data_fc, 0)
targets = torch.stack(targets, 0)
# print(data.size())
# print(targets.size())
# Save pkl
with open(self.pkl_fname, 'wb') as h:
pickle.dump((data, data_fc, targets), h, protocol=pickle.HIGHEST_PROTOCOL)
return data, data_fc, targets
def loadPkl(self, p):
with open(p, 'rb') as h:
feat = pickle.load(h)
return feat['wholeFaceFeat_42grid'], feat['wholeFaceFeat'], feat['label']
def loadFeat(self, p):
with open(p, 'rb') as h:
feat = pickle.load(h)
return feat['wholeFaceFeat_42grid'], feat['wholeFaceFeat']
def loadTargets(self, paths):
# XXX train set: 12 min; val set: 3 min
targets = []
def loadpkl(p):
with open(p, 'rb') as h:
feat = pickle.load(h)
targets.append(feat['label'])
Parallel(n_jobs=config.WORKERS, require='sharedmem', prefer="threads")(delayed(loadpkl)(p) for p in tqdm(paths))
# print(targets[0])
targets = torch.stack(targets, 0)
# print(targets.size())
# Save pkl
with open(self.pkl_fname, 'wb') as h:
pickle.dump(targets, h, protocol=pickle.HIGHEST_PROTOCOL)
return targets
def __getitem__(self, key):
# XXX
raise NotImplementedError
if not self.memoryAll:
return self.loadFeat(self.dataPaths[key])
else:
return self.data[key], self.data_fc[key], self.targets[key]
def __len__(self):
return len(self.dataPaths)
class DataMix:
def __init__(self, data_grid, data_fc):
self.data_grid = data_grid
self.data_fc = data_fc
self._data_len = len(self.data_grid)
def __getitem__(self, key):
return (self.data_grid[key], self.data_fc[key])
def __len__(self):
return self._data_len
class CArrayDataset(Dataset):
def __init__(self, carray):
self.carray = carray
def __getitem__(self, idx):
return self.carray[idx]
def __len__(self):
return len(self.carray)
class IJBCVerificationBaseDataset(Dataset):
"""
    Base class of IJB-C verification dataset to read necessary
csv files and provide general functions.
"""
def __init__(self, ijbc_data_root, leave_ratio=1.0):
        # read all csvs necessary for verification
self.ijbc_data_root = ijbc_data_root
dtype_sid_tid = {'SUBJECT_ID': str, 'TEMPLATE_ID': str}
self.metadata = pd.read_csv(op.join(ijbc_data_root, 'protocols', 'ijbc_metadata_with_age.csv'),
dtype=dtype_sid_tid)
test1_dir = op.join(ijbc_data_root, 'protocols', 'test1')
self.enroll_templates = pd.read_csv(op.join(test1_dir, 'enroll_templates.csv'), dtype=dtype_sid_tid)
self.verif_templates = pd.read_csv(op.join(test1_dir, 'verif_templates.csv'), dtype=dtype_sid_tid)
self.match = pd.read_csv(op.join(test1_dir, 'match.csv'), dtype=str)
if leave_ratio < 1.0: # shrink the number of verified pairs
indice = np.arange(len(self.match))
np.random.seed(0)
np.random.shuffle(indice)
left_number = int(len(self.match) * leave_ratio)
self.match = self.match.iloc[indice[:left_number]]
def _get_both_entries(self, idx):
enroll_tid = self.match.iloc[idx]['ENROLL_TEMPLATE_ID']
verif_tid = self.match.iloc[idx]['VERIF_TEMPLATE_ID']
enroll_entries = self.enroll_templates[self.enroll_templates.TEMPLATE_ID == enroll_tid]
verif_entries = self.verif_templates[self.verif_templates.TEMPLATE_ID == verif_tid]
return enroll_entries, verif_entries
def _get_cropped_path_suffix(self, entry):
sid = entry['SUBJECT_ID']
filepath = entry['FILENAME']
img_or_frames, fname = op.split(filepath)
fname_index, _ = op.splitext(fname)
cropped_path_suffix = op.join(img_or_frames, f'{sid}_{fname_index}.jpg')
return cropped_path_suffix
def __len__(self):
return len(self.match)
class IJBCVerificationDataset(IJBCVerificationBaseDataset):
"""
IJB-C verification dataset (`test1` in the folder) who transforms
the cropped faces into tensors.
Note that entries in this verification dataset contains lots of
repeated faces. A better way to evaluate a model's score is to
precompute all faces features and store them into disks. (
see `IJBCAllCroppedFacesDataset` and `IJBCVerificationPathDataset`)
"""
def __init__(self, ijbc_data_root):
super().__init__(ijbc_data_root)
self.transforms = transforms.Compose([
transforms.Resize([112, 112]),
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
def _get_cropped_face_image_by_entry(self, entry):
cropped_path_suffix = self._get_cropped_path_suffix(entry)
cropped_path = op.join(self.ijbc_data_root, 'cropped_faces', cropped_path_suffix)
return Image.open(cropped_path)
def _get_tensor_by_entries(self, entries):
faces_imgs = [self._get_cropped_face_image_by_entry(e) for idx, e in entries.iterrows()]
faces_tensors = [self.transforms(img) for img in faces_imgs]
return torch.stack(faces_tensors, dim=0)
def __getitem__(self, idx):
enroll_entries, verif_entries = self._get_both_entries(idx)
enroll_faces_tensor = self._get_tensor_by_entries(enroll_entries)
verif_faces_tensor = self._get_tensor_by_entries(verif_entries)
return {
"enroll_faces_tensor": enroll_faces_tensor,
"verif_faces_tensor": verif_faces_tensor
}
class IJBCVerificationPathDataset(IJBCVerificationBaseDataset):
"""
This dataset read the match file of verification set in IJB-C
(in the `test1` directory) and output the cropped faces' paths
of both enroll_template and verif_template for each match.
Models outside can use the path information to read their stored
features and compute the similarity score of enroll_template and
verif_template.
"""
def __init__(self, ijbc_data_root, occlusion_lower_bound=0, leave_ratio=1.0):
super().__init__(ijbc_data_root, leave_ratio=leave_ratio)
self.occlusion_lower_bound = occlusion_lower_bound
self.metadata['OCC_sum'] = self.metadata[[f'OCC{i}' for i in range(1, 19)]].sum(axis=1)
self.reindexed_meta = self.metadata.set_index(['SUBJECT_ID', 'FILENAME'])
def _filter_out_occlusion_insufficient_entries(self, entries):
if self.occlusion_lower_bound == 0:
return [entry for _, entry in entries.iterrows()]
out = []
for _, entry in entries.iterrows():
occlusion_sum = self.reindexed_meta.loc[(entry['SUBJECT_ID'], entry['FILENAME']), 'OCC_sum']
if occlusion_sum.values[0] >= self.occlusion_lower_bound:
out.append(entry)
return out
def __getitem__(self, idx):
enroll_entries, verif_entries = self._get_both_entries(idx)
is_same = (enroll_entries['SUBJECT_ID'].iloc[0] == verif_entries['SUBJECT_ID'].iloc[0])
is_same = 1 if is_same else 0
        enroll_template_id = enroll_entries['TEMPLATE_ID'].iloc[0]
        verif_template_id = verif_entries['TEMPLATE_ID'].iloc[0]
enroll_entries = self._filter_out_occlusion_insufficient_entries(enroll_entries)
verif_entries = self._filter_out_occlusion_insufficient_entries(verif_entries)
def path_suffixes(entries):
return [self._get_cropped_path_suffix(entry) for entry in entries]
return {
"enroll_template_id": enroll_template_id,
"verif_template_id": verif_template_id,
"enroll_path_suffixes": path_suffixes(enroll_entries),
"verif_path_suffixes": path_suffixes(verif_entries),
"is_same": is_same
}
class IJBVerificationPathDataset(Dataset):
"""
This dataset read the match file of verification set in ijb_dataset_root
(in the `meta` directory, the filename is sth. like
"ijbc_template_pair_label.txt") and output the cropped faces'
paths of both enroll_template and verif_template for each match.
Models outside can use the path information to read their stored
features and compute the similarity score of enroll_template and
verif_template.
"""
def __init__(self, ijb_dataset_root, leave_ratio=1.0, dataset_type='IJBB'):
# TODO implement the leave_ratio method
if dataset_type == 'IJBB':
match_filename = op.join(ijb_dataset_root, 'meta',
'ijbb_template_pair_label.txt')
elif dataset_type == 'IJBC':
match_filename = op.join(ijb_dataset_root, 'meta',
'ijbc_template_pair_label.txt')
else:
raise NotImplementedError
col_name = ["TEMPLATE_ID1", "TEMPLATE_ID2", "IS_SAME"]
self.match = pd.read_csv(match_filename, delim_whitespace=True,
header=None, dtype=str, names=col_name)
if leave_ratio < 1.0: # shrink the number of verified pairs
indice = np.arange(len(self.match))
np.random.seed(0)
np.random.shuffle(indice)
left_number = int(len(self.match) * leave_ratio)
self.match = self.match.iloc[indice[:left_number]]
def __getitem__(self, idx):
def path_suffixes(id_str):
path = f'{id_str}.jpg'
return [path]
id1 = self.match.iloc[idx]["TEMPLATE_ID1"]
id2 = self.match.iloc[idx]["TEMPLATE_ID2"]
return {
"enroll_template_id": id1,
"verif_template_id": id2,
"enroll_path_suffixes": path_suffixes(id1),
"verif_path_suffixes": path_suffixes(id2),
"is_same": self.match.iloc[idx]["IS_SAME"]
}
def __len__(self):
return len(self.match)
class IJBCAllCroppedFacesDataset(Dataset):
"""
    This dataset loads all faces available in IJB-C and transforms
them into tensors. The path for that face is output along with
its tensor.
This is for models to compute all faces' features and store them
into disks, otherwise the verification testing set contains too many
repeated faces that should not be computed again and again.
"""
def __init__(self, ijbc_data_root):
self.ijbc_data_root = ijbc_data_root
self.transforms = transforms.Compose([
transforms.Resize([112, 112]),
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
self.all_cropped_paths_img = sorted(glob(
op.join(self.ijbc_data_root, 'cropped_faces', 'img', '*.jpg')))
self.len_set1 = len(self.all_cropped_paths_img)
self.all_cropped_paths_frames = sorted(glob(
op.join(self.ijbc_data_root, 'cropped_faces', 'frames', '*.jpg')))
def __getitem__(self, idx):
if idx < self.len_set1:
path = self.all_cropped_paths_img[idx]
else:
path = self.all_cropped_paths_frames[idx - self.len_set1]
img = Image.open(path).convert('RGB')
tensor = self.transforms(img)
return {
"tensor": tensor,
"path": path,
}
def __len__(self):
return len(self.all_cropped_paths_frames) + len(self.all_cropped_paths_img)
class IJBCroppedFacesDataset(Dataset):
"""
    This dataset loads all faces available in IJB-B/C, aligns them,
    and transforms them into tensors.
The path for that face is output along with its tensor.
This is for models to compute all faces' features and store them
into disks, otherwise the verification testing set contains too many
repeated faces that should not be computed again and again.
"""
def __init__(self, ijbc_data_root, is_ijbb=True):
self.ijbc_data_root = ijbc_data_root
self.transforms = transforms.Compose([
transforms.Resize([112, 112]),
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
self.img_dir = op.join(self.ijbc_data_root, 'loose_crop')
if is_ijbb:
landmark_txt = 'ijbb_name_5pts_score.txt'
else:
landmark_txt = 'ijbc_name_5pts_score.txt'
landmark_path = op.join(self.ijbc_data_root,
'meta', landmark_txt)
self.imgs_list, self.landmarks_list = self.loadImgPathAndLandmarks(landmark_path)
self.alignment = Alignment()
def loadImgPathAndLandmarks(self, path):
imgs_list = []
landmarks_list = []
with open(path) as img_list:
lines = img_list.readlines()
for line in lines:
name_lmk_score = line.strip().split(' ')
img_name = os.path.join(self.img_dir, name_lmk_score[0])
lmk = np.array([float(x) for x in name_lmk_score[1:-1]], dtype=np.float32)
lmk = lmk.reshape( (5,2) )
imgs_list.append(img_name)
landmarks_list.append(lmk)
landmarks_list = np.array(landmarks_list)
return imgs_list, landmarks_list
def __getitem__(self, idx):
img_path = self.imgs_list[idx]
landmark = self.landmarks_list[idx]
img = cv2.imread(img_path)
# XXX cv2.cvtColor(img, cv2.COLOR_BGR2RGB) in the align function
img = self.alignment.align(img, landmark)
# img_feats.append(embedng.get(img,lmk))
img = Image.fromarray(img)
tensor = self.transforms(img)
return {
"tensor": tensor,
"path": img_path,
}
def __len__(self):
return len(self.imgs_list)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def make_square_box(box):
width = box[2] - box[0]
height = box[3] - box[1]
if width > height:
diff = width - height
box[1] -= diff // 2
box[3] += diff // 2
elif height > width:
diff = height - width
box[0] -= diff // 2
box[2] += diff // 2
return box
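# e.g. make_square_box([10, 20, 50, 100]) pads the 40x80 box symmetrically to
# 80x80 -> [-10, 20, 70, 100] (integer division keeps the box roughly centred).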
class IJBAVerificationDataset(Dataset):
def __init__(self, ijba_data_root='/tmp3/zhe2325138/IJB/IJB-A/', split_name='split1',
only_first_image=False, aligned_facial_3points=False,
crop_face=True):
self.ijba_data_root = ijba_data_root
split_root = op.join(ijba_data_root, 'IJB-A_11_sets', split_name)
self.only_first_image = only_first_image
self.metadata = pd.read_csv(op.join(split_root,
f'verify_metadata_{split_name[5:]}.csv'))
self.metadata = self.metadata.set_index('TEMPLATE_ID')
self.comparisons = pd.read_csv(op.join(split_root,
f'verify_comparisons_{split_name[5:]}.csv'),
header=None)
self.transform = transforms.Compose([
transforms.Resize([112, 112]),
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
self.aligned_facial_3points = aligned_facial_3points
self.src_facial_3_points = self._get_source_facial_3points()
self.crop_face = crop_face
def _get_source_facial_3points(self, output_size=(112, 112)):
# set source landmarks based on 96x112 size
src = np.array([
[30.2946, 51.6963], # left eye
[65.5318, 51.5014], # right eye
[48.0252, 71.7366], # nose
# [33.5493, 92.3655], # left mouth
# [62.7299, 92.2041], # right mouth
], dtype=np.float32)
# scale landmarkS to match output size
src[:, 0] *= (output_size[0] / 96)
src[:, 1] *= (output_size[1] / 112)
return src
def _get_face_img_from_entry(self, entry, square=True):
fname = entry["FILE"]
if fname[:5] == 'frame':
fname = 'frames' + fname[5:] # to fix error in annotation =_=
img = Image.open(op.join(self.ijba_data_root, 'images', fname)).convert('RGB')
if self.aligned_facial_3points:
raise NotImplementedError
else:
if self.crop_face:
# left, upper, right, lower
face_box = [entry['FACE_X'],
entry['FACE_Y'],
entry['FACE_X'] + entry['FACE_WIDTH'],
entry['FACE_Y'] + entry['FACE_HEIGHT']]
face_box = make_square_box(face_box) if square else face_box
face_img = img.crop(face_box)
else:
face_img = img
return face_img
def _get_tensor_from_entries(self, entries):
imgs = [self._get_face_img_from_entry(entry) for _, entry in entries.iterrows()]
tensors = torch.stack([
self.transform(img) for img in imgs
])
return tensors
def __getitem__(self, idx):
t1, t2 = self.comparisons.iloc[idx]
t1_entries, t2_entries = self.metadata.loc[[t1]], self.metadata.loc[[t2]]
if self.only_first_image:
t1_entries, t2_entries = t1_entries.iloc[:1], t2_entries.iloc[:1]
t1_tensors = self._get_tensor_from_entries(t1_entries)
t2_tensors = self._get_tensor_from_entries(t2_entries)
if self.only_first_image:
t1_tensors, t2_tensors = t1_tensors.squeeze(0), t2_tensors.squeeze(0)
s1, s2 = t1_entries['SUBJECT_ID'].iloc[0], t2_entries['SUBJECT_ID'].iloc[0]
is_same = 1 if (s1 == s2) else 0
return {
"comparison_idx": idx,
"t1_tensors": t1_tensors,
"t2_tensors": t2_tensors,
"is_same": is_same,
}
def __len__(self):
return len(self.comparisons)
class ARVerificationAllPathDataset(Dataset):
'/tmp3/biolin/datasets/face/ARFace/test2'
def __init__(self, dataset_root='/tmp2/zhe2325138/dataset/ARFace/mtcnn_aligned_and_cropped/'):
self.dataset_root = dataset_root
self.face_image_paths = sorted(glob(op.join(self.dataset_root, '*.png')))
self.transforms = transforms.Compose([
transforms.Resize([112, 112]),
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
def __getitem__(self, idx):
fpath = self.face_image_paths[idx]
fname, _ = op.splitext(op.basename(fpath))
image = Image.open(fpath)
image_tensor = self.transforms(image)
return {
'image_tensor': image_tensor,
'fname': fname
}
def __len__(self):
return len(self.face_image_paths)
if __name__ == "__main__":
with torch.no_grad():
# dataset = ENMDataset(config.FEAT_DIR, train=False, valSize=0.05)
        dataset = FaceFeatDataset(config.FEAT_DIR, train=False, valSize=0.2, memoryAll=True)  # memoryAll so items can be indexed below
print(dataset.train)
print(dataset)
# dataset1 = FaceFeatDataset(config.FEAT_DIR, train=True, valSize=0.2)
siamese_val_dataset = SiameseWholeFace(dataset)
        (feat_grid_1, feat_fc_1, feat_grid_2, feat_fc_2), label = siamese_val_dataset[-1]
        print(feat_grid_1.size(), feat_fc_1.size(), feat_grid_2.size(), feat_fc_2.size(), label)
        for i in range(len(siamese_val_dataset)):
            if i == 5:
                break
            (feat_grid_1, feat_fc_1, feat_grid_2, feat_fc_2), label = siamese_val_dataset[i]
            print(feat_grid_1.size(), feat_fc_1.size(), feat_grid_2.size(), feat_fc_2.size(), label)
|
the-stack_106_31867 | from pathlib import Path
from typing import Dict
def get_system_extra_hosts(extra_host_domain: str) -> Dict:
extra_hosts = {}
hosts_path = Path("/etc/hosts")
if hosts_path.exists() and extra_host_domain != "undefined":
with hosts_path.open() as hosts:
for line in hosts:
if extra_host_domain in line:
host = line.split()[1]
ip = line.split()[0]
extra_hosts[host] = ip
return extra_hosts
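# Usage sketch: with an /etc/hosts line such as "10.0.0.5 registry.internal.example",
# get_system_extra_hosts("internal.example") returns
# {"registry.internal.example": "10.0.0.5"}; passing "undefined" yields {}.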
|
the-stack_106_31868 | # -*- coding: utf-8 -*-
"""This module is deprecated."""
from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result, ResultByKey
from cloudant.query import Query
from cloudant.document import Document
import array_comparison as ac
import time
def get_database(dbname, user_name, password, url):
client = Cloudant(user_name, password, url=url)
client.connect()
return client.get(dbname, remote=True)
def add_gamestate_to_database(db, gamestate):
# Adds a new gamestate to the database.
print("Creating new entry to db")
gamestate_dict = {
"state": gamestate.state.tolist(),
"rounds": gamestate.rounds_played,
"weight": 0.7,
"play_count": 0
}
newDocument = db.create_document(gamestate_dict)
if newDocument.exists():
print("Document {0} created".format(gamestate.id))
def get_states_for_round(database, round_number):
# Query that returns all known moves for given round.
selector = {
"rounds": round_number
}
fields = [
"_id",
"state",
"weight",
"play_count"
]
query = Query(database, selector=selector, fields=fields)
return [{
"_id": res["_id"],
"state": res["state"],
"weight": res["weight"],
"play_count": res["play_count"]
} for res in query.result]
def get_state_from_database(db, gamestate):
states = get_states_for_round(db, gamestate.rounds_played)
for state in states:
if ac.are_sqr_arrays_equal(state["state"], gamestate.state):
return state
def update_document(db, state_dict):
with Document(db, state_dict["_id"]) as doc:
doc["weight"] = state_dict["weight"]
doc["play_count"] = state_dict["play_count"]
time.sleep(1)
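# Usage sketch (credentials, URL and the gamestate object are placeholders):
#
#   db = get_database("gamestates", "user", "password", "https://<account>.cloudantnosqldb.appdomain.cloud")
#   state = get_state_from_database(db, gamestate)
#   if state is None:
#       add_gamestate_to_database(db, gamestate)
#   else:
#       update_document(db, state)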
|
the-stack_106_31869 | """
Copyright 2019-present Han Seokhyeon.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def save_data(loss_acc, test_loss, test_acc):
data = np.array(loss_acc)
test = np.zeros((2, data.shape[1]))
data = np.concatenate((data, test))
data[4, 0] = test_loss
data[5, 0] = test_acc
now = datetime.now()
filename = "./result/{}.csv".format(str(now)[:-7])
np.savetxt(filename, data, delimiter=',', newline='\n')
return
def plot_data(loss_acc, test_loss, test_acc):
data = np.array(loss_acc)
fig, ax1 = plt.subplots()
plt.title("Train data")
ax1.set_xlabel("Epoch")
# ax1.set_xticks(range(data.shape[1]))
ax1.set_ylabel("Loss")
ax1.plot(data[0], label='train')
ax1.plot(data[2], label='valid')
ax2 = ax1.twinx()
ax2.set_ylabel("Accuracy")
ax2.plot(data[1], label='train')
ax2.plot(data[3], label='valid')
plt.text(0.85, 0.5, "Test acc:{:.4f}".format(test_acc), ha='center', va='center', transform=ax2.transAxes)
plt.grid()
plt.legend()
plt.show()
now = datetime.now()
filename = "./result/{}.png".format(str(now)[:-7])
fig.savefig(filename)
return
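# Usage sketch: `loss_acc` is assumed to be a 4-row sequence collected per epoch,
# [train_loss, train_acc, valid_loss, valid_acc], and ./result/ is assumed to exist:
#
#   save_data(loss_acc, test_loss, test_acc)
#   plot_data(loss_acc, test_loss, test_acc)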
|
the-stack_106_31870 | # Copyright 2022, Lefebvre Dalloz Services
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import zlib
import requests
from transformers import AutoTokenizer, PreTrainedTokenizer, TensorType
from transformer_deploy.benchmarks.utils import print_timings, setup_logging, track_infer_time
setup_logging()
tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained("philschmid/MiniLM-L6-H384-uncased-sst2")
tokens = tokenizer(
text="This live event is great. I will sign-up for Infinity.",
max_length=16,
truncation=True,
return_tensors=TensorType.NUMPY,
)
# https://github.com/triton-inference-server/server/blob/main/docs/protocol/extension_classification.md
url = "http://127.0.0.1:8000/v2/models/transformer_onnx_model/versions/1/infer"
message = {
"id": "42",
"inputs": [
{
"name": "input_ids",
"shape": tokens["input_ids"].shape,
"datatype": "INT64",
"data": tokens["input_ids"].tolist(),
},
{
"name": "token_type_ids",
"shape": tokens["token_type_ids"].shape,
"datatype": "INT64",
"data": tokens["token_type_ids"].tolist(),
},
{
"name": "attention_mask",
"shape": tokens["attention_mask"].shape,
"datatype": "INT64",
"data": tokens["attention_mask"].tolist(),
},
],
"outputs": [
{
"name": "output",
"parameters": {"binary_data": False},
}
],
}
time_buffer = list()
session = requests.Session()
for _ in range(10000):
bytes_message = bytes(json.dumps(message), encoding="raw_unicode_escape")
request_body = zlib.compress(bytes_message)
_ = session.post(
url,
data=request_body,
headers={
"Content-Encoding": "gzip",
"Accept-Encoding": "gzip",
"Inference-Header-Content-Length": str(len(bytes_message)),
},
)
for _ in range(100):
with track_infer_time(time_buffer):
bytes_message = bytes(json.dumps(message), encoding="raw_unicode_escape")
request_body = zlib.compress(bytes_message)
_ = session.post(
url,
data=request_body,
headers={
"Content-Encoding": "gzip",
"Accept-Encoding": "gzip",
"Inference-Header-Content-Length": str(len(bytes_message)),
},
)
print_timings(name="triton (onnx backend) - requests", timings=time_buffer)
|
the-stack_106_31871 | #!/usr/bin/env python3
import re
import sys
def imm_to_bin(num: str, bits: int, signed: bool=False) -> str:
"""Convert a number to a binary string
Args:
num: The number to be converted to binary.
Can be either Hexadecimal, decimal or binary.
bits: The number of bits of the binary value returned
signed: True if the resulting value should be signed
Returns:
String representing the number in binary
"""
# hexadecimal
if "0x" in num:
num = int(num, 16)
# binary
elif "0b" in num:
num = int(num, 2)
# decimal
else:
num = int(num)
# check that the immediate is in range to fit in the bits
if signed:
maximum = 2 ** (bits - 1) - 1
minimum = -1 * maximum - 1
else:
maximum = 2 ** bits - 1
minimum = 0
if not minimum <= int(num) <= maximum:
raise ValueError("Immediate {} out of range {} to {}"
.format(num, minimum, maximum))
# convert to binary
if num < 0:
bitmask = 2 ** bits - 1
num = -num
return bin((bitmask ^ num) + 1)[2:]
else:
return bin(num)[2:].zfill(bits)
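# A few illustrative conversions:
#   imm_to_bin("5", 7)         -> "0000101"
#   imm_to_bin("-3", 7, True)  -> "1111101"    (two's complement)
#   imm_to_bin("0x3F", 10)     -> "0000111111"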
def convert_pseudoinstructions(code: [str]) -> [str]:
"""Convert .spaces to .fill 0's and movis to lui+llis"""
newcode = []
for line in code:
m = re.match(r"^(\w+:)?(?:\s+)?(\.?\w+)\s?(.*)\s?", line)
label, operation, args = m[1], m[2], m[3]
if label is None:
label = ""
if operation == ".space":
newcode.append("{} .fill 0".format(label))
for i in range(int(args) - 1):
newcode.append(".fill 0")
elif operation == "movi":
regA, imm = args.split(",")
newcode.append("{} lui {},{}".format(label, regA, int(imm) // 64))
newcode.append("lli {},{}".format(regA, int(imm) % 64))
else:
newcode.append(line)
return newcode
def assemble_line(line: str) -> str:
"""Convert the assembly instruction to executable machine code
Args:
line: The instruction to be assembled
Returns:
The machine code value as a binary string
"""
m = re.match(r"^(\w+:)?(?:\s+)?(\.?\w+)\s?(.*)\s?", line)
label, operation, args = m[1], m[2], m[3]
opcode = {"add": "000",
"addi": "001",
"nand": "010",
"lui": "011",
"sw": "100",
"lw": "101",
"beq": "110",
"jalr": "111"}.get(operation)
# RRR-type
if operation in ("add", "nand"):
regA, regB, regC = (imm_to_bin(n, 3) for n in args.split(","))
return opcode + regA + regB + "0000" + regC
# RRI-type
elif operation in ("addi", "sw", "lw", "beq", "jalr"):
regA, regB, imm = args.split(",")
return opcode + imm_to_bin(regA, 3) + imm_to_bin(regB, 3)\
+ imm_to_bin(imm, 7, True)
# RI-type
elif operation == "lui":
regA, imm = args.split(",")
return opcode + imm_to_bin(regA, 3) + imm_to_bin(imm, 10)
# pseudo-instructions
elif operation == "nop":
return "00000000000000000"
elif operation == "halt":
return "111000000" + imm_to_bin("1", 7)
elif operation == "lli":
regA, imm = args.split(",")
return "001" + imm_to_bin(regA, 3) * 2 + "0" + imm_to_bin(imm, 6)
elif operation == ".fill":
return imm_to_bin(args, 16, True)
def has_code(line: str) -> bool:
"""
Return True if there's code on the line
(so it's not a comment or an empty line).
"""
    stripped = line.strip()
    return bool(stripped) and not stripped.startswith("#")
def assemble_code(code: [str]) -> [str]:
"""Convert the assembly source code to machine code
Args:
code: the source code in RiSC-16 assembly
Returns:
RiSC-16 machine code
"""
# strip empty lines and unnecessary whitespace
code = (line.strip() for line in code if has_code(line))
code = convert_pseudoinstructions(code)
# find the linenumbers for all the labels in the code for branching
LABELS = dict(map(lambda x: (x[1].partition(":")[0], x[0]),
list(filter(lambda x: x[1].partition(" ")[0].endswith(":"),
enumerate(code)))))
machinecode = []
for line in code:
try:
print("assembling '{}'".format(line))
machinecode.append(assemble_line(line))
print(machinecode[-1])
except Exception as e:
print("Error on line '{}': {}".format(line, e))
return
return machinecode
def main():
if len(sys.argv) <= 1:
print("Missing arguments")
return
with open(sys.argv[1]) as f:
code = f.read().splitlines()
try:
outfile = sys.argv[2]
except IndexError:
outfile = sys.argv[1].partition(".")[0]
with open(outfile, "w") as f:
f.write("\n".join(assemble_code(code)))
if __name__ == "__main__":
main()
|
the-stack_106_31872 | from django.db import models, transaction
from django.forms.models import model_to_dict
from .element import Element
from .team import Team
from typing import List, Dict, Any
import hashlib
import json
class ElementGroupManager(models.Manager):
def _hash_elements(self, elements: List) -> str:
elements_list: List[Dict] = []
for element in elements:
el_dict = model_to_dict(element)
[el_dict.pop(key) for key in ["event", "id", "group"]]
elements_list.append(el_dict)
return hashlib.md5(json.dumps(elements_list, sort_keys=True, default=str).encode("utf-8")).hexdigest()
def create(self, *args: Any, **kwargs: Any):
elements = kwargs.pop("elements")
with transaction.atomic():
kwargs["hash"] = self._hash_elements(elements)
try:
with transaction.atomic():
group = super().create(*args, **kwargs)
except:
return ElementGroup.objects.get(
hash=kwargs["hash"], team_id=kwargs["team"].pk if kwargs.get("team") else kwargs["team_id"],
)
for element in elements:
element.group = group
for element in elements:
setattr(element, "pk", None)
Element.objects.bulk_create(elements)
return group
class ElementGroup(models.Model):
class Meta:
constraints = [models.UniqueConstraint(fields=["team", "hash"], name="unique hash for each team")]
team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)
hash: models.CharField = models.CharField(max_length=400, null=True, blank=True)
objects = ElementGroupManager()
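# Usage sketch (field values and the `team` instance are illustrative only):
#
#   elements = [Element(tag_name="button", order=0), Element(tag_name="div", order=1)]
#   group = ElementGroup.objects.create(team=team, elements=elements)
#   # creating again with identical elements returns the existing group (same hash)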
|
the-stack_106_31873 | #
# Command Generator
#
# Send SNMP GET request using the following options:
#
# * with SNMPv3 with user 'usr-md5-des', MD5 auth and DES privacy protocols
# * use remote SNMP Engine ID 0x80004fb805636c6f75644dab22cc (USM
# autodiscovery will run)
# * over IPv4/UDP
# * to an Agent at demo.snmplabs.com:161
# * setting SNMPv2-MIB::sysORDescr.1 to new value (type taken from MIB)
#
from pysnmp.entity.rfc3413.oneliner import cmdgen
from pysnmp.proto import rfc1902
cmdGen = cmdgen.CommandGenerator()
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.setCmd(
cmdgen.UsmUserData(
'usr-md5-des', 'authkey1', 'privkey1',
securityEngineId=rfc1902.OctetString(
hexValue='80004fb805636c6f75644dab22cc'
)
),
cmdgen.UdpTransportTarget(('demo.snmplabs.com', 161)),
(cmdgen.MibVariable('SNMPv2-MIB', 'sysORDescr', 1), 'new system name'),
)
# Check for errors and print out results
if errorIndication:
print(errorIndication)
else:
if errorStatus:
print('%s at %s' % (
errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex)-1][0] or '?'
)
)
else:
for name, val in varBinds:
print('%s = %s' % (name.prettyPrint(), val.prettyPrint()))
|
the-stack_106_31875 | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Super class to handle all operations related to base schema."""
from marshmallow import fields, post_dump
from auth_api.models import ma
class BaseSchema(ma.ModelSchema): # pylint: disable=too-many-ancestors
"""Base Schema."""
class Meta: # pylint: disable=too-few-public-methods
"""Meta class to declare any class attributes."""
datetimeformat = '%Y-%m-%dT%H:%M:%S+00:00' # Default output date format.
created_by = fields.Function(
lambda obj: f'{obj.created_by.firstname} {obj.created_by.lastname}' if obj.created_by else None
)
modified_by = fields.Function(
lambda obj: f'{obj.modified_by.firstname} {obj.modified_by.lastname}' if obj.modified_by else None
)
@post_dump(pass_many=True)
def _remove_empty(self, data, many): # pylint: disable=no-self-use
"""Remove all empty values and versions from the dumped dict."""
if not many:
for key in list(data):
if key == 'versions':
data.pop(key)
return {
key: value for key, value in data.items()
if value is not None
}
for item in data:
for key in list(item):
if (item[key] is None) or (key == 'versions'):
item.pop(key)
return data
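# For example, a dumped row {'name': 'ABC Corp', 'branch': None, 'versions': [...]}
# comes out of the schema as {'name': 'ABC Corp'}.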
|
the-stack_106_31877 | from galaxy.workflow import render
from .workflow_support import yaml_to_model
TEST_WORKFLOW_YAML = """
steps:
- type: "data_input"
order_index: 0
tool_inputs: {"name": "input1"}
position: {"top": 3, "left": 3}
- type: "data_input"
order_index: 1
tool_inputs: {"name": "input2"}
position: {"top": 6, "left": 4}
- type: "tool"
tool_id: "cat1"
order_index: 2
inputs:
input1:
connection:
- "@output_step": 0
output_name: "di1"
position: {"top": 13, "left": 10}
- type: "tool"
tool_id: "cat1"
order_index: 3
inputs:
input1:
connection:
- "@output_step": 0
output_name: "di1"
position: {"top": 33, "left": 103}
"""
def test_render():
# Doesn't check anything about the render code - just exercises to
# ensure that obvious errors aren't thrown.
workflow_canvas = render.WorkflowCanvas()
workflow = yaml_to_model(TEST_WORKFLOW_YAML)
step_0, step_1, step_2, step_3 = workflow.steps
workflow_canvas.populate_data_for_step(
step_0,
"input1",
[],
[{"name": "di1"}],
)
workflow_canvas.populate_data_for_step(
step_1,
"input2",
[],
[{"name": "di1"}],
)
workflow_canvas.populate_data_for_step(
step_2,
"cat wrapper",
[{"name": "input1", "label": "i1"}],
[{"name": "out1"}]
)
workflow_canvas.populate_data_for_step(
step_3,
"cat wrapper",
[{"name": "input1", "label": "i1"}],
[{"name": "out1"}]
)
workflow_canvas.add_steps()
workflow_canvas.finish()
assert workflow_canvas.canvas.tostring()
|