the-stack_106_15083
from dataclasses import dataclass
from datetime import datetime
from typing import List
from loguru import logger
from pymodbus.constants import Defaults
from mate3.devices import DeviceValues
from mate3.modbus_client import CachingModbusClient, ModbusTcpClient, NonCachingModbusClient
from mate3.read import AllModelReads, ModelRead
from mate3.sunspec.fields import Field, Mode, Uint16Field, Uint32Field
from mate3.sunspec.models import MODEL_DEVICE_IDS, SunSpecEndModel, SunSpecHeaderModel
@dataclass(frozen=False)
class ReadingRange:
"""
Mates work better when reading a contiguous range of values at once, as opposed to individually. This is a
simple wrapper for such a contiguous range.
"""
fields: List[Field]
start: int
size: int
@property
def end(self):
return self.start + self.size
def extend(self, field: Field):
self.fields.append(field)
self.size += field.size
class Mate3Client:
"""
The main Mate3 object users will interact with. Can (and should) be used as a context manager.
"""
sunspec_register: int = 40000
def __init__(
self,
host: str,
port: int = Defaults.Port,
cache_path: str = None,
cache_only: bool = False,
cache_writeable: bool = False,
):
self.host: str = host
self.port: int = port
self._cache_path: str = cache_path
self._cache_only: bool = cache_only
self._cache_writeable: bool = cache_writeable
self._client: ModbusTcpClient = None
self._devices: DeviceValues = None
def connect(self):
"""
Connect to the mate over modbus.
"""
if self._cache_path is not None:
self._client = CachingModbusClient(
host=self.host,
port=self.port,
cache_path=self._cache_path,
cache_only=self._cache_only,
writeable=self._cache_writeable,
)
else:
self._client = NonCachingModbusClient(self.host, self.port)
# Now read everything. Why? Because most use of the API assumes fields have already been read (e.g. to get
# the devices, or the addresses of fields, etc.)
self.read_all()
def close(self):
"""
Close the modbus connection to the mate.
"""
self._client.close()
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def devices(self) -> DeviceValues:
if self._devices is None:
raise RuntimeError("Can't access devices until after first read")
return self._devices
def _get_reading_ranges(self, fields):
"""
Get the ranges of registers which can be read as a contiguous block, which allows for greater performance than
reading a single register at a time.
"""
# The Mate3 gets unhappy if one tries to read too much at once
max_range_size = 100
# Loop through fields in start order, finding contiguous blocks of registers:
ranges = []
previous_range = None
for field in sorted(fields, key=lambda x: x.start):
if (
previous_range is None
or previous_range.end != field.start
or previous_range.size + field.size >= max_range_size
):
previous_range = ReadingRange(fields=[field], start=field.start, size=field.size)
ranges.append(previous_range)
else:
previous_range.extend(field)
return ranges
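# --- Added illustrative note (not part of the original module) ---
# Example: three fields starting at registers 1, 3 and 6 with sizes 2, 2 and 1
# collapse into two ReadingRanges: one covering registers 1-4 (the first two
# fields are contiguous, since 1 + 2 == 3) and one covering register 6, so
# only two modbus reads are needed instead of three.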
def _read_model(self, device_address: int, first: bool, all_reads: AllModelReads):
"""
Read an individual model at `device_address` and return the model class along with its raw field reads. Use
`first` to specify that this is the first block - see comment below.
"""
# Read the first register for the device ID:
registers = self._client.read_holding_registers(address=device_address, count=2)
# If first, then this is the SunSpecHeaderModel, which has a device ID of Uint32 type not Uint16 like the rest.
if first:
_, device_id = Uint32Field._from_registers(None, registers[:2])
else:
# Don't use the Uint16Field parser here: for the SunSpec end block the raw value is 65535, which the parser
# treats as the 'not implemented' marker and converts to None.
device_id = registers[0]
if device_id not in MODEL_DEVICE_IDS:
logger.warning(f"Unknown model type with device ID {device_id}")
return None, None
model = MODEL_DEVICE_IDS[device_id]
# TODO: Make sure we don't read past the end of length (as reported by device). This shouldn't happen except in
# e.g. a case where the (old) device model firmware returns only 10 fields, and then 'new' one (whatever we're
# using in our spec) specifies 11, then we'd accidentally try to read one more.
# Get the readable fields:
fields = [field for field in model.__model_fields__ if field.mode in (Mode.R, Mode.RW)]
# Order fields by start registry, as this is the order in which we will receive the values
fields = sorted(fields, key=lambda f: f.start)
# Get registers in large ranges, as this drastically improves performance and isn't so demanding of the mate3
model_reads = ModelRead()
for reading_range in self._get_reading_ranges(fields):
logger.debug(
f"Reading range {reading_range.start} -> {reading_range.end}, of {len(reading_range.fields)} fields"
)
register_number = device_address + reading_range.start - 1 # -1 as starts are 1-indexed in fields
registers = self._client.read_holding_registers(address=register_number, count=reading_range.size)
read_time = datetime.now()
offset = 0
for field in reading_range.fields:
address = register_number + offset
try:
field_registers = registers[offset : offset + field.size]
if len(field_registers) != field.size:
raise RuntimeError(
"Didn't get the right number of registers from reading range for this field."
)
implemented, raw_value = field.from_registers(field_registers)
model_reads.add(
field.name, raw_value=raw_value, implemented=implemented, address=address, time=read_time
)
except Exception as e:
logger.warning(f"Error reading field {field.name} - so setting as not implemented. Message: {e}")
model_reads.add(field.name, raw_value=None, implemented=False, address=address, time=read_time)
offset += field.size
return model, model_reads
def read_all(self):
"""
Read all values from all devices. If you want to read only specified fields use e.g.
client.devices.mate3.system_name.read()
This method, however, is optimised for reading everything.
"""
register = self.sunspec_register
max_models = 30
first = True
all_reads = AllModelReads()
for _ in range(max_models):
model, model_reads = self._read_model(register, first, all_reads)
first = False
# Unknown device
if not model:
continue
# No more blocks to read
if model == SunSpecEndModel:
break
# Save the model reads for this model:
all_reads.add(model, model_reads)
# Move register to next block - that is, add the length of the block, which is what follows after the length
# field. For normal fields, we'll already have read 2 registers (DID and length) so we must add this to our
# total increment. For the SunSpecHeaderModel, we need to add 4 as DID is a UInt32 in this case (i.e. 2
# registers) and there's a model ID field (1 register) and length (1 register)
register += model_reads["length"].raw_value + (4 if model == SunSpecHeaderModel else 2)
# create devices if needed:
if self._devices is None:
self._devices = DeviceValues(client=self)
# update:
self._devices.update(all_reads)
def read_all_modbus_values_unparsed(self):
"""
This method just reads all of the values from the modbus devices, with no care about parsing them. All it
assumes is the standard structure of two registers (DID + length), except for the header.
"""
register = self.sunspec_register
max_models = 30
first = True
reads = {}
for _ in range(max_models):
# Data starts generally at 3rd register, except for start (SunSpecHeaderModel) where it starts at 5th
data_offset = 4 if first else 2
registers = self._client.read_holding_registers(address=register, count=data_offset)
# The length is the last register (i.e. the one before data)
_, length = Uint16Field._from_registers(None, registers[-1:])
# We're done when length == 0 i.e. SunSpecEndModel
if length == 0:
break
# Now read everything (in maximum bunches of 100)
batch = 100
for start_offset in range(0, length, batch):
count = min(batch, length - start_offset)
registers = self._client.read_holding_registers(
address=register + data_offset + start_offset, count=count
)
addresses = [register + i for i in range(count)]
for addr, bites in zip(addresses, registers):
reads[addr] = bites
# See comment in self.read re the increment of 2 or 4
register += length + (4 if first else 2)
first = False
return reads
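# --- Illustrative usage sketch (added; not part of the original module) ---
# The client is intended to be used as a context manager: entering it connects
# over modbus and performs an initial read_all(), and exiting closes the
# connection. The host below is a placeholder.
if __name__ == "__main__":
    with Mate3Client(host="192.168.0.123") as client:
        # All values were read during connect(), so the devices are populated.
        print(client.devices)
        # Individual fields can also be re-read on demand, e.g. (per the
        # read_all docstring): client.devices.mate3.system_name.read()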
the-stack_106_15085
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import unittest
import tests.utils as test_utils
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
)
class TestLabelSmoothing(unittest.TestCase):
def setUp(self):
# build dictionary
self.d = test_utils.dummy_dictionary(3)
vocab = len(self.d)
self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens
self.assertEqual(self.d.pad(), 1)
self.assertEqual(self.d.eos(), 2)
self.assertEqual(self.d.unk(), 3)
pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841
# build dataset
self.data = [
# the first batch item has padding
{
"source": torch.LongTensor([w1, eos]),
"target": torch.LongTensor([w1, eos]),
},
{
"source": torch.LongTensor([w1, eos]),
"target": torch.LongTensor([w1, w1, eos]),
},
]
self.sample = next(test_utils.dummy_dataloader(self.data))
# build model
self.args = argparse.Namespace()
self.args.sentence_avg = False
self.args.report_accuracy = False
self.args.probs = (
torch.FloatTensor(
[
# pad eos unk w1 w2 w3
[0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
[0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
[0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
]
)
.unsqueeze(0)
.expand(2, 3, 7)
) # add batch dimension
self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
self.model = self.task.build_model(self.args)
def test_nll_loss(self):
self.args.label_smoothing = 0.1
nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
self.args, self.task
)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(
self.model, self.sample
)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
self.model, self.sample
)
self.assertLess(abs(nll_loss - nll_logging_output["loss"]), 1e-6)
self.assertLess(abs(nll_loss - smooth_logging_output["nll_loss"]), 1e-6)
def test_padding(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample)
def get_one_no_padding(idx):
# create a new sample with just a single batch item so that there's
# no padding
sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
args1 = copy.copy(self.args)
args1.probs = args1.probs[idx, :, :].unsqueeze(0)
model1 = self.task.build_model(args1)
loss1, _, _ = crit(model1, sample1)
return loss1
loss1 = get_one_no_padding(0)
loss2 = get_one_no_padding(1)
self.assertAlmostEqual(loss, loss1 + loss2)
def test_reduction(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample, reduce=True)
unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
self.assertAlmostEqual(loss, unreduced_loss.sum())
def test_zero_eps(self):
self.args.label_smoothing = 0.0
nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
self.args, self.task
)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(
self.model, self.sample
)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
self.model, self.sample
)
self.assertAlmostEqual(nll_loss, smooth_loss)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-6)
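# --- Added reference sketch (not part of the original test file) ---
# A minimal stand-alone version of label-smoothed NLL loss, to make explicit
# what the criterion under test is expected to compute. This is an
# illustrative approximation rather than fairseq's exact implementation; with
# eps == 0 it reduces to plain NLL loss, which is what test_zero_eps checks.
def _reference_label_smoothed_nll_loss(lprobs, target, eps):
    # lprobs: (N, V) log-probabilities, target: (N,) gold token indices
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.sum(dim=-1)
    eps_i = eps / lprobs.size(-1)
    return ((1.0 - eps) * nll_loss + eps_i * smooth_loss).sum()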
if __name__ == "__main__":
unittest.main()
the-stack_106_15086
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import keyword_view
from google.ads.googleads.v9.services.types import keyword_view_service
from .base import KeywordViewServiceTransport, DEFAULT_CLIENT_INFO
class KeywordViewServiceGrpcTransport(KeywordViewServiceTransport):
"""gRPC backend transport for KeywordViewService.
Service to manage keyword views.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_keyword_view(
self,
) -> Callable[
[keyword_view_service.GetKeywordViewRequest], keyword_view.KeywordView
]:
r"""Return a callable for the get keyword view method over gRPC.
Returns the requested keyword view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetKeywordViewRequest],
~.KeywordView]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_keyword_view" not in self._stubs:
self._stubs["get_keyword_view"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.KeywordViewService/GetKeywordView",
request_serializer=keyword_view_service.GetKeywordViewRequest.serialize,
response_deserializer=keyword_view.KeywordView.deserialize,
)
return self._stubs["get_keyword_view"]
__all__ = ("KeywordViewServiceGrpcTransport",)
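# --- Illustrative usage sketch (added; not part of the generated file) ---
# In normal use this transport is constructed indirectly by the
# KeywordViewService client. It can also be built directly, but the call below
# needs valid application-default credentials plus the usual Google Ads
# request metadata (e.g. a developer token), so it is left commented out; the
# resource name is a made-up placeholder.
#
# transport = KeywordViewServiceGrpcTransport()
# request = keyword_view_service.GetKeywordViewRequest(
#     resource_name="customers/1234567890/keywordViews/111~222",
# )
# keyword_view_result = transport.get_keyword_view(request)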
the-stack_106_15087
import pddl
import sccs
import timers
from collections import defaultdict
DEBUG = False
def handle_axioms(operators, axioms, goals):
axioms_by_atom = get_axioms_by_atom(axioms)
axiom_literals = compute_necessary_axiom_literals(axioms_by_atom, operators, goals)
axiom_init = get_axiom_init(axioms_by_atom, axiom_literals)
with timers.timing("Simplifying axioms"):
axioms = simplify_axioms(axioms_by_atom, axiom_literals)
axioms = compute_negative_axioms(axioms_by_atom, axiom_literals)
# NOTE: compute_negative_axioms more or less invalidates axioms_by_atom.
# Careful with that axe, Eugene!
axiom_layers = compute_axiom_layers(axioms, axiom_init)
if DEBUG:
verify_layering_condition(axioms, axiom_init, axiom_layers)
return axioms, list(axiom_init), axiom_layers
def verify_layering_condition(axioms, axiom_init, axiom_layers):
# This function is only used for debugging.
variables_in_heads = set()
literals_in_heads = set()
variables_with_layers = set()
for axiom in axioms:
head = axiom.effect
variables_in_heads.add(head.positive())
literals_in_heads.add(head)
variables_with_layers = set(axiom_layers.keys())
# 1. Each derived variable only appears in heads with one
# polarity, i.e., never positively *and* negatively.
if False:
print("Verifying 1...")
for literal in literals_in_heads:
assert literal.negate() not in literals_in_heads, literal
else:
print("Verifying 1... [skipped]")
# We currently violate this condition because we introduce
# "negated axioms". See issue454 and issue453.
# 2. A variable has a defined layer iff it appears in a head.
# (This is stricter than it needs to be; we could allow
# derived variables that are never generated by a rule.
# But this test follows the axiom simplification step, and
# after simplification this should not be too strict.)
# All layers are integers and at least 0.
# (Note: the "-1" layer for non-derived variables is
# set elsewhere.)
print("Verifying 2...")
assert variables_in_heads == variables_with_layers
for atom, layer in axiom_layers.items():
assert isinstance(layer, int)
assert layer >= 0
# 3. For every derived variable, it occurs in axiom_init iff
# its negation occurs as the head of an axiom.
if False:
print("Verifying 3...")
for init in list(axiom_init):
assert init.negate() in literals_in_heads
for literal in literals_in_heads:
assert (literal.negated) == (literal.positive() in axiom_init)
else:
print("Verifying 3 [weaker version]...")
# We currently violate this condition because we introduce
# "negated axioms". See issue454 and issue453.
#
# The weaker version we test here is "For every derived variable:
# [it occurs in axiom_init iff its negation occurs as the
# head of an axiom] OR [it occurs with both polarities in
# heads of axioms]."
for init in list(axiom_init):
assert init.negate() in literals_in_heads
for literal in literals_in_heads:
assert ((literal.negated) == (literal.positive() in axiom_init)
or (literal.negate() in literals_in_heads))
# 4. For every rule head <- ... cond ... where cond is a literal
# of a derived variable where the layer of head is equal to
# the layer of cond, cond occurs with the same polarity in heads.
#
# Note regarding issue454 and issue453: Because of the negated axioms
# mentioned in these issues, a derived variable may appear with *both*
# polarities in heads. This makes this test less strong than it would
# be otherwise. When these issues are addressed and axioms only occur
# with one polarity in heads, this test will remain correct in its
# current form, but it will be able to detect more violations of the
# layering property.
print("Verifying 4...")
for axiom in axioms:
head = axiom.effect
head_positive = head.positive()
body = axiom.condition
for cond in body:
cond_positive = cond.positive()
if (cond_positive in variables_in_heads and
axiom_layers[cond_positive] == axiom_layers[head_positive]):
assert cond in literals_in_heads
# 5. For every rule head <- ... cond ... where cond is a literal
# of a derived variable, the layer of head is greater or equal
# to the layer of cond.
print("Verifying 5...")
for axiom in axioms:
head = axiom.effect
head_positive = head.positive()
body = axiom.condition
for cond in body:
cond_positive = cond.positive()
if cond_positive in variables_in_heads:
# We need the assertion to be on a single line for
# our error handler to be able to print the line.
assert (axiom_layers[cond_positive] <= axiom_layers[head_positive]), (axiom_layers[cond_positive], axiom_layers[head_positive])
def get_axioms_by_atom(axioms):
axioms_by_atom = {}
for axiom in axioms:
axioms_by_atom.setdefault(axiom.effect, []).append(axiom)
return axioms_by_atom
def compute_axiom_layers(axioms, axiom_init):
# We include this assertion to make sure testing membership in
# axiom_init is efficient.
assert isinstance(axiom_init, set)
# Collect all atoms for derived variables.
derived_atoms = set()
for axiom in axioms:
head_atom = axiom.effect.positive()
derived_atoms.add(head_atom)
# Collect dependencies between derived variables:
# 1. "u depends on v" if there is an axiom with variable u
# in the head and variable v in the body.
# 2. "u NBF-depends on v" if additionally the value with which
# v occurs in the body is its NBF (negation-by-failure) value.
#
# We represent depends_on as a dictionary mapping each "u" to
# the list of "v"s such that u depends on v. Note that we do not
# use a defaultdict because the SCC finding algorithm requires
# that all nodes are present as keys in the dict, even if they
# have no successors.
#
# We do not represent NBF-depends on independently, but we do keep
# track of a set of triples "weighted_depends_on" which contains all
# triples (u, v, weight) representing dependencies from u to v,
# where weight is 1 for NBF dependencies and 0 for other
# dependencies. Each such triple represents the constraint
# layer(u) >= layer(v) + weight.
depends_on = dict((u, []) for u in derived_atoms)
weighted_depends_on = set()
for axiom in axioms:
if (axiom.effect in axiom_init or
axiom.effect.negated and axiom.effect.positive() not in axiom_init):
# Skip axioms whose head is the negation-by-failure value.
# These are redundant axioms that should eventually go away
# or at least have some kind of special status that marks
# them as "not the primary axioms".
continue
u = axiom.effect.positive()
for condition in axiom.condition:
v = condition.positive()
if v in derived_atoms:
v_polarity = not condition.negated
v_init_polarity = v in axiom_init
# TODO: Don't include duplicates in depends_on.
depends_on[u].append(v)
if v_polarity == v_init_polarity:
weight = 1
else:
weight = 0
weighted_depends_on.add((u, v, weight))
# Compute the SCCs of dependencies according to depends_on,
# in topological order.
atom_sccs = sccs.get_sccs_adjacency_dict(depends_on)
# Compute an index mapping each atom to the id of its SCC.
atom_to_scc_id = {}
for scc in atom_sccs:
scc_id = id(scc)
for atom in scc:
atom_to_scc_id[atom] = scc_id
# Compute a weighted digraph representing the dependencies
# between SCCs. SCCs U and V are represented by their IDs.
# - We have id(V) in scc_weighted_depends_on[id(U)] iff
# some variable u in U depends on some variable v in V.
# - If there is a dependency, scc_weighted_depends_on[id(U)][id(V)]
# is the weight of the dependency: +1 if an NBF-dependency
# exists, 0 otherwise.
# We want the digraph to be acyclic and hence ignore self-loops.
# A self-loop of weight 1 indicates non-stratifiability.
scc_weighted_depends_on = defaultdict(dict)
for u, v, weight in weighted_depends_on:
scc_u_id = atom_to_scc_id[u]
scc_v_id = atom_to_scc_id[v]
if scc_u_id == scc_v_id:
# Ignore self-loops unless they are self-loops based on
# NBF dependencies, which occur iff the axioms are
# non-stratifiable.
if weight == 1:
raise ValueError(
"Cyclic dependencies in axioms; cannot stratify.")
else:
old_weight = scc_weighted_depends_on[scc_u_id].get(scc_v_id, -1)
if weight > old_weight:
scc_weighted_depends_on[scc_u_id][scc_v_id] = weight
# The layer of variable u is the longest path (taking into account
# the weights) in the weighted digraph defined by
# scc_weighted_depends_on from the SCC of u to any sink.
# We first compute the longest paths in the SCC digraph. This
# computation exploits that atom_sccs is given in
# topological sort order.
scc_id_to_layer = {}
for scc in reversed(atom_sccs):
scc_id = id(scc)
layer = 0
for succ_scc_id, weight in scc_weighted_depends_on[scc_id].items():
layer = max(layer, scc_id_to_layer[succ_scc_id] + weight)
scc_id_to_layer[scc_id] = layer
# Finally, we set the layers for all nodes based on the layers of
# their SCCs.
layers = {}
for scc in atom_sccs:
scc_layer = scc_id_to_layer[id(scc)]
for atom in scc:
layers[atom] = scc_layer
return layers
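# --- Added worked example (not part of the original module) ---
# A small illustration of the layering above, with hypothetical derived atoms
# A, B and C, none of which occur in axiom_init (so their NBF value is
# "false"): the rule "A <- B" uses B with its non-NBF value and contributes
# the constraint layer(A) >= layer(B) + 0, while "B <- not C" uses C with its
# NBF value and contributes layer(B) >= layer(C) + 1. The longest-path
# computation then assigns layer(C) = 0, layer(B) = 1 and layer(A) = 1.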
def compute_necessary_axiom_literals(axioms_by_atom, operators, goal):
necessary_literals = set()
queue = []
def register_literals(literals, negated):
for literal in literals:
if literal.positive() in axioms_by_atom: # This is an axiom literal
if negated:
literal = literal.negate()
if literal not in necessary_literals:
necessary_literals.add(literal)
queue.append(literal)
# Initialize queue with axioms required for goal and operators.
register_literals(goal, False)
for op in operators:
register_literals(op.precondition, False)
for (cond, _) in op.add_effects:
register_literals(cond, False)
for (cond, _) in op.del_effects:
register_literals(cond, True)
while queue:
literal = queue.pop()
axioms = axioms_by_atom[literal.positive()]
for axiom in axioms:
register_literals(axiom.condition, literal.negated)
return necessary_literals
def get_axiom_init(axioms_by_atom, necessary_literals):
result = set()
for atom in axioms_by_atom:
if atom not in necessary_literals and atom.negate() in necessary_literals:
# Initial value for axiom: False (which is omitted due to closed world
# assumption) unless it is only needed negatively.
result.add(atom)
return result
def simplify_axioms(axioms_by_atom, necessary_literals):
necessary_atoms = set([literal.positive() for literal in necessary_literals])
new_axioms = []
for atom in necessary_atoms:
axioms = simplify(axioms_by_atom[atom])
axioms_by_atom[atom] = axioms
new_axioms += axioms
return new_axioms
def remove_duplicates(alist):
next_elem = 1
for i in range(1, len(alist)):
if alist[i] != alist[i - 1]:
alist[next_elem] = alist[i]
next_elem += 1
alist[next_elem:] = []
def simplify(axioms):
"""Remove duplicate axioms, duplicates within axioms, and dominated axioms."""
# Remove duplicates from axiom conditions.
for axiom in axioms:
axiom.condition.sort()
remove_duplicates(axiom.condition)
# Remove dominated axioms.
axioms_to_skip = set()
axioms_by_literal = {}
for axiom in axioms:
if axiom.effect in axiom.condition:
axioms_to_skip.add(id(axiom))
else:
for literal in axiom.condition:
axioms_by_literal.setdefault(literal, set()).add(id(axiom))
for axiom in axioms:
if id(axiom) in axioms_to_skip:
continue # Required to keep one of multiple identical axioms.
if not axiom.condition: # empty condition: dominates everything
return [axiom]
literals = iter(axiom.condition)
dominated_axioms = axioms_by_literal[next(literals)]
for literal in literals:
dominated_axioms &= axioms_by_literal[literal]
for dominated_axiom in dominated_axioms:
if dominated_axiom != id(axiom):
axioms_to_skip.add(dominated_axiom)
return [axiom for axiom in axioms if id(axiom) not in axioms_to_skip]
def compute_negative_axioms(axioms_by_atom, necessary_literals):
new_axioms = []
for literal in necessary_literals:
if literal.negated:
new_axioms += negate(axioms_by_atom[literal.positive()])
else:
new_axioms += axioms_by_atom[literal]
return new_axioms
def negate(axioms):
assert axioms
result = [pddl.PropositionalAxiom(axioms[0].name, [], axioms[0].effect.negate())]
for axiom in axioms:
condition = axiom.condition
if len(condition) == 0:
# The derived fact we want to negate is triggered with an
# empty condition, so it is always true and its negation
# is always false.
return []
elif len(condition) == 1: # Handle easy special case quickly.
new_literal = condition[0].negate()
for result_axiom in result:
result_axiom.condition.append(new_literal)
else:
new_result = []
for literal in condition:
literal = literal.negate()
for result_axiom in result:
new_axiom = result_axiom.clone()
new_axiom.condition.append(literal)
new_result.append(new_axiom)
result = new_result
result = simplify(result)
return result
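# --- Added worked example (not part of the original module) ---
# negate() performs a De Morgan-style expansion. For a hypothetical derived
# atom h defined by the two axioms "h <- a, b" and "h <- c", the negation is
# "not-h <- (not a or not b) and not c", which multiplies out to the rules
# "not-h <- not a, not c" and "not-h <- not b, not c" before the final
# simplify() call removes duplicates and dominated rules.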
the-stack_106_15088
import datetime
import functools
import warnings
import numpy as np
import pandas as pd
from . import dtypes, duck_array_ops, nputils, ops
from .arithmetic import SupportsArithmetic
from .common import ImplementsArrayReduce, ImplementsDatasetReduce
from .concat import concat
from .formatting import format_array_flat
from .indexes import propagate_indexes
from .options import _get_keep_attrs
from .pycompat import integer_types
from .utils import (
either_dict_or_kwargs,
hashable,
is_scalar,
maybe_wrap_array,
peek_at,
safe_cast_to_index,
)
from .variable import IndexVariable, Variable, as_variable
def check_reduce_dims(reduce_dims, dimensions):
if reduce_dims is not ...:
if is_scalar(reduce_dims):
reduce_dims = [reduce_dims]
if any(dim not in dimensions for dim in reduce_dims):
raise ValueError(
"cannot reduce over dimensions %r. expected either '...' to reduce over all dimensions or one or more of %r."
% (reduce_dims, dimensions)
)
def unique_value_groups(ar, sort=True):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
sort : boolean, optional
Whether or not to sort unique values.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
inverse, values = pd.factorize(ar, sort=sort)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
if g >= 0:
# pandas uses -1 to mark NaN, but doesn't include them in values
groups[g].append(n)
return values, groups
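# --- Added example (not part of the original module) ---
# For instance, unique_value_groups(["b", "a", "b", "a"]) returns the unique
# values ["a", "b"] together with groups == [[1, 3], [0, 2]]: each inner list
# holds the positions in the input that belong to the corresponding value.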
def _dummy_copy(xarray_obj):
from .dataset import Dataset
from .dataarray import DataArray
if isinstance(xarray_obj, Dataset):
res = Dataset(
{
k: dtypes.get_fill_value(v.dtype)
for k, v in xarray_obj.data_vars.items()
},
{
k: dtypes.get_fill_value(v.dtype)
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims
},
xarray_obj.attrs,
)
elif isinstance(xarray_obj, DataArray):
res = DataArray(
dtypes.get_fill_value(xarray_obj.dtype),
{
k: dtypes.get_fill_value(v.dtype)
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims
},
dims=[],
name=xarray_obj.name,
attrs=xarray_obj.attrs,
)
else: # pragma: no cover
raise AssertionError
return res
def _is_one_or_none(obj):
return obj == 1 or obj is None
def _consolidate_slices(slices):
"""Consolidate adjacent slices in a list of slices.
"""
result = []
last_slice = slice(None)
for slice_ in slices:
if not isinstance(slice_, slice):
raise ValueError("list element is not a slice: %r" % slice_)
if (
result
and last_slice.stop == slice_.start
and _is_one_or_none(last_slice.step)
and _is_one_or_none(slice_.step)
):
last_slice = slice(last_slice.start, slice_.stop, slice_.step)
result[-1] = last_slice
else:
result.append(slice_)
last_slice = slice_
return result
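# --- Added example (not part of the original module) ---
# For instance, _consolidate_slices([slice(0, 2), slice(2, 5), slice(7, 9)])
# merges the first two adjacent slices and returns
# [slice(0, 5, None), slice(7, 9, None)].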
def _inverse_permutation_indices(positions):
"""Like inverse_permutation, but also handles slices.
Parameters
----------
positions : list of np.ndarray or slice objects.
If slice objects, all are assumed to be slices.
Returns
-------
np.ndarray of indices or None, if no permutation is necessary.
"""
if not positions:
return None
if isinstance(positions[0], slice):
positions = _consolidate_slices(positions)
if positions == slice(None):
return None
positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions]
indices = nputils.inverse_permutation(np.concatenate(positions))
return indices
class _DummyGroup:
"""Class for keeping track of grouped dimensions without coordinates.
Should not be user visible.
"""
__slots__ = ("name", "coords", "size")
def __init__(self, obj, name, coords):
self.name = name
self.coords = coords
self.size = obj.sizes[name]
@property
def dims(self):
return (self.name,)
@property
def ndim(self):
return 1
@property
def values(self):
return range(self.size)
@property
def shape(self):
return (self.size,)
def __getitem__(self, key):
if isinstance(key, tuple):
key = key[0]
return self.values[key]
def _ensure_1d(group, obj):
if group.ndim != 1:
# try to stack the dims of the group into a single dim
orig_dims = group.dims
stacked_dim = "stacked_" + "_".join(orig_dims)
# these dimensions get created by the stack operation
inserted_dims = [dim for dim in group.dims if dim not in group.coords]
# the copy is necessary here, otherwise read only array raises error
# in pandas: https://github.com/pydata/pandas/issues/12813
group = group.stack(**{stacked_dim: orig_dims}).copy()
obj = obj.stack(**{stacked_dim: orig_dims})
else:
stacked_dim = None
inserted_dims = []
return group, obj, stacked_dim, inserted_dims
def _unique_and_monotonic(group):
if isinstance(group, _DummyGroup):
return True
else:
index = safe_cast_to_index(group)
return index.is_unique and index.is_monotonic
def _apply_loffset(grouper, result):
"""
(copied from pandas)
if loffset is set, offset the result index
This is NOT an idempotent routine, it will be applied
exactly once to the result.
Parameters
----------
result : Series or DataFrame
the result of resample
"""
needs_offset = (
isinstance(grouper.loffset, (pd.DateOffset, datetime.timedelta))
and isinstance(result.index, pd.DatetimeIndex)
and len(result.index) > 0
)
if needs_offset:
result.index = result.index + grouper.loffset
grouper.loffset = None
class GroupBy(SupportsArithmetic):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
groupby object are with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
Dataset.groupby
DataArray.groupby
"""
__slots__ = (
"_full_index",
"_inserted_dims",
"_group",
"_group_dim",
"_group_indices",
"_groups",
"_obj",
"_restore_coord_dims",
"_stacked_dim",
"_unique_coord",
"_dims",
)
def __init__(
self,
obj,
group,
squeeze=False,
grouper=None,
bins=None,
restore_coord_dims=True,
cut_kwargs=None,
):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group : DataArray
Array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
grouper : pd.Grouper, optional
Used for grouping values along the `group` array.
bins : array-like, optional
If `bins` is specified, the groups will be discretized into the
specified bins by `pandas.cut`.
restore_coord_dims : bool, default True
If True, also restore the dimension order of multi-dimensional
coordinates.
cut_kwargs : dict, optional
Extra keyword arguments to pass to `pandas.cut`
"""
if cut_kwargs is None:
cut_kwargs = {}
from .dataarray import DataArray
if grouper is not None and bins is not None:
raise TypeError("can't specify both `grouper` and `bins`")
if not isinstance(group, (DataArray, IndexVariable)):
if not hashable(group):
raise TypeError(
"`group` must be an xarray.DataArray or the "
"name of an xarray variable or dimension"
)
group = obj[group]
if len(group) == 0:
raise ValueError(f"{group.name} must not be empty")
if group.name not in obj.coords and group.name in obj.dims:
# DummyGroups should not appear on groupby results
group = _DummyGroup(obj, group.name, group.coords)
if getattr(group, "name", None) is None:
group.name = "group"
group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)
(group_dim,) = group.dims
expected_size = obj.sizes[group_dim]
if group.size != expected_size:
raise ValueError(
"the group variable's length does not "
"match the length of this variable along its "
"dimension"
)
full_index = None
if bins is not None:
if duck_array_ops.isnull(bins).all():
raise ValueError("All bin edges are NaN.")
binned = pd.cut(group.values, bins, **cut_kwargs)
new_dim_name = group.name + "_bins"
group = DataArray(binned, group.coords, name=new_dim_name)
full_index = binned.categories
if grouper is not None:
index = safe_cast_to_index(group)
if not index.is_monotonic:
# TODO: sort instead of raising an error
raise ValueError("index must be monotonic for resampling")
full_index, first_items = self._get_index_and_items(index, grouper)
sbins = first_items.values.astype(np.int64)
group_indices = [slice(i, j) for i, j in zip(sbins[:-1], sbins[1:])] + [
slice(sbins[-1], None)
]
unique_coord = IndexVariable(group.name, first_items.index)
elif group.dims == (group.name,) and _unique_and_monotonic(group):
# no need to factorize
group_indices = np.arange(group.size)
if not squeeze:
# use slices to do views instead of fancy indexing
# equivalent to: group_indices = group_indices.reshape(-1, 1)
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group
else:
if group.isnull().any():
# drop any NaN valued groups.
# also drop obj values where group was NaN
# Use where instead of reindex to account for duplicate coordinate labels.
obj = obj.where(group.notnull(), drop=True)
group = group.dropna(group_dim)
# look through group to find the unique values
group_as_index = safe_cast_to_index(group)
sort = bins is None and (not isinstance(group_as_index, pd.MultiIndex))
unique_values, group_indices = unique_value_groups(
group_as_index, sort=sort
)
unique_coord = IndexVariable(group.name, unique_values)
if len(group_indices) == 0:
if bins is not None:
raise ValueError(
"None of the data falls within bins with edges %r" % bins
)
else:
raise ValueError(
"Failed to group data. Are you grouping by a variable that is all NaN?"
)
# specification for the groupby operation
self._obj = obj
self._group = group
self._group_dim = group_dim
self._group_indices = group_indices
self._unique_coord = unique_coord
self._stacked_dim = stacked_dim
self._inserted_dims = inserted_dims
self._full_index = full_index
self._restore_coord_dims = restore_coord_dims
# cached attributes
self._groups = None
self._dims = None
@property
def dims(self):
if self._dims is None:
self._dims = self._obj.isel(
**{self._group_dim: self._group_indices[0]}
).dims
return self._dims
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self._unique_coord.values, self._group_indices))
return self._groups
def __len__(self):
return self._unique_coord.size
def __iter__(self):
return zip(self._unique_coord.values, self._iter_grouped())
def __repr__(self):
return "{}, grouped over {!r} \n{!r} groups with labels {}.".format(
self.__class__.__name__,
self._unique_coord.name,
self._unique_coord.size,
", ".join(format_array_flat(self._unique_coord, 30).split()),
)
def _get_index_and_items(self, index, grouper):
from .resample_cftime import CFTimeGrouper
s = pd.Series(np.arange(index.size), index)
if isinstance(grouper, CFTimeGrouper):
first_items = grouper.first_items(index)
else:
first_items = s.groupby(grouper).first()
_apply_loffset(grouper, first_items)
full_index = first_items.index
if first_items.isnull().any():
first_items = first_items.dropna()
return full_index, first_items
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self._group_indices:
yield self._obj.isel(**{self._group_dim: indices})
def _infer_concat_args(self, applied_example):
if self._group_dim in applied_example.dims:
coord = self._group
positions = self._group_indices
else:
coord = self._unique_coord
positions = None
(dim,) = coord.dims
if isinstance(coord, _DummyGroup):
coord = None
return coord, dim, positions
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
g = f if not reflexive else lambda x, y: f(y, x)
applied = self._yield_binary_applied(g, other)
combined = self._combine(applied)
return combined
return func
def _yield_binary_applied(self, func, other):
dummy = None
for group_value, obj in self:
try:
other_sel = other.sel(**{self._group.name: group_value})
except AttributeError:
raise TypeError(
"GroupBy objects only support binary ops "
"when the other argument is a Dataset or "
"DataArray"
)
except (KeyError, ValueError):
if self._group.name not in other.dims:
raise ValueError(
"incompatible dimensions for a grouped "
"binary operation: the group variable %r "
"is not a dimension on the other argument" % self._group.name
)
if dummy is None:
dummy = _dummy_copy(other)
other_sel = dummy
result = func(obj, other_sel)
yield result
def _maybe_restore_empty_groups(self, combined):
"""Our index contained empty groups (e.g., from a resampling). If we
reduced on that dimension, we want to restore the full index.
"""
if self._full_index is not None and self._group.name in combined.dims:
indexers = {self._group.name: self._full_index}
combined = combined.reindex(**indexers)
return combined
def _maybe_unstack(self, obj):
"""This gets called if we are applying on an array with a
multidimensional group."""
if self._stacked_dim is not None and self._stacked_dim in obj.dims:
obj = obj.unstack(self._stacked_dim)
for dim in self._inserted_dims:
if dim in obj.coords:
del obj.coords[dim]
obj._indexes = propagate_indexes(obj._indexes, exclude=self._inserted_dims)
return obj
def fillna(self, value):
"""Fill missing values in this object by group.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : valid type for the grouped object's fillna method
Used to fill all matching missing values by group.
Returns
-------
same type as the grouped object
See also
--------
Dataset.fillna
DataArray.fillna
"""
out = ops.fillna(self, value)
return out
def quantile(
self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True
):
"""Compute the qth quantile over each array in the groups and
concatenate them together into a new array.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be between 0 and 1
inclusive.
dim : `...`, str or sequence of str, optional
Dimension(s) over which to apply quantile.
Defaults to the grouped dimension.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
skipna : bool, optional
Whether to skip missing values when aggregating.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result is a
scalar. If multiple quantiles are given, the first axis of
the result corresponds to the quantile. In either case a
quantile dimension is added to the return array. The other
dimensions are the dimensions that remain after the
reduction of the array.
See Also
--------
numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
Examples
--------
>>> da = xr.DataArray(
... [[1.3, 8.4, 0.7, 6.9], [0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]],
... coords={"x": [0, 0, 1], "y": [1, 1, 2, 2]},
... dims=("x", "y"),
... )
>>> ds = xr.Dataset({"a": da})
>>> da.groupby("x").quantile(0)
<xarray.DataArray (x: 2, y: 4)>
array([[0.7, 4.2, 0.7, 1.5],
[6.5, 7.3, 2.6, 1.9]])
Coordinates:
quantile float64 0.0
* y (y) int64 1 1 2 2
* x (x) int64 0 1
>>> ds.groupby("y").quantile(0, dim=...)
<xarray.Dataset>
Dimensions: (y: 2)
Coordinates:
quantile float64 0.0
* y (y) int64 1 2
Data variables:
a (y) float64 0.7 0.7
>>> da.groupby("x").quantile([0, 0.5, 1])
<xarray.DataArray (x: 2, y: 4, quantile: 3)>
array([[[0.7 , 1. , 1.3 ],
[4.2 , 6.3 , 8.4 ],
[0.7 , 5.05, 9.4 ],
[1.5 , 4.2 , 6.9 ]],
[[6.5 , 6.5 , 6.5 ],
[7.3 , 7.3 , 7.3 ],
[2.6 , 2.6 , 2.6 ],
[1.9 , 1.9 , 1.9 ]]])
Coordinates:
* y (y) int64 1 1 2 2
* quantile (quantile) float64 0.0 0.5 1.0
* x (x) int64 0 1
>>> ds.groupby("y").quantile([0, 0.5, 1], dim=...)
<xarray.Dataset>
Dimensions: (quantile: 3, y: 2)
Coordinates:
* quantile (quantile) float64 0.0 0.5 1.0
* y (y) int64 1 2
Data variables:
a (y, quantile) float64 0.7 5.35 8.4 0.7 2.25 9.4
"""
if dim is None:
dim = self._group_dim
out = self.map(
self._obj.__class__.quantile,
shortcut=False,
q=q,
dim=dim,
interpolation=interpolation,
keep_attrs=keep_attrs,
skipna=skipna,
)
return out
def where(self, cond, other=dtypes.NA):
"""Return elements from `self` or `other` depending on `cond`.
Parameters
----------
cond : DataArray or Dataset with boolean dtype
Locations at which to preserve this object's values.
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where ``cond`` is False.
By default, inserts missing values.
Returns
-------
same type as the grouped object
See also
--------
Dataset.where
"""
return ops.where_method(self, cond, other)
def _first_or_last(self, op, skipna, keep_attrs):
if isinstance(self._group_indices[0], integer_types):
# NB. this is currently only used for reductions along an existing
# dimension
return self._obj
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
return self.reduce(op, self._group_dim, skipna=skipna, keep_attrs=keep_attrs)
def first(self, skipna=None, keep_attrs=None):
"""Return the first element of each group along the group dimension
"""
return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)
def last(self, skipna=None, keep_attrs=None):
"""Return the last element of each group along the group dimension
"""
return self._first_or_last(duck_array_ops.last, skipna, keep_attrs)
def assign_coords(self, coords=None, **coords_kwargs):
"""Assign coordinates by group.
See also
--------
Dataset.assign_coords
Dataset.swap_dims
"""
coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, "assign_coords")
return self.map(lambda ds: ds.assign_coords(**coords_kwargs))
def _maybe_reorder(xarray_obj, dim, positions):
order = _inverse_permutation_indices(positions)
if order is None or len(order) != xarray_obj.sizes[dim]:
return xarray_obj
else:
return xarray_obj[{dim: order}]
class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
"""
var = self._obj.variable
for indices in self._group_indices:
yield var[{self._group_dim: indices}]
def _concat_shortcut(self, applied, dim, positions=None):
# nb. don't worry too much about maintaining this method -- it does
# speed things up, but it's not very interpretable and there are much
# faster alternatives (e.g., doing the grouped aggregation in a
# compiled language)
stacked = Variable.concat(applied, dim, shortcut=True)
reordered = _maybe_reorder(stacked, dim, positions)
result = self._obj._replace_maybe_drop_dims(reordered)
return result
def _restore_dim_order(self, stacked):
def lookup_order(dimension):
if dimension == self._group.name:
(dimension,) = self._group.dims
if dimension in self._obj.dims:
axis = self._obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dims, key=lookup_order)
return stacked.transpose(*new_order, transpose_coords=self._restore_coord_dims)
def map(self, func, shortcut=False, args=(), **kwargs):
"""Apply a function to each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
``*args`` : tuple, optional
Positional arguments passed to `func`.
``**kwargs``
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped)
return self._combine(applied, shortcut=shortcut)
def apply(self, func, shortcut=False, args=(), **kwargs):
"""
Backward compatible implementation of ``map``
See Also
--------
DataArrayGroupBy.map
"""
warnings.warn(
"GroupBy.apply may be deprecated in the future. Using GroupBy.map is encouraged",
PendingDeprecationWarning,
stacklevel=2,
)
return self.map(func, shortcut=shortcut, args=args, **kwargs)
def _combine(self, applied, restore_coord_dims=False, shortcut=False):
"""Recombine the applied objects like the original."""
applied_example, applied = peek_at(applied)
coord, dim, positions = self._infer_concat_args(applied_example)
if shortcut:
combined = self._concat_shortcut(applied, dim, positions)
else:
combined = concat(applied, dim)
combined = _maybe_reorder(combined, dim, positions)
if isinstance(combined, type(self._obj)):
# only restore dimension order for arrays
combined = self._restore_dim_order(combined)
# assign coord when the applied function does not return that coord
if coord is not None and dim not in applied_example.dims:
if shortcut:
coord_var = as_variable(coord)
combined._coords[coord.name] = coord_var
else:
combined.coords[coord.name] = coord
combined = self._maybe_restore_empty_groups(combined)
combined = self._maybe_unstack(combined)
return combined
def reduce(
self, func, dim=None, axis=None, keep_attrs=None, shortcut=True, **kwargs
):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing
an np.ndarray over an integer valued axis.
dim : `...`, str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither are supplied, then
`func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
If True, the array's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim is None:
dim = self._group_dim
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
def reduce_array(ar):
return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)
check_reduce_dims(dim, self.dims)
return self.map(reduce_array, shortcut=shortcut)
ops.inject_reduce_methods(DataArrayGroupBy)
ops.inject_binary_ops(DataArrayGroupBy)
class DatasetGroupBy(GroupBy, ImplementsDatasetReduce):
def map(self, func, args=(), shortcut=None, **kwargs):
"""Apply a function to each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
args : tuple, optional
Positional arguments to pass to `func`.
**kwargs
            Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset or DataArray
The result of splitting, applying and combining this dataset.
"""
# ignore shortcut if set (for now)
applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())
return self._combine(applied)
def apply(self, func, args=(), shortcut=None, **kwargs):
"""
Backward compatible implementation of ``map``
See Also
--------
DatasetGroupBy.map
"""
warnings.warn(
"GroupBy.apply may be deprecated in the future. Using GroupBy.map is encouraged",
PendingDeprecationWarning,
stacklevel=2,
)
return self.map(func, shortcut=shortcut, args=args, **kwargs)
def _combine(self, applied):
"""Recombine the applied objects like the original."""
applied_example, applied = peek_at(applied)
coord, dim, positions = self._infer_concat_args(applied_example)
combined = concat(applied, dim)
combined = _maybe_reorder(combined, dim, positions)
# assign coord when the applied function does not return that coord
if coord is not None and dim not in applied_example.dims:
combined[coord.name] = coord
combined = self._maybe_restore_empty_groups(combined)
combined = self._maybe_unstack(combined)
return combined
def reduce(self, func, dim=None, keep_attrs=None, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing
an np.ndarray over an integer valued axis.
dim : `...`, str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
            and 'axis' arguments can be supplied. If neither is supplied, then
            `func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim is None:
dim = self._group_dim
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
def reduce_dataset(ds):
return ds.reduce(func, dim, keep_attrs, **kwargs)
check_reduce_dims(dim, self.dims)
return self.map(reduce_dataset)
def assign(self, **kwargs):
"""Assign data variables by group.
See also
--------
Dataset.assign
"""
return self.map(lambda ds: ds.assign(**kwargs))
ops.inject_reduce_methods(DatasetGroupBy)
ops.inject_binary_ops(DatasetGroupBy)
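# --- Added illustrative sketch (not part of the original module) ---
# A minimal, hedged example of how the map/reduce methods above are reached
# through xarray's public groupby API; the dimension names and data below are
# assumptions made purely for illustration.
if __name__ == "__main__":
    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        np.arange(12.0).reshape(4, 3),
        dims=("time", "x"),
        coords={"time": [1, 2, 1, 2]},
    )
    # DataArrayGroupBy.map: apply a function per group, then recombine
    # following the stacking rules described in the docstrings above.
    centered = da.groupby("time").map(lambda arr: arr - arr.mean())
    # DataArrayGroupBy.reduce: collapse each group along the group dimension.
    group_means = da.groupby("time").reduce(np.mean, dim="time")
    # DatasetGroupBy.map works the same way on each sub-dataset.
    ds_sums = da.to_dataset(name="var").groupby("time").map(lambda ds: ds.sum())
    print(centered, group_means, ds_sums, sep="\n")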
|
the-stack_106_15089
|
#!/usr/bin/python
#
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script reads the global interface data collected by
# compute_interfaces_info_overall.py, and writes out the code which adds
# bindings for origin-trial-enabled features at runtime.
import optparse
import os
import posixpath
import sys
from collections import defaultdict, namedtuple
from code_generator import (initialize_jinja_env, normalize_and_sort_includes,
render_template)
from idl_reader import IdlReader
from utilities import (create_component_info_provider, write_file,
idl_filename_to_component)
from v8_utilities import (binding_header_filename, v8_class_name,
v8_class_name_or_partial, origin_trial_feature_name)
# Make sure extension is .py, not .pyc or .pyo, so it doesn't depend on caching
MODULE_PYNAME = os.path.splitext(os.path.basename(__file__))[0] + '.py'
OriginTrialInterfaceInfo = namedtuple(
'OriginTrialInterfaceInfo',
['name', 'v8_class', 'v8_class_or_partial', 'is_global'])
def get_install_functions(interfaces, feature_names):
"""Construct a list of V8 bindings installation functions for each feature
on each interface.
interfaces is a list of OriginTrialInterfaceInfo tuples
feature_names is a list of strings, containing names of features which can
be installed on those interfaces.
"""
return [{
'condition':
'RuntimeEnabledFeatures::%sEnabled' % feature_name,
'name':
feature_name,
'install_method':
'Install%s' % feature_name,
'interface_is_global':
interface_info.is_global,
'global_type_check_method':
interface_global_type_check_method(interface_info),
'v8_class':
interface_info.v8_class,
'v8_class_or_partial':
interface_info.v8_class_or_partial,
} for feature_name in feature_names for interface_info in interfaces]
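# Illustrative sketch only (the feature and interface names below are
# assumptions, not taken from any IDL file): a single entry returned by
# get_install_functions() looks roughly like
#   {
#       'condition': 'RuntimeEnabledFeatures::FooFeatureEnabled',
#       'name': 'FooFeature',
#       'install_method': 'InstallFooFeature',
#       'interface_is_global': False,
#       'global_type_check_method': None,
#       'v8_class': 'V8BarInterface',
#       'v8_class_or_partial': 'V8BarInterfacePartial',
#   }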
def get_origin_trial_feature_names_from_interface(interface, runtime_features):
feature_names = set()
def add_if_not_none(value):
if value:
feature_names.add(value)
if interface.is_partial:
add_if_not_none(origin_trial_feature_name(interface, runtime_features))
for operation in interface.operations:
add_if_not_none(origin_trial_feature_name(operation, runtime_features))
for attribute in interface.attributes:
add_if_not_none(origin_trial_feature_name(attribute, runtime_features))
return feature_names
def read_idl_file(reader, idl_filename):
definitions = reader.read_idl_file(idl_filename)
interfaces = definitions.interfaces
includes = definitions.includes
# There should only be a single interface defined in an IDL file. Return it.
assert len(interfaces) == 1, (
"Expected one interface in file %r, found %d" %
(idl_filename, len(interfaces)))
return (list(interfaces.values())[0], includes)
def interface_is_global(interface):
return 'Global' in interface.extended_attributes
def interface_global_type_check_method(interface_info):
"""Generate the name of the method on ExecutionContext used to check if the
context matches the type of the interface, which is a global.
Returns None for non-global interfaces.
"""
if not interface_info.is_global:
return None
return 'Is%s' % interface_info.name
def origin_trial_features_info(info_provider, reader, idl_filenames,
target_component):
"""Read a set of IDL files and compile the mapping between interfaces and
the conditional features defined on them.
Returns a tuple (features_for_type, types_for_feature, includes):
- features_for_type is a mapping of interface->feature
- types_for_feature is the reverse mapping: feature->interface
- includes is a set of header files which need to be included in the
generated implementation code.
"""
features_for_type = defaultdict(set)
types_for_feature = defaultdict(set)
include_files = set()
runtime_features = info_provider.component_info['runtime_enabled_features']
for idl_filename in idl_filenames:
interface, includes = read_idl_file(reader, idl_filename)
feature_names = get_origin_trial_feature_names_from_interface(
interface, runtime_features)
# If this interface is a mixin, we don't generate V8 bindings code for
# it.
# TODO(crbug.com/1061995): This incorrectly ignores includes in the
# mixin idl like "SomeInterface includes MixinInterface".
if interface.is_mixin:
continue
        # If this interface includes another one,
# it inherits any conditional features from it.
for include in includes:
assert include.interface == interface.name, (
"'includes' interface identifier %r in file %r should be %r" %
(include.interface, idl_filename, interface.name))
mixin, _ = read_idl_file(
reader,
info_provider.interfaces_info[include.mixin].get('full_path'))
feature_names |= get_origin_trial_feature_names_from_interface(
mixin, runtime_features)
feature_names = list(feature_names)
if feature_names:
is_global = interface_is_global(interface)
if interface.is_partial:
# For partial interfaces, we need to generate different
# |include_files| if the parent interface is in a different
# component.
parent_interface_info = \
info_provider.interfaces_info[interface.name]
parent_interface, _ = read_idl_file(
reader, parent_interface_info.get('full_path'))
is_global = is_global or interface_is_global(parent_interface)
parent_component = idl_filename_to_component(
parent_interface_info.get('full_path'))
if interface.is_partial and target_component != parent_component:
include_files.add('bindings/%s/v8/%s' % (
parent_component, binding_header_filename(interface.name)))
include_files.add(
'bindings/%s/v8/%s' %
(target_component,
binding_header_filename(interface.name + 'Partial')))
else:
include_files.add('bindings/%s/v8/%s' % (
target_component, binding_header_filename(interface.name)))
# If this is a partial interface in the same component as
# its parent, then treat it as a non-partial interface.
interface.is_partial = False
interface_info = OriginTrialInterfaceInfo(
interface.name, v8_class_name(interface),
v8_class_name_or_partial(interface), is_global)
for feature_name in feature_names:
features_for_type[interface_info].add(feature_name)
types_for_feature[feature_name].add(interface_info)
return features_for_type, types_for_feature, include_files
def origin_trial_features_context(generator_name, feature_info):
context = {'code_generator': generator_name}
# Unpack the feature info tuple.
features_for_type, types_for_feature, include_files = feature_info
# Add includes needed for cpp code and normalize.
include_files.update([
'core/context_features/context_feature_settings.h',
'core/execution_context/execution_context.h',
'core/frame/frame.h',
'core/origin_trials/origin_trials.h',
'platform/bindings/origin_trial_features.h',
'platform/bindings/script_state.h',
'platform/bindings/v8_per_context_data.h',
'platform/runtime_enabled_features.h',
# TODO(iclelland): Remove the need to explicitly include this; it is
# here because the ContextFeatureSettings code needs it.
'bindings/core/v8/v8_window.h',
])
context['includes'] = normalize_and_sort_includes(include_files)
# For each interface, collect a list of bindings installation functions to
# call, organized by conditional feature.
context['installers_by_interface'] = [{
'name':
interface_info.name,
'is_global':
interface_info.is_global,
'v8_class':
interface_info.v8_class,
'installers':
get_install_functions([interface_info], feature_names)
} for interface_info, feature_names in features_for_type.items()]
context['installers_by_interface'].sort(key=lambda x: x['name'])
# For each conditional feature, collect a list of bindings installation
# functions to call, organized by interface.
context['installers_by_feature'] = [{
'name':
feature_name,
'name_constant':
'OriginTrialFeature::k%s' % feature_name,
'installers':
get_install_functions(interfaces, [feature_name])
} for feature_name, interfaces in types_for_feature.items()]
context['installers_by_feature'].sort(key=lambda x: x['name'])
return context
def parse_options():
parser = optparse.OptionParser()
parser.add_option(
'--cache-directory',
help='cache directory, defaults to output directory')
parser.add_option('--output-directory')
parser.add_option('--info-dir')
parser.add_option(
'--target-component',
type='choice',
choices=['core', 'modules'],
help='target component to generate code')
parser.add_option('--idl-files-list')
options, _ = parser.parse_args()
if options.output_directory is None:
parser.error('Must specify output directory using --output-directory.')
return options
def generate_origin_trial_features(info_provider, options, idl_filenames):
reader = IdlReader(info_provider.interfaces_info, options.cache_directory)
jinja_env = initialize_jinja_env(options.cache_directory)
# Extract the bidirectional mapping of conditional features <-> interfaces
# from the global info provider and the supplied list of IDL files.
feature_info = origin_trial_features_info(
info_provider, reader, idl_filenames, options.target_component)
# Convert that mapping into the context required for the Jinja2 templates.
template_context = origin_trial_features_context(MODULE_PYNAME,
feature_info)
file_basename = 'origin_trial_features_for_%s' % options.target_component
# Generate and write out the header file
header_text = render_template(
jinja_env.get_template(file_basename + '.h.tmpl'), template_context)
header_path = posixpath.join(options.output_directory,
file_basename + '.h')
write_file(header_text, header_path)
# Generate and write out the implementation file
cpp_text = render_template(
jinja_env.get_template(file_basename + '.cc.tmpl'), template_context)
cpp_path = posixpath.join(options.output_directory, file_basename + '.cc')
write_file(cpp_text, cpp_path)
def main():
options = parse_options()
info_provider = create_component_info_provider(
os.path.normpath(options.info_dir), options.target_component)
idl_filenames = list(map(str.strip, open(options.idl_files_list)))
generate_origin_trial_features(info_provider, options, idl_filenames)
return 0
if __name__ == '__main__':
sys.exit(main())
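# Example invocation (illustrative only; the script name and paths below are
# assumptions, but the flags match parse_options above):
#   python this_script.py \
#       --info-dir gen/blink \
#       --output-directory gen/blink/bindings/modules/v8 \
#       --target-component modules \
#       --idl-files-list idl_files_list.tmp \
#       --cache-directory gen/blink/bindings/scripts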
|
the-stack_106_15091
|
from tests.test_helper import *
class TestPlan(unittest.TestCase):
def test_all_returns_empty_list(self):
Configuration.configure(
Environment.Development,
"test_merchant_id",
"test_public_key",
"test_private_key"
)
plans = Plan.all()
self.assertEqual([], plans)
Configuration.configure(
Environment.Development,
"integration_merchant_id",
"integration_public_key",
"integration_private_key"
)
def test_all_returns_all_the_plans(self):
plan_token = str(random.randint(1, 1000000))
attributes = {
"id": plan_token,
"billing_day_of_month": 1,
"billing_frequency": 1,
"currency_iso_code": "USD",
"description": "some description",
"name": "python test plan",
"number_of_billing_cycles": 1,
"price": "1.00",
}
Configuration.instantiate().http().post(Configuration.instantiate().base_merchant_path() + "/plans/create_plan_for_tests", {"plan": attributes})
add_on_attributes = {
"amount": "100.00",
"description": "some description",
"plan_id": plan_token,
"kind": "add_on",
"name": "python_add_on",
"never_expires": False,
"number_of_billing_cycles": 1
}
Configuration.instantiate().http().post(Configuration.instantiate().base_merchant_path() + "/modifications/create_modification_for_tests", {"modification": add_on_attributes})
discount_attributes = {
"amount": "100.00",
"description": "some description",
"plan_id": plan_token,
"kind": "discount",
"name": "python_discount",
"never_expires": False,
"number_of_billing_cycles": 1
}
Configuration.instantiate().http().post(Configuration.instantiate().base_merchant_path() + "/modifications/create_modification_for_tests", {"modification": discount_attributes})
plans = Plan.all()
for plan in plans:
if plan.id == plan_token:
actual_plan = plan
self.assertNotEqual(None, actual_plan)
        self.assertEqual(attributes["billing_day_of_month"], actual_plan.billing_day_of_month)
        self.assertEqual(attributes["billing_frequency"], actual_plan.billing_frequency)
        self.assertEqual(attributes["currency_iso_code"], actual_plan.currency_iso_code)
        self.assertEqual(attributes["description"], actual_plan.description)
        self.assertEqual(attributes["name"], actual_plan.name)
        self.assertEqual(attributes["number_of_billing_cycles"], actual_plan.number_of_billing_cycles)
        self.assertEqual(attributes["price"], str(actual_plan.price))
self.assertEqual(1, len(actual_plan.add_ons))
self.assertEqual(add_on_attributes["name"], actual_plan.add_ons[0].name)
self.assertEqual(1, len(actual_plan.discounts))
self.assertEqual(discount_attributes["name"], actual_plan.discounts[0].name)
|
the-stack_106_15092
|
#!/usr/bin/env python3
# Copyright (c) 2015 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
'''
This test is meant to exercise activation of the first version bits soft fork
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks, only 100 of which signal readiness, in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates
and that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
'''
base_relative_locktime = 10
seq_disable_flag = 1<<31
seq_random_high_bit = 1<<25
seq_type_flag = 1<<22
seq_random_low_bit = 1<<18
# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = []
for b31 in range(2):
b25times = []
for b25 in range(2):
b22times = []
for b22 in range(2):
b18times = []
for b18 in range(2):
rlt = base_relative_locktime
if (b31):
rlt = rlt | seq_disable_flag
if (b25):
rlt = rlt | seq_random_high_bit
if (b22):
rlt = rlt | seq_type_flag
if (b18):
rlt = rlt | seq_random_low_bit
b18times.append(rlt)
b22times.append(b18times)
b25times.append(b22times)
relative_locktimes.append(b25times)
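# Worked example (follows directly from the construction above):
#   relative_locktimes[1][0][1][0] == base_relative_locktime | seq_disable_flag | seq_type_flag
#                                  == 10 | (1 << 31) | (1 << 22)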
def all_rlt_txs(txarray):
txs = []
for b31 in range(2):
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
txs.append(txarray[b31][b25][b22][b18])
return txs
class BIP68_112_113Test(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
    def send_generic_input_tx(self, node, coinbases):
        amount = Decimal("49.99")
        tx = self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)
        signed_tx = self.sign_transaction(node, tx)
        return node.sendrawtransaction(ToHex(signed_tx))
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
return tx
def sign_transaction(self, node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
def create_test_block(self, txs, version = 536870912):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = version
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
txs = []
assert(len(bip68inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
tx.nVersion = txversion
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
b18txs.append(self.sign_transaction(self.nodes[0], tx))
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def create_bip112special(self, input, txversion):
tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
txs = []
assert(len(bip112inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = base_relative_locktime + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
b18txs.append(signtx)
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def get_tests(self):
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 1
# Advanced from DEFINED to STARTED, height = 143
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 2
# Failed to advance past STARTED, height = 287
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 3
# Advanced from STARTED to LOCKED_IN, height = 431
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 4
### Inputs at height = 572
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
# 1 normal input
bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int("0x" + inputblockhash, 0)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 5
# Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
### TESTING ###
##################################
### Before Soft Forks Activate ###
##################################
# All txs should pass
### Version 1 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 8
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
#################################
### After Soft Forks Activate ###
#################################
### BIP 113 ###
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 13
### BIP 68 ###
### Version 1 txs ###
# All still pass
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
bip68success_txs = []
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = []
for b25 in range(2):
for b18 in range(2):
bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
for tx in bip68timetxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
bip68heighttxs = []
for b25 in range(2):
for b18 in range(2):
bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 24
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 30
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### BIP 112 ###
### Version 1 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
### Version 2 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
# If sequencelock types mismatch, tx should fail
fail_txs = []
for b25 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
# Remaining txs should pass, just test masking works properly
success_txs = []
for b25 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Additional test, of checking that comparison of two time types works properly
time_txs = []
for b25 in range(2):
for b18 in range(2):
tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
signtx = self.sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Missing aspects of test
## Testing empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
|
the-stack_106_15093
|
#!/usr/bin/python3
import os
import glob
import statistics
comparisons = ["pure_tree","borrowing_05","borrowing_10","borrowing_15","borrowing_20","dialect","swamp"]
def read_table_from_file(infile):
table = []
with open(infile,"r") as f:
for line in f:
table.append(line.strip().rstrip().split("\t"))
return table
def a_greater_than_b(a,b,metric,results):
count = 0
for i in range(len(results["pure_tree"])):
if results[a][i][metric] > results[b][i][metric]:
count += 1
return count
def make_comparison_table():
results = {}
analyses = sorted(glob.glob(os.path.join("analyses","*")))
analyses.remove(os.path.join("analyses","uralex"))
for a in analyses:
basename = os.path.basename(os.path.normpath(a))
results[basename] = []
replications = sorted(glob.glob(os.path.join(a,"*.csv")))
for r in replications:
tiger_file = r + "_rates.txt"
delta_q_file = r + "_delta_qresidual.txt"
tiger_rates = read_table_from_file(tiger_file)
delta_q_rates = read_table_from_file(delta_q_file)[1:] # ignore header
trates = []
dscores = []
qresiduals = []
for line in tiger_rates:
trates.append(float(line[1]))
for line in delta_q_rates:
dscores.append(float(line[1]))
qresiduals.append(float(line[2]))
current = {}
current["tiger"] = statistics.mean(trates)
current["delta"] = statistics.mean(dscores)
current["qresidual"] = statistics.mean(qresiduals)
results[basename].append(current)
total = len(results["pure_tree"])
table = []
table.append("More tree-like vs. less tree-like\tTIGER rate agreements\tDelta score agreements\tQ-residual agreements\tNumber of replications")
for i in range(len(comparisons)-1):
tiger_cmp = a_greater_than_b(comparisons[i],comparisons[i+1],"tiger",results)
delta_cmp = a_greater_than_b(comparisons[i+1],comparisons[i],"delta",results) # Reversed metric compared to TIGER rates
qres_cmp = a_greater_than_b(comparisons[i+1],comparisons[i],"qresidual",results) # Reversed metric compared to TIGER rates
table.append("%s vs. %s\t%i\t%i\t%i\t%i" % (comparisons[i],comparisons[i+1], tiger_cmp, delta_cmp, qres_cmp, total))
return table
def make_mean_rates_table():
results = {}
analyses = sorted(glob.glob(os.path.join("analyses","*")))
for a in analyses:
basename = os.path.basename(os.path.normpath(a))
results[basename] = {}
results[basename]["delta"] = []
results[basename]["tiger"] = []
results[basename]["qresidual"] = []
replications = sorted(glob.glob(os.path.join(a,"*.csv")))
for r in replications:
if basename != "uralex":
tiger_file = r + "_rates.txt"
delta_q_file = r + "_delta_qresidual.txt"
else:
tiger_file = os.path.splitext(r)[0] + "_rates.txt"
delta_q_file = r + "_delta_qresidual.txt"
tiger_rates = read_table_from_file(tiger_file)
delta_q_rates = read_table_from_file(delta_q_file)[1:] # ignore header
for line in tiger_rates:
results[basename]["tiger"].append(float(line[1]))
for line in delta_q_rates:
results[basename]["delta"].append(float(line[1]))
results[basename]["qresidual"].append(float(line[2]))
table = []
table.append("Simulation\tMean TIGER rate\tMean delta score\tMean Q-residual")
for c in comparisons:
mean_tiger = statistics.mean(results[c]["tiger"])
mean_delta = statistics.mean(results[c]["delta"])
mean_qresi = statistics.mean(results[c]["qresidual"])
table.append("%s\t%f\t%f\t%f" % (c,mean_tiger,mean_delta,mean_qresi))
mean_tiger = statistics.mean(results["uralex"]["tiger"])
mean_delta = statistics.mean(results["uralex"]["delta"])
mean_qresi = statistics.mean(results["uralex"]["qresidual"])
table.append("%s\t%f\t%f\t%f" % ("uralex",mean_tiger,mean_delta,mean_qresi))
return table
def main():
comparisons_table = make_comparison_table()
means_table = make_mean_rates_table()
if not os.path.exists("tables"):
os.mkdir("tables")
with open(os.path.join("tables","comparisons.tsv"), "w") as f:
for line in comparisons_table:
f.write(line + "\n")
with open(os.path.join("tables","means.tsv"), "w") as f:
for line in means_table:
f.write(line + "\n")
if __name__ == '__main__':
main()
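# Expected on-disk layout, inferred from the glob patterns above (an assumption
# about the surrounding pipeline, not something this script creates):
#   analyses/<simulation>/<replication>.csv
#   analyses/<simulation>/<replication>.csv_rates.txt            TIGER rates
#   analyses/<simulation>/<replication>.csv_delta_qresidual.txt  delta scores / Q-residuals
# (for "uralex" the rates file is <replication>_rates.txt, without the .csv suffix)
# where <simulation> is one of the names in `comparisons`, plus "uralex".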
|
the-stack_106_15094
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: alexnet-dorefa.py
# Author: Yuxin Wu, Yuheng Zou ({wyx,zyh}@megvii.com)
import cv2
import tensorflow as tf
import argparse
import numpy as np
import os
import sys
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import prediction_incorrect
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
from tensorpack.tfutils.varreplace import remap_variables
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
from imagenet_utils import get_imagenet_dataflow, fbresnet_augmentor
from dorefa import get_dorefa
"""
This is a tensorpack script for the ImageNet results in paper:
DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients
http://arxiv.org/abs/1606.06160
The original experiments were performed on a proprietary framework.
This is our attempt to reproduce it on tensorpack & TensorFlow.
Accuracy:
Trained with 4 GPUs and (W,A,G)=(1,2,6), it can reach top-1 single-crop validation error of 47.6%,
after 70 epochs. This number is better than what's in the paper
due to more sophisticated augmentations.
With (W,A,G)=(32,32,32) -- full precision baseline, 41.4% error.
With (W,A,G)=(1,32,32) -- BWN, 44.3% error
With (W,A,G)=(1,2,6), 47.6% error
With (W,A,G)=(1,2,4), 58.4% error
Don't train with >4 GPUs because the batch size will be different.
Speed:
    About 11 iterations/s on 4 P100s. (Each epoch is set to 10000 iterations)
Note that this code was written early without using NCHW format. You
should expect a speed up if the code is ported to NCHW format.
To Train, for example:
./alexnet-dorefa.py --dorefa 1,2,6 --data PATH --gpu 0,1
PATH should look like:
PATH/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
ILSVRC2012_val_00000001.JPEG
...
And you'll need the following to be able to fetch data efficiently
Fast disk random access (Not necessarily SSD. I used a RAID of HDD, but not sure if plain HDD is enough)
More than 20 CPU cores (for data processing)
More than 10G of free memory
To run pretrained model:
./alexnet-dorefa.py --load alexnet-126.npz --run a.jpg --dorefa 1,2,6
"""
BITW = 1
BITA = 2
BITG = 6
TOTAL_BATCH_SIZE = 128
BATCH_SIZE = None
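# BATCH_SIZE is the per-GPU batch size; it is filled in under __main__ below as
# TOTAL_BATCH_SIZE // nr_tower.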
class Model(ModelDesc):
def inputs(self):
return [tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'),
tf.placeholder(tf.int32, [None], 'label')]
def build_graph(self, image, label):
image = image / 255.0
fw, fa, fg = get_dorefa(BITW, BITA, BITG)
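        # fw, fa and fg quantize weights, activations and gradients to BITW,
        # BITA and BITG bits respectively (the W,A,G bit widths described in the
        # module docstring above).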
# monkey-patch tf.get_variable to apply fw
def new_get_variable(v):
name = v.op.name
# don't binarize first and last layer
if not name.endswith('W') or 'conv0' in name or 'fct' in name:
return v
else:
logger.info("Binarizing weight {}".format(v.op.name))
return fw(v)
def nonlin(x):
if BITA == 32:
return tf.nn.relu(x) # still use relu for 32bit cases
return tf.clip_by_value(x, 0.0, 1.0)
def activate(x):
return fa(nonlin(x))
with remap_variables(new_get_variable), \
argscope(BatchNorm, momentum=0.9, epsilon=1e-4), \
argscope(Conv2D, use_bias=False):
logits = (LinearWrap(image)
.Conv2D('conv0', 96, 12, strides=4, padding='VALID')
.apply(activate)
.Conv2D('conv1', 256, 5, padding='SAME', split=2)
.apply(fg)
.BatchNorm('bn1')
.MaxPooling('pool1', 3, 2, padding='SAME')
.apply(activate)
.Conv2D('conv2', 384, 3)
.apply(fg)
.BatchNorm('bn2')
.MaxPooling('pool2', 3, 2, padding='SAME')
.apply(activate)
.Conv2D('conv3', 384, 3, split=2)
.apply(fg)
.BatchNorm('bn3')
.apply(activate)
.Conv2D('conv4', 256, 3, split=2)
.apply(fg)
.BatchNorm('bn4')
.MaxPooling('pool4', 3, 2, padding='VALID')
.apply(activate)
.FullyConnected('fc0', 4096)
.apply(fg)
.BatchNorm('bnfc0')
.apply(activate)
.FullyConnected('fc1', 4096, use_bias=False)
.apply(fg)
.BatchNorm('bnfc1')
.apply(nonlin)
.FullyConnected('fct', 1000, use_bias=True)())
tf.nn.softmax(logits, name='output')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))
wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
# weight decay on all W of fc layers
wd_cost = regularize_cost('fc.*/W', l2_regularizer(5e-6), name='regularize_cost')
add_param_summary(('.*/W', ['histogram', 'rms']))
total_cost = tf.add_n([cost, wd_cost], name='cost')
add_moving_summary(cost, wd_cost, total_cost)
return total_cost
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
return tf.train.AdamOptimizer(lr, epsilon=1e-5)
def get_data(dataset_name):
isTrain = dataset_name == 'train'
augmentors = fbresnet_augmentor(isTrain)
return get_imagenet_dataflow(
args.data, dataset_name, BATCH_SIZE, augmentors)
def get_config():
logger.auto_set_dir()
data_train = get_data('train')
data_test = get_data('val')
return TrainConfig(
dataflow=data_train,
callbacks=[
ModelSaver(),
# HumanHyperParamSetter('learning_rate'),
ScheduledHyperParamSetter(
'learning_rate', [(56, 2e-5), (64, 4e-6)]),
InferenceRunner(data_test,
[ScalarStats('cost'),
ClassificationError('wrong-top1', 'val-error-top1'),
ClassificationError('wrong-top5', 'val-error-top5')])
],
model=Model(),
steps_per_epoch=10000,
max_epoch=100,
)
def run_image(model, sess_init, inputs):
pred_config = PredictConfig(
model=model,
session_init=sess_init,
input_names=['input'],
output_names=['output']
)
predictor = OfflinePredictor(pred_config)
meta = dataset.ILSVRCMeta()
pp_mean = meta.get_per_pixel_mean()
pp_mean_224 = pp_mean[16:-16, 16:-16, :]
words = meta.get_synset_words_1000()
def resize_func(im):
h, w = im.shape[:2]
scale = 256.0 / min(h, w)
desSize = map(int, (max(224, min(w, scale * w)),
max(224, min(h, scale * h))))
im = cv2.resize(im, tuple(desSize), interpolation=cv2.INTER_CUBIC)
return im
transformers = imgaug.AugmentorList([
imgaug.MapImage(resize_func),
imgaug.CenterCrop((224, 224)),
imgaug.MapImage(lambda x: x - pp_mean_224),
])
for f in inputs:
assert os.path.isfile(f)
img = cv2.imread(f).astype('float32')
assert img is not None
img = transformers.augment(img)[np.newaxis, :, :, :]
outputs = predictor(img)[0]
prob = outputs[0]
ret = prob.argsort()[-10:][::-1]
names = [words[i] for i in ret]
print(f + ":")
print(list(zip(names, prob[ret])))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='the physical ids of GPUs to use')
parser.add_argument('--load', help='load a checkpoint, or a npz (given as the pretrained model)')
parser.add_argument('--data', help='ILSVRC dataset dir')
parser.add_argument('--dorefa',
help='number of bits for W,A,G, separated by comma', required=True)
parser.add_argument('--run', help='run on a list of images with the pretrained model', nargs='*')
args = parser.parse_args()
BITW, BITA, BITG = map(int, args.dorefa.split(','))
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.run:
assert args.load.endswith('.npz')
run_image(Model(), DictRestore(dict(np.load(args.load))), args.run)
sys.exit()
nr_tower = max(get_nr_gpu(), 1)
BATCH_SIZE = TOTAL_BATCH_SIZE // nr_tower
logger.info("Batch per tower: {}".format(BATCH_SIZE))
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
launch_train_with_config(config, SyncMultiGPUTrainer(nr_tower))
|
the-stack_106_15095
|
#!/usr/bin/env python
import sys
# sys.path.append('/home/u17/timeifler/WFIRST_forecasts')
# sys.path.append('/Users/timeifler/WFIRST_forecasts')
from cosmolike_libs_opti import *
from schwimmbad import MPIPool
file_source_z = os.path.join(dirname, "zdistris/zdistri_WFIRST_LSST_lensing_fine_bin")
file_lens_z = os.path.join(dirname, "zdistris/zdistri_WFIRST_LSST_clustering_fine_bin")
data_file = os.path.join(dirname, "datav/WFIRST_pos_pos_opti")
cov_file = os.path.join(dirname, "cov/WFIRST_pos_pos_inv")
chain_file = "/extra/timeifler/WFIRST_forecasts/chains/like_WFIRST_clustering_sys_opti"
initcosmo("halofit")
initbins(25,30.0,15000.0,4000.0,21.0,10,10)
initpriors("photo_opti","shear_opti","none","none")
initsurvey("WFIRST")
initgalaxies(file_source_z,file_lens_z,"gaussian","gaussian","SN10")
initclusters()
initia("none","none")
# test also with
#initpriors("none","none","none","Planck")
#initpriors("none","none","none","random")
initprobes("pos_pos")
initdatainv(cov_file ,data_file)
#sample_params=sample_LCDM_only()
#sample_params= sample_cosmology_only()
#sample_params = sample_cosmology_shear_nuisance(get_N_tomo_shear())
sample_params = sample_cosmology_clustering_nuisance(get_N_tomo_clustering())
#sample_params = sample_cosmology_2pt_nuisance(get_N_tomo_shear(),get_N_tomo_clustering())
#sample_params = sample_cosmology_2pt_nuisance_IA_marg(get_N_tomo_shear(),get_N_tomo_clustering())
#sample_params = sample_cosmology_2pt_cluster_nuisance(get_N_tomo_shear(),get_N_tomo_clustering())
sample_main(sample_params,10000,560,1,chain_file, blind=False, pool=MPIPool())
|
the-stack_106_15096
|
# coding=utf-8
from __future__ import absolute_import
import octoprint.plugin
from octoprint.server import user_permission
import socket
import json
import logging
import os
import re
import threading
import time
from octoprint_tuyasmartplug.utils.tinytuya import tinytuya
class tuyasmartplugPlugin(
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.SimpleApiPlugin,
octoprint.plugin.StartupPlugin,
):
def __init__(self):
self._logger = logging.getLogger("octoprint.plugins.tuyasmartplug")
self._tuyasmartplug_logger = logging.getLogger(
"octoprint.plugins.tuyasmartplug.debug"
)
# ~~ StartupPlugin mixin
def on_startup(self, host, port):
# setup customized logger
from octoprint.logging.handlers import CleaningTimedRotatingFileHandler
tuyasmartplug_logging_handler = CleaningTimedRotatingFileHandler(
self._settings.get_plugin_logfile_path(postfix="debug"),
when="D",
backupCount=3,
)
tuyasmartplug_logging_handler.setFormatter(
logging.Formatter("[%(asctime)s] %(levelname)s: %(message)s")
)
tuyasmartplug_logging_handler.setLevel(logging.DEBUG)
self._tuyasmartplug_logger.addHandler(tuyasmartplug_logging_handler)
self._tuyasmartplug_logger.setLevel(
logging.DEBUG
if self._settings.get_boolean(["debug_logging"])
else logging.INFO
)
self._tuyasmartplug_logger.propagate = False
def on_after_startup(self):
self._logger.info("TuyaSmartplug loaded!")
# ~~ SettingsPlugin mixin
def get_settings_defaults(self):
return dict(
debug_logging=False,
arrSmartplugs=[
{
"ip": "",
"id": "",
"slot": 1,
"localKey": "",
"label": "",
"icon": "icon-bolt",
"displayWarning": True,
"warnPrinting": False,
"gcodeEnabled": False,
"v33": False,
"gcodeOnDelay": 0,
"gcodeOffDelay": 0,
"autoConnect": True,
"autoConnectDelay": 10.0,
"autoDisconnect": True,
"autoDisconnectDelay": 0,
"sysCmdOn": False,
"sysRunCmdOn": "",
"sysCmdOnDelay": 0,
"sysCmdOff": False,
"sysRunCmdOff": "",
"sysCmdOffDelay": 0,
"currentState": "unknown",
"btnColor": "#808080",
"useCountdownRules": False,
"countdownOnDelay": 0,
"countdownOffDelay": 0,
}
],
pollingInterval=15,
pollingEnabled=False,
)
def get_settings_restricted_paths(self):
return dict(
admin=[
[
"arrSmartplugs",
],
]
)
def on_settings_save(self, data):
old_debug_logging = self._settings.get_boolean(["debug_logging"])
octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
new_debug_logging = self._settings.get_boolean(["debug_logging"])
if old_debug_logging != new_debug_logging:
if new_debug_logging:
self._tuyasmartplug_logger.setLevel(logging.DEBUG)
else:
self._tuyasmartplug_logger.setLevel(logging.INFO)
def get_settings_version(self):
return 3
def on_settings_migrate(self, target, current=None):
if current is None or current < self.get_settings_version():
# Reset plug settings to defaults.
self._logger.debug("Resetting arrSmartplugs for tuyasmartplug settings.")
self._settings.set(
["arrSmartplugs"], self.get_settings_defaults()["arrSmartplugs"]
)
# ~~ AssetPlugin mixin
def get_assets(self):
return dict(js=["js/tuyasmartplug.js"], css=["css/tuyasmartplug.css"])
# ~~ TemplatePlugin mixin
def get_template_configs(self):
return [
dict(type="navbar", custom_bindings=True),
dict(type="settings", custom_bindings=True),
]
# ~~ SimpleApiPlugin mixin
def turn_on(self, pluglabel):
self._tuyasmartplug_logger.debug("Turning on %s." % pluglabel)
if self.is_turned_on(pluglabel=pluglabel):
self._tuyasmartplug_logger.debug("Plug %s already turned on" % pluglabel)
self._plugin_manager.send_plugin_message(
self._identifier, dict(currentState="on", label=pluglabel)
)
return
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "label", pluglabel
)
self._tuyasmartplug_logger.debug(plug)
if plug["useCountdownRules"]:
chk = self.sendCommand(
"countdown", plug["label"], int(plug["countdownOnDelay"])
)
else:
chk = self.sendCommand("on", plug["label"])
if chk is not False:
self.check_status(plug["label"], chk)
if plug["autoConnect"]:
c = threading.Timer(
int(plug["autoConnectDelay"]), self._printer.connect
)
c.start()
if plug["sysCmdOn"]:
t = threading.Timer(
int(plug["sysCmdOnDelay"]), os.system, args=[plug["sysRunCmdOn"]]
)
t.start()
else:
self._plugin_manager.send_plugin_message(
self._identifier, dict(currentState="unknown", label=pluglabel)
)
def turn_off(self, pluglabel):
self._tuyasmartplug_logger.debug("Turning off %s." % pluglabel)
if not self.is_turned_on(pluglabel=pluglabel):
self._tuyasmartplug_logger.debug("Plug %s already turned off" % pluglabel)
self._plugin_manager.send_plugin_message(
self._identifier, dict(currentState="off", label=pluglabel)
)
return
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "label", pluglabel
)
self._tuyasmartplug_logger.debug(plug)
if plug["useCountdownRules"]:
chk = self.sendCommand(
"countdown", plug["label"], int(plug["countdownOffDelay"])
)
if plug["sysCmdOff"]:
t = threading.Timer(
int(plug["sysCmdOffDelay"]), os.system, args=[plug["sysRunCmdOff"]]
)
t.start()
if plug["autoDisconnect"]:
self._printer.disconnect()
time.sleep(int(plug["autoDisconnectDelay"]))
if not plug["useCountdownRules"]:
chk = self.sendCommand("off", plug["label"])
if chk is not False:
self.check_status(plug["label"], chk)
else:
self._plugin_manager.send_plugin_message(
self._identifier, dict(currentState="unknown", label=pluglabel)
)
def check_status(self, pluglabel, resp=None):
self._tuyasmartplug_logger.debug("Checking status of %s." % pluglabel)
if pluglabel != "":
response = resp or self.sendCommand("info", pluglabel)
if response is False:
self._plugin_manager.send_plugin_message(
self._identifier, dict(currentState="unknown", label=pluglabel)
)
else:
self._plugin_manager.send_plugin_message(
self._identifier,
dict(
currentState=(
"on" if self.is_turned_on(response, pluglabel) else "off"
),
label=pluglabel,
),
)
def is_turned_on(self, data=None, pluglabel=None):
if data is None and pluglabel:
data = self.sendCommand("info", pluglabel)
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "label", pluglabel
)
return data and plug and data.get("dps", {}).get(str(plug["slot"]))
def get_api_commands(self):
return dict(turnOn=["label"], turnOff=["label"], checkStatus=["label"])
def on_api_command(self, command, data):
if not user_permission.can():
from flask import make_response
return make_response("Insufficient rights", 403)
if command == "turnOn":
self.turn_on("{label}".format(**data))
elif command == "turnOff":
self.turn_off("{label}".format(**data))
elif command == "checkStatus":
self.check_status("{label}".format(**data))
# ~~ Utilities
def plug_search(self, lst, key, value):
for item in lst:
if item[key] == value:
return item
def sendCommand(self, cmd, pluglabel, args=None, tries=1):
self._tuyasmartplug_logger.debug("Sending command: %s to %s" % (cmd, pluglabel))
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "label", pluglabel
)
device = tinytuya.OutletDevice(plug["id"], plug["ip"], plug["localKey"])
# device = pytuya.OutletDevice(plug["id"], plug["ip"], plug["localKey"])
if plug.get("v33"):
device.version = 3.3
commands = {
"info": ("status", None),
"on": ("set_status", True),
"off": ("set_status", False),
"countdown": ("set_timer", None),
}
try:
command, arg = commands[cmd]
func = getattr(device, command, None)
if not func:
self._tuyasmartplug_logger.debug("No such command '%s'" % command)
return False
if args:
func(args)
elif arg is not None:
func(arg, plug["slot"])
else:
func()
time.sleep(0.5)
ret = device.status()
self._tuyasmartplug_logger.debug("Status: %s" % str(ret))
return ret
except socket.error as e:
if e.errno == 104:
if tries <= 3:
self._tuyasmartplug_logger.debug(
"Connection refused... Trying again soon"
)
time.sleep(1)
return self.sendCommand(cmd, pluglabel, args, tries + 1)
self._tuyasmartplug_logger.debug("Too many failed attempts")
return False
self._tuyasmartplug_logger.debug("Network error")
return False
        except Exception:
            self._tuyasmartplug_logger.exception(
                "Something went wrong while running the command '%s'" % cmd
            )
            return False
# ~~ Gcode processing hook
def gcode_turn_off(self, plug):
if plug["warnPrinting"] and self._printer.is_printing():
self._logger.info(
"Not powering off %s because printer is printing." % plug["label"]
)
else:
self.turn_off(plug["label"])
def processGCODE(self, comm_instance, phase, cmd, cmd_type, gcode, *args, **kwargs):
if gcode:
if cmd.startswith("M80"):
name = re.sub(r"^M80\s?", "", cmd)
self._tuyasmartplug_logger.debug(
"Received M80 command, attempting power on of %s." % name
)
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "ip", name
)
if not plug:
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "label", name
)
self._tuyasmartplug_logger.debug(plug)
                if plug and plug["gcodeEnabled"]:
t = threading.Timer(
int(plug["gcodeOnDelay"]), self.turn_on, args=[plug["label"]]
)
t.start()
return
elif cmd.startswith("M81"):
name = re.sub(r"^M81\s?", "", cmd)
self._tuyasmartplug_logger.debug(
"Received M81 command, attempting power off of %s." % name
)
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "ip", name
)
if not plug:
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "label", name
)
self._tuyasmartplug_logger.debug(plug)
                if plug and plug["gcodeEnabled"]:
t = threading.Timer(
int(plug["gcodeOffDelay"]), self.gcode_turn_off, args=[plug]
)
t.start()
return
else:
return
elif cmd.startswith("@TUYAON"):
name = re.sub(r"^@TUYAON\s?", "", cmd)
self._tuyasmartplug_logger.debug(
"Received @TUYAON command, attempting power on of %s." % name
)
plug = self.plug_search(self._settings.get(["arrSmartplugs"]), "ip", name)
if not plug:
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "label", name
)
self._tuyasmartplug_logger.debug(plug)
            if plug and plug["gcodeEnabled"]:
t = threading.Timer(
int(plug["gcodeOnDelay"]), self.turn_on, args=[plug["label"]]
)
t.start()
return None
elif cmd.startswith("@TUYAOFF"):
name = re.sub(r"^@TUYAOFF\s?", "", cmd)
            self._tuyasmartplug_logger.debug(
                "Received @TUYAOFF command, attempting power off of %s." % name
            )
plug = self.plug_search(self._settings.get(["arrSmartplugs"]), "ip", name)
if not plug:
plug = self.plug_search(
self._settings.get(["arrSmartplugs"]), "label", name
)
self._tuyasmartplug_logger.debug(plug)
            if plug and plug["gcodeEnabled"]:
t = threading.Timer(
int(plug["gcodeOffDelay"]), self.gcode_turn_off, args=[plug]
)
t.start()
return None
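    # Summary of the power-related commands handled above (behaviour as implemented):
    #   M80 <label or ip>  -> power the matching plug on after gcodeOnDelay seconds
    #   M81 <label or ip>  -> power it off after gcodeOffDelay seconds via gcode_turn_off,
    #                         which skips the shutdown when warnPrinting is set and a print is active
    #   @TUYAON / @TUYAOFF -> same behaviour for OctoPrint @-commands (no gcode attached)
    # In every case the plug is looked up by ip first, then by label, and must have
    # gcodeEnabled set in its settings.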
# ~~ Softwareupdate hook
def get_update_information(self):
# Define the configuration for your plugin to use with the Software Update
# Plugin here. See https://github.com/foosel/OctoPrint/wiki/Plugin:-Software-Update
# for details.
return dict(
tuyasmartplug=dict(
displayName="Tuya Smartplug",
displayVersion=self._plugin_version,
# version check: github repository
type="github_release",
user="matt-beamish",
repo="OctoPrint-TuyaSmartplug",
current=self._plugin_version,
# update method: pip
pip="https://github.com/matt-beamish/OctoPrint-TuyaSmartplug/archive/{target_version}.zip",
)
)
# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py
# ("OctoPrint-PluginSkeleton"), you may define that here. Same goes for the other metadata derived from setup.py that
# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.
__plugin_name__ = "Tuya Smartplug"
__plugin_version__ = "0.4.0"
__plugin_pythoncompat__ = ">=2.7,<4"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = tuyasmartplugPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.comm.protocol.gcode.queuing": __plugin_implementation__.processGCODE,
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information,
}
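# --- Illustrative sketch (not part of the plugin) ----------------------------
# A minimal example of the tinytuya calls that sendCommand() above wraps; the
# device id, IP address and local key are placeholders, and the block is guarded
# so it never runs when OctoPrint imports this module.  tinytuya is already
# imported by this module (see sendCommand).
if __name__ == "__main__":
    demo_device = tinytuya.OutletDevice("DEVICE_ID", "192.168.0.50", "LOCAL_KEY")
    demo_device.version = 3.3  # mirror the plugin's handling of protocol 3.3 plugs
    print(demo_device.status())  # equivalent of sendCommand("info", ...)
    demo_device.set_status(True, 1)  # turn on dps slot 1, like sendCommand("on", ...)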
|
the-stack_106_15098
|
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import os.path
import pywatchman
import WatchmanTestCase
from path_utils import norm_absolute_path
@WatchmanTestCase.expand_matrix
class TestAbsoluteRoot(WatchmanTestCase.WatchmanTestCase):
def test_dot(self):
root = self.mkdtemp()
save_dir = os.getcwd()
try:
os.chdir(root)
dot = "" if os.name == "nt" else "."
if self.transport == "cli":
res = self.watchmanCommand("watch", dot)
self.assertEqual(root, norm_absolute_path(res["watch"]))
else:
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand("watch", dot)
self.assertIn("must be absolute", str(ctx.exception))
finally:
os.chdir(save_dir)
def test_root(self):
if os.name != "nt":
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand("watch", "/")
self.assertIn("cannot watch", str(ctx.exception))
|
the-stack_106_15100
|
import random
def CreateLineUp():
"""
The Purpose of this program is to automate the selection of a Fantasy Baseball Lineup on FanDuel.com
to win GPP and Head-To-Head Contests.
Download .csv from FanDuel.com, place in folder and run program to output a Fantasy Baseball lineup.
For use in 50/50 or Head to Head matchups.
Future Changes: An algorithm which determines 'efficiency' of a player based on FPPG and salary.
"""
def LoadPlayerData(player_data_csv, players_data_list):
#This Function Will Load Player Data from a CSV file and send to AddPlayerData Function to create a list of dicts
with open(player_data_csv, 'r') as player_file:
for line in player_file:
new_line = line.split(',')
#Formatting based on FanDuel format
if new_line[13] == '0\n':
pass
else:
AddPlayerData(new_line[2].strip('"'), new_line[3].strip('"'), new_line[1].strip('"'),
new_line[4].strip('"'), new_line[5].strip('"'), new_line[6].strip('"'),
new_line[8].strip('"'), new_line[9].strip('"'), new_line[10].strip('"'),
players_data_list)
return players_data_list
def AddPlayerData(fname, lname, position, FPPG, played, salary, team, opponent, injured, players_data_list):
#The AddPlayerData function will format input arguments and append to player list
players_data_list.append([lname, fname, position, FPPG, played, salary, team, opponent, injured])
def FilterPlayerData(player_list):
    #FilterPlayerData builds a new list containing only the players who pass every filter:
    #Salary >= 2100, FPPG >= 2, at least 2 games played, and no injury note
filtered_player_list = []
for player in player_list[1:]:
if int(player[5])<2100:
pass
elif float(player[3])<2:
pass
elif int(player[4])<2:
pass
elif player[8] != '':
pass
else:
player.append(int(player[5])/float(player[3])) #Calculate 'Efficiency' value, append to player list
filtered_player_list.append(player) #append filtered list of players
return filtered_player_list
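#Worked example (illustrative only, never called): the 'efficiency' value appended
#above is salary divided by FPPG, i.e. salary dollars spent per fantasy point, so a
#lower number means better value.
def EfficiencyExample(salary=3500, fppg=10.0):
    """A $3,500 player averaging 10.0 FPPG costs 3500 / 10.0 = 350.0 salary dollars per point."""
    return salary / float(fppg)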
def DraftLineup(filt_player_list):
salary_cap = 35000
draft_salary = 0
#A List is Created for each position based on filtered players list and other determining factors
pitcher_list = []
catch_list = []
first_b_list = []
second_b_list = []
third_b_list = []
ss_list = []
of_list = []
list_of_lineups = []
#Team Rank is based on MLB Power Rankings and will be used to draft players on high-ranked teams which
#play low-ranked teams, from ESPN.com Power Ranking, accessed 5/31/2016
#A Webscraping script will be added to automatically upload Team Ranking from ESPN power ranking
team_rank = {"CHC": 1,
"BOS": 2,
"WAS": 3,
"SFG": 4,
"BAL": 5,
"CWS": 6,
"SEA": 7,
"NYM": 8,
"CLE": 9,
"STL": 10,
"PIT": 11,
"TEX": 12,
"KAN": 13,
"PHI": 14,
"LOS": 15,
"TAM": 16,
"MIA": 17,
"TOR": 18,
"DET": 19,
"NYY": 20,
"COL": 21,
"LAA": 22,
"ARI": 23,
"OAK": 24,
"HOU": 25,
"ATL": 30,
"MIN": 29,
"CIN": 28,
"SDP": 27,
"MIL": 26}
for player in filt_player_list: #Append to filtered player list the differential between ranking of teams
home = player[6] #Players are chosen on teams which are at a significant advantage over their opponent
opponent = player[7]
player.append(team_rank[home]-team_rank[opponent])
for player in filt_player_list: #Create Line-Up based upon positions and team-rankings
        if player[10] < -4: #Only select players whose own team is ranked at least 5 spots better (lower rank number) than their opponent
if player[2] == 'P':
if int(player[5]) > 7000:
pitcher_list.append([player[0], player[1], player[-1], player[-2], player[3], player[5]])
elif player[2] == 'C':
catch_list.append([player[0], player[1], player[-1], player[-2], player[3], player[5]])
elif player[2] == '1B':
first_b_list.append([player[0], player[1], player[-1], player[-2], player[3], player[5]])
elif player[2] == '2B':
second_b_list.append([player[0], player[1], player[-1], player[-2], player[3], player[5]])
elif player[2] == '3B':
third_b_list.append([player[0], player[1], player[-1], player[-2], player[3], player[5]])
elif player[2] == 'SS':
ss_list.append([player[0], player[1], player[-1], player[-2], player[3], player[5]])
elif player[2] == 'OF':
of_list.append([player[0], player[1], player[-1], player[-2], player[3], player[5]])
#Positions are drafted using random choice based upon pre-built/filtered lists for each position
#Ten LineUps are drafted based upon pre-developed lists of players at each position
    while len(list_of_lineups) < 10:
pitcher = random.choice(pitcher_list)
catcher = random.choice(catch_list)
first_base = random.choice(first_b_list)
second_base = random.choice(second_b_list)
third_base = random.choice(third_b_list)
short_stop = random.choice(ss_list)
outfield1 = random.choice(of_list)
outfield2 = random.choice(of_list)
        while outfield2 == outfield1: #Ensuring the three outfield players are not duplicated
            outfield2 = random.choice(of_list)
        outfield3 = random.choice(of_list)
        while outfield3 == outfield1 or outfield3 == outfield2:
            outfield3 = random.choice(of_list)
lineup_list = [pitcher, catcher, first_base, second_base, third_base, short_stop,
outfield1, outfield2, outfield3]
#Creates Line-Up from randomly-chosen positions
draft_salary = 0
tot_avg_fppg = 0
for player in lineup_list: #Calculates the total salary for each lineup and fppg for the line up
draft_salary += int(player[-1]) #Efficiency will also be calculated for line-ups
tot_avg_fppg += float(player[-2])
lineup_list.append(draft_salary)
lineup_list.append(tot_avg_fppg)
lineup_list.append(float(draft_salary/tot_avg_fppg))
        if 34500 < draft_salary <= salary_cap: #Keep only lineups that use most of the $35,000 cap without exceeding it
list_of_lineups.append(lineup_list) #Includes Efficiency for comparison among various lineups
min_efficiency = 1000
for lineups in list_of_lineups:
print(lineups)
if lineups[-1]<min_efficiency:
min_efficiency=lineups[-1]
print(min_efficiency)
ABTest(list_of_lineups)
def ABTest(lineup_list):
"""
The purpose of this function is to perform A/B Testing on Lineups.
The lineup with lowest Projected Points will be lineup 'B'.
The lineup with highest Project Points will be lineup 'A'.
Each lineup will be entered in 50/50 lineups, with all data recorded in separate excel spreadsheet.
:param lineup_list:
:return:
"""
lineupA = lineup_list[0]
lineupB = lineup_list[1]
for lineup in lineup_list:
if lineup[-2]>lineupA[-2]:
lineupA=lineup
if lineup[-2]<lineupB[-2]:
lineupB = lineup
print("\nLineup A:\n ")
lineupA_list = []
for player in lineupA[:-3]:
player_name = player[1] + " " + player[0]
lineupA_list.append(player_name)
print(lineupA_list, lineupA[-2])
print("\nLineup B:\n ")
lineupB_list = []
for player in lineupB[:-3]:
player_name = player[1] + " " + player[0]
lineupB_list.append(player_name)
print(lineupB_list, lineupB[-2])
def main():
players_data_list = []
play_csv = input('What is the name of the daily CSV file?')
player_list = LoadPlayerData(play_csv, players_data_list)
filter_player_list = FilterPlayerData(player_list)
DraftLineup(filter_player_list)
if __name__ == "__main__":
main()
|
the-stack_106_15101
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.functional_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.util import compat
# pylint: disable=invalid-name
def simple_scoped_fn(a, x):
"""Simple function: (a, x) -> 2(x+a), but with "2" as a variable in scope."""
with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
two = variable_scope.get_variable(
"two", [],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(2))
return math_ops.multiply(math_ops.add(a, x), two)
class FunctionalOpsTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testFoldl_Simple(self):
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems)
self.assertAllEqual(208, self.evaluate(r))
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems,
initializer=10)
self.assertAllEqual(880, self.evaluate(r))
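  # Illustrative trace of the two folds above: with no initializer the first
  # element seeds the accumulator, so 1 -> (1+2)*2=6 -> (6+3)*2=18 -> (18+4)*2=44
  # -> (44+5)*2=98 -> (98+6)*2=208; with initializer=10 every element is folded
  # in, giving (10+1)*2=22 -> 48 -> 102 -> 212 -> 434 -> (434+6)*2=880.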
@test_util.run_in_graph_and_eager_modes
def testFoldl_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldl(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldl(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputDifferentDimsSingleOutput(self):
elems = np.array([[1.0, 1.0, 1.0], [2.0, 3.0, 4.0]])
other_elems = np.array([-1.0, 1.0])
initializer = np.array([0.0, 0.0, 0.0])
r = functional_ops.foldl(lambda a, x: a + x[0] * x[1],
(elems, other_elems), initializer)
self.assertAllEqual([1.0, 2.0, 3.0], self.evaluate(r))
def testFoldl_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldl(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(208, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldl(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_Simple(self):
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems)
self.assertAllEqual(450, self.evaluate(r))
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems,
initializer=10)
self.assertAllEqual(1282, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldr(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldr_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldr(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
def testFoldr_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldr(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(450, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldr(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(1282, self.evaluate(r))
# pylint: disable=unnecessary-lambda
def testFold_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, self.evaluate(r))
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testMap_Simple(self):
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, name="data")
r = functional_ops.map_fn(
lambda x: math_ops.multiply(math_ops.add(x, 3), 2), elems)
self.assertAllEqual(
np.array([(x + 3) * 2 for x in nums]), self.evaluate(r))
def testMapSparseTensor(self):
with self.cached_session():
with self.assertRaises(TypeError):
functional_ops.map_fn(
lambda x: x,
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=constant_op.constant([0, 1, 2]),
dense_shape=[2, 2]))
@test_util.run_in_graph_and_eager_modes
def testMapOverScalarErrors(self):
with self.assertRaisesRegexp(ValueError, "not scalars"):
functional_ops.map_fn(lambda x: x, [1, 2])
with self.assertRaisesRegexp(ValueError, "not a scalar"):
functional_ops.map_fn(lambda x: x, 1)
def testMap_Scoped(self):
with self.cached_session() as sess:
def double_scoped(x):
"""2x with a dummy 2 that is scoped."""
with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
two = variable_scope.get_variable(
"two", [],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(2))
return math_ops.multiply(x, two)
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
doubles = np.array([2 * x for x in [1, 2, 3, 4, 5, 6]])
r = functional_ops.map_fn(double_scoped, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(doubles, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.map_fn(double_scoped, elems)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(doubles, self.evaluate(r))
def testMap_Grad(self):
with self.cached_session():
param = constant_op.constant(2.0)
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
y = functional_ops.map_fn(
lambda x: math_ops.multiply(math_ops.square(x), param), elems)
r = gradients_impl.gradients(y, param)[0]
self.assertAllEqual(91.0, self.evaluate(r))
r = gradients_impl.gradients(y, elems)[0]
self.assertAllEqual([4.0, 8.0, 12.0, 16.0, 20.0, 24.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testMap_SimpleNotTensor(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = functional_ops.map_fn(
lambda x: math_ops.multiply(math_ops.add(x, 3), 2), nums)
self.assertAllEqual(
np.array([(x + 3) * 2 for x in nums]), self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testMap_SingleInputMultiOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = functional_ops.map_fn(
lambda x: ((x + 3) * 2, -(x + 3) * 2),
nums,
dtype=(dtypes.int64, dtypes.int64))
self.assertEqual(2, len(r))
self.assertEqual((6,), r[0].get_shape())
self.assertEqual((6,), r[1].get_shape())
received = self.evaluate(r)
self.assertAllEqual((nums + 3) * 2, received[0])
self.assertAllEqual(-(nums + 3) * 2, received[1])
@test_util.run_in_graph_and_eager_modes
def testMap_MultiOutputMismatchedDtype(self):
nums = np.array([1, 2, 3, 4, 5, 6])
with self.assertRaisesRegexp(
TypeError, r"two structures don't have the same nested structure"):
# lambda emits tuple, but dtype is a list
functional_ops.map_fn(
lambda x: ((x + 3) * 2, -(x + 3) * 2),
nums,
dtype=[dtypes.int64, dtypes.int64])
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSingleOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = functional_ops.map_fn(
lambda x: x[0] * x[1][0] + x[1][1], (nums, (nums, -nums)),
dtype=dtypes.int64)
self.assertEqual((6,), r.get_shape())
received = self.evaluate(r)
self.assertAllEqual(nums * nums + (-nums), received)
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSameStructureOutput(self):
nums = np.array([1, 2, 3, 4, 5, 6])
r = functional_ops.map_fn(lambda x: (x[1][0], (x[1][1], x[0])),
(nums, (2 * nums, -nums)))
r = [r[0], r[1][0], r[1][1]]
self.assertEqual((6,), r[0].get_shape())
self.assertEqual((6,), r[1].get_shape())
self.assertEqual((6,), r[2].get_shape())
received = self.evaluate(r)
self.assertAllEqual(2 * nums, received[0])
self.assertAllEqual(-nums, received[1])
self.assertAllEqual(nums, received[2])
@test_util.run_in_graph_and_eager_modes
def testScan_Simple(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems)
self.assertAllEqual([1., 2., 6., 24., 120., 720.], self.evaluate(r))
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
self.assertAllEqual([2., 4., 12., 48., 240., 1440.], self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_Reverse(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems,
reverse=True)
self.assertAllEqual([720., 720., 360., 120., 30., 6.], self.evaluate(r))
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v,
reverse=True)
self.assertAllEqual([1440., 1440., 720., 240., 60., 12.],
self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = (np.array(1.0), np.array(-1.0))
r = functional_ops.scan(lambda a, x: (a[0] * x, -a[1] * x), elems,
initializer)
r_value = self.evaluate(r)
self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r_value[0])
self.assertAllEqual([1.0, -2.0, 6.0, -24.0, 120.0, -720.0], r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
r = functional_ops.scan(lambda a, x: a * (x[0] + x[1]),
(elems + 1, -elems), initializer)
self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSameTypeOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
r = functional_ops.scan(lambda a, x: (a[0] + x[0], a[1] + x[1]),
(elems, -elems))
r_value = self.evaluate(r)
self.assertAllEqual(np.cumsum(elems), r_value[0])
self.assertAllEqual(np.cumsum(-elems), r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiOutputMismatchedInitializer(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
with self.assertRaisesRegexp(
ValueError, "two structures don't have the same nested structure"):
functional_ops.scan(lambda a, x: (a, -a), elems, initializer)
def testScan_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.scan(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
results = np.array([1, 6, 18, 44, 98, 208])
self.assertAllEqual(results, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.scan(simple_scoped_fn, elems, initializer=2)
self.assertEqual(len(variables.trainable_variables()), 1)
results = np.array([6, 16, 38, 84, 178, 368])
self.assertAllEqual(results, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScanFoldl_Nested(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0], name="data")
inner_elems = constant_op.constant([0.5, 0.5], name="data")
def r_inner(a, x):
return functional_ops.foldl(
lambda b, y: b * y * x, inner_elems, initializer=a)
r = functional_ops.scan(r_inner, elems)
# t == 0 (returns 1)
# t == 1, a == 1, x == 2 (returns 1)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1
# t_1 == 1, b == 1, y == 0.5, returns b * y * x = 1
# t == 2, a == 1, x == 3 (returns 1.5*1.5 == 2.25)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1.5
# t_1 == 1, b == 1.5, y == 0.5, returns b * y * x = 1.5*1.5
# t == 3, a == 2.25, x == 4 (returns 9)
# t_0 == 0, b == a == 2.25, y == 0.5, returns b * y * x = 4.5
# t_1 == 1, b == 4.5, y == 0.5, returns b * y * x = 9
self.assertAllClose([1., 1., 2.25, 9.], self.evaluate(r))
def testScan_Control(self):
with self.cached_session() as sess:
s = array_ops.placeholder(dtypes.float32, shape=[None])
b = array_ops.placeholder(dtypes.bool)
with ops.control_dependencies([b]):
c = functional_ops.scan(lambda a, x: x * a, s)
self.assertAllClose(
np.array([1.0, 3.0, 9.0]), sess.run(c, {s: [1, 3, 3],
b: True}))
def testScan_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
# pylint: enable=unnecessary-lambda
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(873.0, self.evaluate(r))
def testScanGradientWithPartStopGradient(self):
a = variables.Variable(0.0, name="a")
b = variables.Variable(0.0, name="b")
elems = array_ops.zeros(5)
l0, l1 = functional_ops.scan(
lambda elem_, input_: (a, b), elems, initializer=(0., 0.))
loss = l0 + array_ops.stop_gradient(l1)
grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
sess.run(grad)
@test_util.run_in_graph_and_eager_modes
def testFoldShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
def fn(_, current_input):
return current_input
initializer = constant_op.constant([0, 0, 0])
y = functional_ops.foldl(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
@test_util.run_in_graph_and_eager_modes
def testMapShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
y = functional_ops.map_fn(lambda e: e, x)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
def testMapUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
y = functional_ops.map_fn(lambda e: e, x)
self.assertIs(None, y.get_shape().dims)
@test_util.run_in_graph_and_eager_modes
def testMapEmptyScalar(self):
map_return = functional_ops.map_fn(lambda x: 1, constant_op.constant([]))
self.assertAllEqual([0], map_return.get_shape().dims)
self.assertAllEqual([0], self.evaluate(map_return).shape)
  # TODO(akshayka): this test fails in eager: the iterable is of length 0 so
  # the body of the while loop never executes
def testMapEmptyTensor(self):
with self.cached_session():
map_return = functional_ops.map_fn(lambda x: array_ops.zeros([3, 2]),
constant_op.constant([]))
self.assertAllEqual([0, 3, 2], map_return.get_shape().dims)
self.assertAllEqual([0, 3, 2], self.evaluate(map_return).shape)
@test_util.run_in_graph_and_eager_modes
def testScanShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
def fn(_, current_input):
return current_input
initializer = constant_op.constant([0, 0, 0])
y = functional_ops.scan(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
  # TODO(akshayka): this test fails in eager: the iterable is of length 0 so
  # the body of the while loop never executes
def testScanEmptyTensor(self):
with self.cached_session():
x = functional_ops.scan(
lambda x, _: x, math_ops.range(0), initializer=array_ops.ones([2, 4]))
self.assertAllEqual([0, 2, 4], x.get_shape())
self.assertAllEqual(x.get_shape(), self.evaluate(x).shape)
def testScanUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
initializer = array_ops.placeholder(dtypes.float32)
def fn(_, current_input):
return current_input
y = functional_ops.scan(fn, x, initializer=initializer)
self.assertIs(None, y.get_shape().dims)
def testScanVaryingShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 2])
x_t = array_ops.transpose(x)
# scan over dimension 0 (with shape None)
result = functional_ops.scan(lambda a, x: a + x, x)
# scanned over transposed dimension 0 (with shape 2)
result_t = functional_ops.scan(lambda a, x: a + x, x_t, infer_shape=False)
# ensure gradients can be calculated
result_grad = gradients_impl.gradients(result, [x])[0]
result_t_grad = gradients_impl.gradients(result_t, [x_t])[0]
# smoke test to ensure they all evaluate
sess.run([result, result_t, result_grad, result_t_grad],
feed_dict={x: [[1.0, 2.0]]})
def testRemoteFunction(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
1, 1, worker_config=worker_config)
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:worker/replica:0/task:0/cpu:1")
with session.Session(worker[0].target) as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, [6])
def testRemoteFunctionDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:1")
with self.test_session(config=worker_config) as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, [6])
def testRemoteFunctionSameDeviceDirectSession(self):
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b], Tout=[dtypes.int32], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, [6])
def testRemoteFunctionCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/device:GPU:0")[0] + 3.0
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, 9.0)
def testRemoteFunctionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:0")[0] + 3.0
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, 9.0)
def testRemoteFunctionGPUCPUStrings(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.string)
def _remote_fn(inp):
return array_ops.identity(inp)
a = array_ops.constant("a")
with ops.device("/gpu:0"):
remote_op = functional_ops.remote_call(
args=[a], Tout=[dtypes.string], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
ret = sess.run(remote_op)
self.assertAllEqual(ret, [b"a"])
def testRemoteFunctionCrossProcess(self):
workers, _ = test_util.create_local_cluster(2, 1)
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:worker/replica:0/task:1/cpu:0")[0] + 3.0
with session.Session(workers[0].target) as sess:
sess.run(variables.global_variables_initializer())
mul = sess.run(remote_op)
self.assertEqual(mul, 9)
def testIf(self):
@function.Defun(dtypes.float32)
def Twice(x):
return x * 2
@function.Defun(dtypes.float32)
def Thrice(x):
return x * 3 + 1
with self.test_session(use_gpu=False) as sess:
x = array_ops.placeholder(dtypes.float32)
ret = functional_ops.If(math_ops.greater(x, 0), [x], Twice, Thrice)[0]
self.assertAllEqual(sess.run(ret, feed_dict={x: 9.}), 18.)
self.assertAllEqual(sess.run(ret, feed_dict={x: -8.}), -23.)
self.assertAllEqual(sess.run(ret, feed_dict={x: 0.}), 1.)
def testWhile(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
def Run(sess, n):
return sess.run(functional_ops.While([n, 0.], Cond, Body))[1]
with self.session(graph=g, use_gpu=use_gpu) as sess:
self.assertAllEqual(Run(sess, 20.), 210.)
self.assertAllEqual(Run(sess, 100.), 5050.)
def testWhileLowering(self):
def Run(n, fetch_by_name):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
# outputs: [0, n*(n+1)/2]
outputs = functional_ops.While([n, 0.], Cond, Body, name="my_while")
# `outputs` is the list of output tensors of the While op. We
# arbitrarily choose the 0th tensor to get the While op and set the
# lowering attribute on it.
outputs[0].op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
if not fetch_by_name:
fetch = outputs[1]
else:
fetch = "my_while:1"
with self.session(graph=g, use_gpu=use_gpu) as sess:
return sess.run(fetch)
self.assertAllEqual(Run(20., False), 210.)
self.assertAllEqual(Run(20., True), 210.)
self.assertAllEqual(Run(100., False), 5050.)
self.assertAllEqual(Run(100., True), 5050.)
def testWhileError(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def CondReturnsTooManyArgs(n, x):
return n > 0, x
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
@function.Defun(*[dtypes.float32] * 2)
def BodyReturnsTooManyArgs(n, x):
return n - 1, x + n, x
with self.session(graph=g, use_gpu=use_gpu):
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Expected a single scalar.*got 2 tensors."):
functional_ops.While([5., 0.], CondReturnsTooManyArgs,
Body)[0].eval()
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"While loop body returned 3 arguments. Expected: 2"):
functional_ops.While([5., 0.], Cond,
BodyReturnsTooManyArgs)[0].eval()
def testWhileInMultipleSubgraphs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, x): # pylint: disable=unused-argument
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
with self.session(graph=g, use_gpu=use_gpu) as sess:
n = array_ops.placeholder(dtypes.float32)
_, result = functional_ops.While([n, 0.], Cond, Body)
c = constant_op.constant(37.)
self.assertAllEqual(210., sess.run(result, feed_dict={n: 20.}))
self.assertAllEqual(5050., sess.run(result, feed_dict={n: 100.}))
# Test that the result is the same when we run a different subgraph.
self.assertAllEqual(5050.,
sess.run([result, c], feed_dict={n: 100.})[0])
# pylint: disable=cell-var-from-loop
def testWhileCapturedInputs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
v = variables.Variable(1.0)
def TestCond(n, *args):
del args
return n < 10
@function.Defun(*[dtypes.float32] * 2)
def TestUnary(n, x):
return math_ops.add(n, 1), x + n + v
@function.Defun(*[dtypes.float32] * 3)
def TestBinary(n, x, x2):
return math_ops.add(n, 1), x + n + v, x2 + v
with self.session(graph=g, use_gpu=use_gpu) as sess:
result_unary = functional_ops.While(
[1.0, 0.],
function.Defun(*[dtypes.float32] * 2)(TestCond), TestUnary)
result_binary = functional_ops.While(
[1.0, 0., 0.],
function.Defun(*[dtypes.float32] * 3)(TestCond), TestBinary)
sess.run(variables.global_variables_initializer())
assert len(result_unary) == 2
self.assertEqual([10.0, 54.0], sess.run(result_unary))
assert len(result_binary) == 3
self.assertEqual([10.0, 54.0, 9.0], sess.run(result_binary))
def TestCondCapture(n, *args):
del args
return math_ops.to_float(n) + v < 10
with self.assertRaises(ValueError):
_ = functional_ops.While(
[1],
function.Defun(dtypes.int32)(TestCondCapture),
function.Defun(dtypes.int32, dtypes.float32)(TestUnary))
# pylint: enable=cell-var-from-loop
def _tfSum(self, use_gpu, rewrite_with_while):
with ops.Graph().as_default() as g:
with self.session(graph=g, use_gpu=use_gpu) as sess:
@function.Defun(dtypes.int32, dtypes.float32)
def Body(n, x):
return x + math_ops.to_float(n)
xs = [
# 1 + 2 + ... + 20
functional_ops.For(
1, 21, 1, [0.], Body, rewrite_with_while=rewrite_with_while)[0],
# 100 + 99 + ... + 1
functional_ops.For(
100, 0, -1, [0.], Body, rewrite_with_while=rewrite_with_while)
[0],
]
xvals = sess.run(xs)
self.assertAllEqual(210, xvals[0])
self.assertAllEqual(5050, xvals[1])
def testFor(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, False)
def testForWithWhile(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, True)
def testForWithWhileNaming(self):
g = ops.Graph()
with g.as_default():
@function.Defun(dtypes.int32, dtypes.float32, func_name="TestBody")
def TestBody(n, x):
return x + math_ops.to_float(n)
_ = functional_ops.For(
1, 21, 1, [0.], TestBody, rewrite_with_while=True)[0]
names = []
for func in g.as_graph_def().library.function:
names.append(func.signature.name)
self.assertTrue("TestBody" in names)
self.assertTrue("TestBody_Cond" in names)
self.assertTrue("TestBody_Body" in names)
def testForCapturedInputs(self):
v = variables.Variable(1.0)
@function.Defun(dtypes.int32)
def TestNullary(n):
v + math_ops.to_float(n) # pylint: disable=expression-not-assigned
@function.Defun(dtypes.int32, dtypes.float32)
def TestUnary(n, x):
return x + math_ops.to_float(n) + v
@function.Defun(dtypes.int32, dtypes.float32, dtypes.float32)
def TestBinary(n, x, x2):
return x + math_ops.to_float(n) + v, x2 + v
for rewrite_with_while in (True, False):
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu) as sess:
result_nullary = functional_ops.For(
1, 10, 1, [], TestNullary,
rewrite_with_while=rewrite_with_while)
result_unary = functional_ops.For(
1, 10, 1, [0.], TestUnary,
rewrite_with_while=rewrite_with_while)
result_binary = functional_ops.For(
1, 10, 1, [0., 0.], TestBinary,
rewrite_with_while=rewrite_with_while)
sess.run(variables.global_variables_initializer())
assert not result_nullary
# The nullary variant doesn't return anything so we can't easily run it.
# As a total hack, fetch the operation by name and run it.
sess.run(ops.get_default_graph().get_operation_by_name(
"While" if rewrite_with_while else "For"))
assert len(result_unary) == 1
self.assertEqual([54.0], sess.run(result_unary))
assert len(result_binary) == 2
self.assertEqual([54.0, 9.0], sess.run(result_binary))
def _tfMLP(self, xval, wsval, bsval, rewrite_with_while):
# On GPU, don't rewrite using a while loop.
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu):
@function.Defun(dtypes.int32, *[dtypes.float64] * 3)
def MLP(i, a, ws, bs):
a = math_ops.tanh(math_ops.matmul(a, ws[i, :]) + bs[i, :])
return a, ws, bs
ret = functional_ops.For(
0,
wsval.shape[0],
1, [xval, wsval, bsval],
MLP,
rewrite_with_while=rewrite_with_while)[0]
return ret.eval()
def _npMLP(self, xval, wsval, bsval):
for i in range(wsval.shape[0]):
xval = np.tanh(np.dot(xval, wsval[i, :]) + bsval[i, :])
return xval
def _testForMLP(self, rewrite_with_while):
# We construct a 5-layer Multi-Layer Perceptron network here.
# Each layer have the same number of hidden unites (3), and the
# activation function is tanh(). We feed the input (xval) with
# batch size 2.
xval = np.random.normal(size=(2, 3))
wsval = np.random.normal(size=(5, 3, 3))
bsval = np.random.normal(size=(5, 3))
np_ans = self._npMLP(xval, wsval, bsval)
tf_for_ans = self._tfMLP(xval, wsval, bsval, rewrite_with_while)
self.assertAllClose(np_ans, tf_for_ans)
def testForMLP(self):
self._testForMLP(False)
def testForMLPWhile(self):
self._testForMLP(True)
def testForError(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(i, v):
return math_ops.to_float(i) + v
@function.Defun(dtypes.int32, dtypes.float32)
def ReturnsTooManyArgs(unused_i, v):
return v, v
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a scalar"):
functional_ops.For([0], 10, 1, [0.0], Foo)[0].eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Invalid start/limit/delta"):
functional_ops.For(0, 10, -1, [0.0], Foo)[0].eval()
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"For loop body returned 2 arguments. Expected: 1"):
functional_ops.For(0, 10, 1, [0.0], ReturnsTooManyArgs)[0].eval()
def testGradient(self):
@function.Defun(dtypes.float32)
def Poly(x):
# y = 2x^3+3x^2+4x+8
return 2 * x * x * x + 3 * x * x + 4 * x + 8
@function.Defun(dtypes.float32)
def Grad(x):
# dy/dx = dy/dy * dy/dx = 1.0 * (6x^2+6x+4)
return functional_ops.Gradient([x, 1.0], Poly)[0]
with self.test_session(use_gpu=False) as sess:
a = constant_op.constant(0.)
avals = [Poly(a), Grad(a)]
b = constant_op.constant(1.)
bvals = [Poly(b), Grad(b)]
self.assertAllEqual(sess.run(avals), [8., 4.])
self.assertAllEqual(sess.run(bvals), [17., 16.])
# TODO(akshayka): Replace `function.Defun` with tf.contrib.eager.defun` in the
# below test cases.
class PartitionedCallTest(test.TestCase):
def testBasicSingleDevice(self):
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
with ops.device("/cpu:0"):
a = x + x
b = y + y
return a + b
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
def testBasicMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
# if x = 1, y = 2, ...
with ops.device("/cpu:0"):
# a:= 1 + 1 = 2
a = x + x
with ops.device("/cpu:1"):
# b:= 2 + 2 = 4
b = a + y
with ops.device("/cpu:2"):
# c:= 2 + 4 = 6
c = a + b
# a + b + c = 2 + 4 + 6 = 12
return a + b + c
with self.test_session(config=config):
output, = functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body)
self.assertEqual(output.eval(), 12.)
def testBasicMultiDeviceGPU(self):
if not test_util.is_gpu_available():
return
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
with ops.device("/gpu:0"):
a = x + x
b = y + y
with ops.device("/cpu:0"):
c = a + b
return c
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
def testBasicNoDeviceAnnotations(self):
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
a = x + x
b = y + y
return a + b
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
def testShardsRunOnRequestedDevices(self):
config = config_pb2.ConfigProto(device_count={"CPU": 4})
@function.Defun()
def Body():
# Serialize DT_RESOURCE handles as DT_STRINGs, which encode the device on
# which the resource was created, so that we can verify that ops were
# actually run on the requested devices.
#
# TODO(akshayka): Provide a cleaner, more idiomatic API for obtaining the
# name of the device on which a resource lives / for determining the
# device on which an op ran.
with ops.device("/cpu:0"):
s1 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device("/cpu:1"):
s2 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device("/cpu:2"):
s3 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
return s1, s2, s3
with self.test_session(config=config, use_gpu=True) as sess:
outputs = sess.run(functional_ops.partitioned_call(args=[], f=Body))
self.assertIn(compat.as_bytes("CPU:0"), outputs[0])
self.assertIn(compat.as_bytes("CPU:1"), outputs[1])
self.assertIn(compat.as_bytes("CPU:2"), outputs[2])
def testAssignAddResourceVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.Defun()
def AssignAdd():
v.assign_add(1.0)
op = functional_ops.partitioned_call(
args=AssignAdd.captured_inputs, f=AssignAdd)
_ = self.evaluate(variables.global_variables_initializer())
_ = self.evaluate(op)
value = self.evaluate(v.read_value())
self.assertEqual(value, 2.0)
def testFunctionWithResourcesOnDifferentDevices(self):
if not test_util.is_gpu_available():
self.skipTest("No GPUs available.")
with ops.device("/cpu:0"):
v_cpu_zero = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_cpu_zero")
with ops.device("/cpu:1"):
v_cpu_one = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_cpu_one")
with ops.device("/gpu:0"):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_gpu")
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_zero, [1, 2]))
also_cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_one, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, also_cpu_result, gpu_result
defined = function.Defun()(sum_gather)
with self.test_session(
config=config_pb2.ConfigProto(
allow_soft_placement=False,
log_device_placement=True,
device_count={"CPU": 2})) as sess:
sess.run(variables.global_variables_initializer())
expected = sess.run(sum_gather())
result = sess.run(
functional_ops.partitioned_call(
args=defined.captured_inputs, f=defined))
self.assertAllEqual(expected, result)
# Use an invalid executor name to test the plumbing of the executor_type attr.
def testExecutorTypeAttrExecutorNotFound(self):
@function.Defun(dtypes.int32)
def AddFive(x):
return x + 5
op = functional_ops.partitioned_call(
args=[constant_op.constant([1, 2, 3], dtype=dtypes.int32)],
f=AddFive,
executor_type="NON_EXISTENT_EXECUTOR")
with self.assertRaisesRegexp(errors.NotFoundError,
"NON_EXISTENT_EXECUTOR"):
self.evaluate(op)
if __name__ == "__main__":
test.main()
# pylint: enable=invalid-name
|
the-stack_106_15102
|
import json
import threading
import time
import os
import stat
import ssl
from decimal import Decimal
from typing import Union, Optional, Dict, Sequence, Tuple
from numbers import Real
from copy import deepcopy
from aiorpcx import NetAddress
from . import util
from . import constants
from .util import base_units, base_unit_name_to_decimal_point, decimal_point_to_base_unit_name, UnknownBaseUnit, DECIMAL_POINT_DEFAULT
from .util import format_satoshis, format_fee_satoshis
from .util import user_dir, make_dir, NoDynamicFeeEstimates, quantize_feerate
from .i18n import _
from .logging import get_logger, Logger
FEE_ETA_TARGETS = [25, 10, 5, 2]
FEE_DEPTH_TARGETS = [10_000_000, 5_000_000, 2_000_000, 1_000_000,
800_000, 600_000, 400_000, 250_000, 100_000]
FEE_LN_ETA_TARGET = 2 # note: make sure the network is asking for estimates for this target
# satoshi per kbyte
FEERATE_MAX_DYNAMIC = 1500000
FEERATE_WARNING_HIGH_FEE = 600000
FEERATE_FALLBACK_STATIC_FEE = 150000
FEERATE_DEFAULT_RELAY = 1000
FEERATE_MAX_RELAY = 50000
FEERATE_STATIC_VALUES = [1000, 2000, 5000, 10000, 20000, 30000,
50000, 70000, 100000, 150000, 200000, 300000]
FEERATE_REGTEST_HARDCODED = 180000 # for eclair compat
# The min feerate_per_kw that can be used in lightning so that
# the resulting onchain tx pays the min relay fee.
# This would be FEERATE_DEFAULT_RELAY / 4 if not for rounding errors,
# see https://github.com/ElementsProject/lightning/commit/2e687b9b352c9092b5e8bd4a688916ac50b44af0
FEERATE_PER_KW_MIN_RELAY_LIGHTNING = 253
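# Rough arithmetic behind the 253 value (illustrative): a feerate per kw converts
# to sat/kvB by multiplying by 4, so 253 * 4 = 1012 sat/kvB, comfortably above the
# FEERATE_DEFAULT_RELAY floor of 1000, whereas 250 * 4 = 1000 sits exactly on the
# floor and can drop below it once the rounding errors mentioned above are applied.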
FEE_RATIO_HIGH_WARNING = 0.05 # warn user if fee/amount for on-chain tx is higher than this
_logger = get_logger(__name__)
FINAL_CONFIG_VERSION = 3
class SimpleConfig(Logger):
"""
The SimpleConfig class is responsible for handling operations involving
configuration files.
There are two different sources of possible configuration values:
1. Command line options.
2. User configuration (in the user's config directory)
They are taken in order (1. overrides config options set in 2.)
"""
def __init__(self, options=None, read_user_config_function=None,
read_user_dir_function=None):
if options is None:
options = {}
Logger.__init__(self)
# This lock needs to be acquired for updating and reading the config in
# a thread-safe way.
self.lock = threading.RLock()
self.mempool_fees = None # type: Optional[Sequence[Tuple[Union[float, int], int]]]
self.fee_estimates = {} # type: Dict[int, int]
self.last_time_fee_estimates_requested = 0 # zero ensures immediate fees
# The following two functions are there for dependency injection when
# testing.
if read_user_config_function is None:
read_user_config_function = read_user_config
if read_user_dir_function is None:
self.user_dir = user_dir
else:
self.user_dir = read_user_dir_function
# The command line options
self.cmdline_options = deepcopy(options)
        # this key may not be overridden from the command line:
self.cmdline_options.pop('config_version', None)
# Set self.path and read the user config
self.user_config = {} # for self.get in electrum_path()
self.path = self.electrum_path()
self.user_config = read_user_config_function(self.path)
if not self.user_config:
# avoid new config getting upgraded
self.user_config = {'config_version': FINAL_CONFIG_VERSION}
self._not_modifiable_keys = set()
# config "upgrade" - CLI options
self.rename_config_keys(
self.cmdline_options, {'auto_cycle': 'auto_connect'}, True)
# config upgrade - user config
if self.requires_upgrade():
self.upgrade()
self._check_dependent_keys()
# units and formatting
self.decimal_point = self.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(self.get('num_zeros', 0))
self.amt_precision_post_satoshi = int(self.get('amt_precision_post_satoshi', 0))
self.amt_add_thousands_sep = bool(self.get('amt_add_thousands_sep', False))
def electrum_path(self):
# Read electrum_path from command line
# Otherwise use the user's default data directory.
path = self.get('electrum_path')
if path is None:
path = self.user_dir()
make_dir(path, allow_symlink=False)
if self.get('testnet'):
path = os.path.join(path, 'testnet')
make_dir(path, allow_symlink=False)
elif self.get('regtest'):
path = os.path.join(path, 'regtest')
make_dir(path, allow_symlink=False)
elif self.get('simnet'):
path = os.path.join(path, 'simnet')
make_dir(path, allow_symlink=False)
elif self.get('signet'):
path = os.path.join(path, 'signet')
make_dir(path, allow_symlink=False)
self.logger.info(f"electrum directory {path}")
return path
def rename_config_keys(self, config, keypairs, deprecation_warning=False):
"""Migrate old key names to new ones"""
updated = False
for old_key, new_key in keypairs.items():
if old_key in config:
if new_key not in config:
config[new_key] = config[old_key]
if deprecation_warning:
self.logger.warning('Note that the {} variable has been deprecated. '
'You should use {} instead.'.format(old_key, new_key))
del config[old_key]
updated = True
return updated
def set_key(self, key, value, save=True):
if not self.is_modifiable(key):
self.logger.warning(f"not changing config key '{key}' set on the command line")
return
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f"json error: cannot save {repr(key)} ({repr(value)})")
return
self._set_key_in_user_config(key, value, save)
def _set_key_in_user_config(self, key, value, save=True):
with self.lock:
if value is not None:
self.user_config[key] = value
else:
self.user_config.pop(key, None)
if save:
self.save_user_config()
def get(self, key, default=None):
with self.lock:
out = self.cmdline_options.get(key)
if out is None:
out = self.user_config.get(key, default)
return out
def _check_dependent_keys(self) -> None:
if self.get('serverfingerprint'):
if not self.get('server'):
raise Exception("config key 'serverfingerprint' requires 'server' to also be set")
self.make_key_not_modifiable('server')
def requires_upgrade(self):
return self.get_config_version() < FINAL_CONFIG_VERSION
def upgrade(self):
with self.lock:
self.logger.info('upgrading config')
self.convert_version_2()
self.convert_version_3()
self.set_key('config_version', FINAL_CONFIG_VERSION, save=True)
def convert_version_2(self):
if not self._is_upgrade_method_needed(1, 1):
return
self.rename_config_keys(self.user_config, {'auto_cycle': 'auto_connect'})
try:
# change server string FROM host:port:proto TO host:port:s
server_str = self.user_config.get('server')
host, port, protocol = str(server_str).rsplit(':', 2)
assert protocol in ('s', 't')
int(port) # Throw if cannot be converted to int
server_str = '{}:{}:s'.format(host, port)
self._set_key_in_user_config('server', server_str)
except BaseException:
self._set_key_in_user_config('server', None)
self.set_key('config_version', 2)
def convert_version_3(self):
if not self._is_upgrade_method_needed(2, 2):
return
base_unit = self.user_config.get('base_unit')
if isinstance(base_unit, str):
self._set_key_in_user_config('base_unit', None)
map_ = {'part':8, 'mpart':5, 'upart':2, 'bits':2, 'sat':0}
decimal_point = map_.get(base_unit.lower())
self._set_key_in_user_config('decimal_point', decimal_point)
self.set_key('config_version', 3)
def _is_upgrade_method_needed(self, min_version, max_version):
cur_version = self.get_config_version()
if cur_version > max_version:
return False
elif cur_version < min_version:
raise Exception(
('config upgrade: unexpected version %d (should be %d-%d)'
% (cur_version, min_version, max_version)))
else:
return True
def get_config_version(self):
config_version = self.get('config_version', 1)
if config_version > FINAL_CONFIG_VERSION:
self.logger.warning('config version ({}) is higher than latest ({})'
.format(config_version, FINAL_CONFIG_VERSION))
return config_version
def is_modifiable(self, key) -> bool:
return (key not in self.cmdline_options
and key not in self._not_modifiable_keys)
def make_key_not_modifiable(self, key) -> None:
self._not_modifiable_keys.add(key)
def save_user_config(self):
if self.get('forget_config'):
return
if not self.path:
return
path = os.path.join(self.path, "config")
s = json.dumps(self.user_config, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
except FileNotFoundError:
# datadir probably deleted while running...
if os.path.exists(self.path): # or maybe not?
raise
def get_backup_dir(self):
# this is used to save wallet file backups (without active lightning channels)
# on Android, the export backup button uses android_backup_dir()
if 'ANDROID_DATA' in os.environ:
return None
else:
return self.get('backup_dir')
def get_wallet_path(self, *, use_gui_last_wallet=False):
"""Set the path of the wallet."""
# command line -w option
if self.get('wallet_path'):
return os.path.join(self.get('cwd', ''), self.get('wallet_path'))
if use_gui_last_wallet:
path = self.get('gui_last_wallet')
if path and os.path.exists(path):
return path
new_path = self.get_fallback_wallet_path()
# default path in pre 1.9 versions
old_path = os.path.join(self.path, "electrum.dat")
if os.path.exists(old_path) and not os.path.exists(new_path):
os.rename(old_path, new_path)
return new_path
def get_fallback_wallet_path(self):
util.assert_datadir_available(self.path)
dirpath = os.path.join(self.path, "wallets")
make_dir(dirpath, allow_symlink=False)
path = os.path.join(self.path, "wallets", "default_wallet")
return path
def remove_from_recently_open(self, filename):
recent = self.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.set_key('recently_open', recent)
def set_session_timeout(self, seconds):
self.logger.info(f"session timeout -> {seconds} seconds")
self.set_key('session_timeout', seconds)
def get_session_timeout(self):
return self.get('session_timeout', 300)
def save_last_wallet(self, wallet):
if self.get('wallet_path') is None:
path = wallet.storage.path
self.set_key('gui_last_wallet', path)
def impose_hard_limits_on_fee(func):
def get_fee_within_limits(self, *args, **kwargs):
fee = func(self, *args, **kwargs)
if fee is None:
return fee
fee = min(FEERATE_MAX_DYNAMIC, fee)
fee = max(FEERATE_DEFAULT_RELAY, fee)
return fee
return get_fee_within_limits
def eta_to_fee(self, slider_pos) -> Optional[int]:
"""Returns fee in sat/kbyte."""
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_ETA_TARGETS))
if slider_pos < len(FEE_ETA_TARGETS):
num_blocks = FEE_ETA_TARGETS[int(slider_pos)]
fee = self.eta_target_to_fee(num_blocks)
else:
fee = self.eta_target_to_fee(1)
return fee
@impose_hard_limits_on_fee
def eta_target_to_fee(self, num_blocks: int) -> Optional[int]:
"""Returns fee in sat/kbyte."""
if num_blocks == 1:
fee = self.fee_estimates.get(2)
if fee is not None:
fee += fee / 2
fee = int(fee)
else:
fee = self.fee_estimates.get(num_blocks)
if fee is not None:
fee = int(fee)
return fee
def fee_to_depth(self, target_fee: Real) -> Optional[int]:
"""For a given sat/vbyte fee, returns an estimate of how deep
it would be in the current mempool in vbytes.
Pessimistic == overestimates the depth.
"""
if self.mempool_fees is None:
return None
depth = 0
for fee, s in self.mempool_fees:
depth += s
if fee <= target_fee:
break
return depth
def depth_to_fee(self, slider_pos) -> Optional[int]:
"""Returns fee in sat/kbyte."""
target = self.depth_target(slider_pos)
return self.depth_target_to_fee(target)
@impose_hard_limits_on_fee
def depth_target_to_fee(self, target: int) -> Optional[int]:
"""Returns fee in sat/kbyte.
target: desired mempool depth in vbytes
"""
if self.mempool_fees is None:
return None
depth = 0
for fee, s in self.mempool_fees:
depth += s
if depth > target:
break
else:
return 0
# add one sat/byte as currently that is the max precision of the histogram
# note: precision depends on server.
# old ElectrumX <1.16 has 1 s/b prec, >=1.16 has 0.1 s/b prec.
# electrs seems to use untruncated double-precision floating points.
# # TODO decrease this to 0.1 s/b next time we bump the required protocol version
fee += 1
# convert to sat/kbyte
return int(fee * 1000)
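    # Worked example (hedged, numbers are illustrative only): with
    # mempool_fees = [(50, 400_000), (10, 800_000)] as (sat/vbyte, vbytes)
    # pairs sorted highest-fee first, and target = 1_000_000 vbytes, the loop
    # breaks once the cumulative depth 400_000 + 800_000 exceeds the target at
    # fee = 10 sat/vbyte, so the method returns (10 + 1) * 1000 = 11_000 sat/kvB.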
def depth_target(self, slider_pos: int) -> int:
"""Returns mempool depth target in bytes for a fee slider position."""
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_DEPTH_TARGETS)-1)
return FEE_DEPTH_TARGETS[slider_pos]
def eta_target(self, slider_pos: int) -> int:
"""Returns 'num blocks' ETA target for a fee slider position."""
if slider_pos == len(FEE_ETA_TARGETS):
return 1
return FEE_ETA_TARGETS[slider_pos]
def fee_to_eta(self, fee_per_kb: Optional[int]) -> int:
"""Returns 'num blocks' ETA estimate for given fee rate,
or -1 for low fee.
"""
import operator
lst = list(self.fee_estimates.items())
next_block_fee = self.eta_target_to_fee(1)
if next_block_fee is not None:
lst += [(1, next_block_fee)]
if not lst or fee_per_kb is None:
return -1
dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), lst)
min_target, min_value = min(dist, key=operator.itemgetter(1))
if fee_per_kb < self.fee_estimates.get(FEE_ETA_TARGETS[0])/2:
min_target = -1
return min_target
def depth_tooltip(self, depth: Optional[int]) -> str:
"""Returns text tooltip for given mempool depth (in vbytes)."""
if depth is None:
return "unknown from tip"
return "%.1f MB from tip" % (depth/1_000_000)
def eta_tooltip(self, x):
if x < 0:
return _('Low fee')
elif x == 1:
return _('In the next block')
else:
return _('Within {} blocks').format(x)
def get_fee_target(self):
dyn = self.is_dynfee()
mempool = self.use_mempool_fees()
pos = self.get_depth_level() if mempool else self.get_fee_level()
fee_rate = self.fee_per_kb()
target, tooltip = self.get_fee_text(pos, dyn, mempool, fee_rate)
return target, tooltip, dyn
def get_fee_status(self):
target, tooltip, dyn = self.get_fee_target()
return tooltip + ' [%s]'%target if dyn else target + ' [Static]'
def get_fee_text(
self,
slider_pos: int,
dyn: bool,
mempool: bool,
fee_per_kb: Optional[int],
):
"""Returns (text, tooltip) where
text is what we target: static fee / num blocks to confirm in / mempool depth
tooltip is the corresponding estimate (e.g. num blocks for a static fee)
fee_rate is in sat/kbyte
"""
if fee_per_kb is None:
rate_str = 'unknown'
fee_per_byte = None
else:
fee_per_byte = fee_per_kb/1000
rate_str = format_fee_satoshis(fee_per_byte) + ' sat/byte'
if dyn:
if mempool:
depth = self.depth_target(slider_pos)
text = self.depth_tooltip(depth)
else:
eta = self.eta_target(slider_pos)
text = self.eta_tooltip(eta)
tooltip = rate_str
else: # using static fees
assert fee_per_kb is not None
assert fee_per_byte is not None
text = rate_str
if mempool and self.has_fee_mempool():
depth = self.fee_to_depth(fee_per_byte)
tooltip = self.depth_tooltip(depth)
elif not mempool and self.has_fee_etas():
eta = self.fee_to_eta(fee_per_kb)
tooltip = self.eta_tooltip(eta)
else:
tooltip = ''
return text, tooltip
def get_depth_level(self):
maxp = len(FEE_DEPTH_TARGETS) - 1
return min(maxp, self.get('depth_level', 2))
def get_fee_level(self):
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
return min(maxp, self.get('fee_level', 2))
def get_fee_slider(self, dyn, mempool) -> Tuple[int, int, Optional[int]]:
if dyn:
if mempool:
pos = self.get_depth_level()
maxp = len(FEE_DEPTH_TARGETS) - 1
fee_rate = self.depth_to_fee(pos)
else:
pos = self.get_fee_level()
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
fee_rate = self.eta_to_fee(pos)
else:
fee_rate = self.fee_per_kb(dyn=False)
pos = self.static_fee_index(fee_rate)
maxp = len(FEERATE_STATIC_VALUES) - 1
return maxp, pos, fee_rate
def static_fee(self, i):
return FEERATE_STATIC_VALUES[i]
def static_fee_index(self, fee_per_kb: Optional[int]) -> int:
if fee_per_kb is None:
raise TypeError('static fee cannot be None')
dist = list(map(lambda x: abs(x - fee_per_kb), FEERATE_STATIC_VALUES))
return min(range(len(dist)), key=dist.__getitem__)
def has_fee_etas(self):
return len(self.fee_estimates) == 4
def has_fee_mempool(self) -> bool:
return self.mempool_fees is not None
def has_dynamic_fees_ready(self):
if self.use_mempool_fees():
return self.has_fee_mempool()
else:
return self.has_fee_etas()
def is_dynfee(self):
return bool(self.get('dynamic_fees', True))
def use_mempool_fees(self):
return bool(self.get('mempool_fees', False))
def _feerate_from_fractional_slider_position(self, fee_level: float, dyn: bool,
mempool: bool) -> Union[int, None]:
fee_level = max(fee_level, 0)
fee_level = min(fee_level, 1)
if dyn:
max_pos = (len(FEE_DEPTH_TARGETS) - 1) if mempool else len(FEE_ETA_TARGETS)
slider_pos = round(fee_level * max_pos)
fee_rate = self.depth_to_fee(slider_pos) if mempool else self.eta_to_fee(slider_pos)
else:
max_pos = len(FEERATE_STATIC_VALUES) - 1
slider_pos = round(fee_level * max_pos)
fee_rate = FEERATE_STATIC_VALUES[slider_pos]
return fee_rate
def fee_per_kb(self, dyn: bool=None, mempool: bool=None, fee_level: float=None) -> Optional[int]:
"""Returns sat/kvB fee to pay for a txn.
Note: might return None.
fee_level: float between 0.0 and 1.0, representing fee slider position
"""
if constants.net is constants.ParticlRegtest:
return FEERATE_REGTEST_HARDCODED
if dyn is None:
dyn = self.is_dynfee()
if mempool is None:
mempool = self.use_mempool_fees()
if fee_level is not None:
return self._feerate_from_fractional_slider_position(fee_level, dyn, mempool)
# there is no fee_level specified; will use config.
# note: 'depth_level' and 'fee_level' in config are integer slider positions,
# unlike fee_level here, which (when given) is a float in [0.0, 1.0]
if dyn:
if mempool:
fee_rate = self.depth_to_fee(self.get_depth_level())
else:
fee_rate = self.eta_to_fee(self.get_fee_level())
else:
fee_rate = self.get('fee_per_kb', FEERATE_FALLBACK_STATIC_FEE)
if fee_rate is not None:
fee_rate = int(fee_rate)
return fee_rate
def fee_per_byte(self):
"""Returns sat/vB fee to pay for a txn.
Note: might return None.
"""
fee_per_kb = self.fee_per_kb()
return fee_per_kb / 1000 if fee_per_kb is not None else None
def estimate_fee(self, size: Union[int, float, Decimal], *,
allow_fallback_to_static_rates: bool = False) -> int:
fee_per_kb = self.fee_per_kb()
if fee_per_kb is None:
if allow_fallback_to_static_rates:
fee_per_kb = FEERATE_FALLBACK_STATIC_FEE
else:
raise NoDynamicFeeEstimates()
return self.estimate_fee_for_feerate(fee_per_kb, size)
@classmethod
def estimate_fee_for_feerate(cls, fee_per_kb: Union[int, float, Decimal],
size: Union[int, float, Decimal]) -> int:
size = Decimal(size)
fee_per_kb = Decimal(fee_per_kb)
fee_per_byte = fee_per_kb / 1000
# to be consistent with what is displayed in the GUI,
# the calculation needs to use the same precision:
fee_per_byte = quantize_feerate(fee_per_byte)
return round(fee_per_byte * size)
def update_fee_estimates(self, nblock_target: int, fee_per_kb: int):
assert isinstance(nblock_target, int), f"expected int, got {nblock_target!r}"
assert isinstance(fee_per_kb, int), f"expected int, got {fee_per_kb!r}"
self.fee_estimates[nblock_target] = fee_per_kb
def is_fee_estimates_update_required(self):
"""Checks time since last requested and updated fee estimates.
Returns True if an update should be requested.
"""
now = time.time()
return now - self.last_time_fee_estimates_requested > 60
def requested_fee_estimates(self):
self.last_time_fee_estimates_requested = time.time()
def get_video_device(self):
device = self.get("video_device", "default")
if device == 'default':
device = ''
return device
def get_ssl_context(self):
ssl_keyfile = self.get('ssl_keyfile')
ssl_certfile = self.get('ssl_certfile')
if ssl_keyfile and ssl_certfile:
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(ssl_certfile, ssl_keyfile)
return ssl_context
def get_ssl_domain(self):
from .paymentrequest import check_ssl_config
if self.get('ssl_keyfile') and self.get('ssl_certfile'):
SSL_identity = check_ssl_config(self)
else:
SSL_identity = None
return SSL_identity
def get_netaddress(self, key: str) -> Optional[NetAddress]:
text = self.get(key)
if text:
try:
return NetAddress.from_string(text)
except:
pass
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=self.num_zeros,
decimal_point=self.decimal_point,
is_diff=is_diff,
whitespaces=whitespaces,
precision=self.amt_precision_post_satoshi,
add_thousands_sep=self.amt_add_thousands_sep,
)
def format_amount_and_units(self, amount):
return self.format_amount(amount) + ' '+ self.get_base_unit()
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def set_base_unit(self, unit):
assert unit in base_units.keys()
self.decimal_point = base_unit_name_to_decimal_point(unit)
self.set_key('decimal_point', self.decimal_point, True)
def get_decimal_point(self):
return self.decimal_point
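# Hedged sketch of the fee arithmetic implemented by
# SimpleConfig.estimate_fee_for_feerate(): fee_per_kb is in sat/kvB, size is in
# vbytes, and the result is rounded at the same quantized sat/vB precision the
# GUI displays. Illustrative only; it still relies on this module's package
# imports resolving.
def _example_fee_for_feerate():
    # 150_000 sat/kvB on a 225 vbyte transaction -> 150.0 sat/vB * 225 = 33750 sat
    return SimpleConfig.estimate_fee_for_feerate(fee_per_kb=150_000, size=225)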
def read_user_config(path):
"""Parse and store the user config settings in electrum.conf into user_config[]."""
if not path:
return {}
config_path = os.path.join(path, "config")
if not os.path.exists(config_path):
return {}
try:
with open(config_path, "r", encoding='utf-8') as f:
data = f.read()
result = json.loads(data)
except:
_logger.warning(f"Cannot read config file. {config_path}")
return {}
if not type(result) is dict:
return {}
return result
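# Hedged usage sketch (not part of the original module): command-line options
# take precedence over the user config file, and keys set on the command line
# are refused by set_key(). Constructing SimpleConfig touches the data
# directory, so treat this as documentation rather than a test.
#
#   cfg = SimpleConfig(options={'verbosity': '*'})
#   cfg.get('verbosity')            # '*', served from cmdline_options
#   cfg.set_key('verbosity', '')    # logged warning, value unchanged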
|
the-stack_106_15107
|
"""
Import COUNCIL
"""
import sys
from django.contrib.gis.geos import Point, GEOSGeometry
from data_collection.management.commands import BaseCsvStationsKmlDistrictsImporter
class Command(BaseCsvStationsKmlDistrictsImporter):
"""
Imports the Polling Station data from COUNCIL
"""
council_id = "COUNCIL_ID"
districts_name = "DISTRICT_FILE.kmz"
stations_name = "STATION_FILE.csv"
"""
List of Election IDs the data imported by this script relates to
https://democracyclub.org.uk/projects/election-ids/
"""
elections = []
# we must always implement station_record_to_dict()
def station_record_to_dict(self, record):
print("Station: ", record)
sys.exit(1)
try:
location = Point(
int(record.point_x), int(record.point_y), srid=self.get_srid()
)
except ValueError:
location = Point(
float(record.point_x), float(record.point_y), srid=self.get_srid()
)
return {
"internal_council_id": record.polling_di,
"postcode": "(no postcode)",
"address": "\n".join([record.building, record.road, record.town_villa]),
"location": location,
}
# sometimes it may also be necessary to override district_record_to_dict()
def district_record_to_dict(self, record):
print("District: ", record)
sys.exit(1)
geojson = self.strip_z_values(record.geom.geojson)
poly = self.clean_poly(GEOSGeometry(geojson, srid=self.get_srid("districts")))
return {
"internal_council_id": record["Name"].value,
"name": record["Name"].value,
"area": poly,
}
|
the-stack_106_15110
|
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
# from __future__ import (absolute_import, division)
from copy import deepcopy
from math import log, exp, sqrt
import sys
import numpy as np
from numpy import eye, zeros, dot, isscalar, outer, empty
from .stats import logpdf
from warnings import warn
from numba import njit
@njit(cache=True)
def cross_variance(Wc, x, z, sigmas_f, sigmas_h):
"""
Compute cross variance of the state `x` and measurement `z`.
"""
Pxz = zeros((sigmas_f.shape[1], sigmas_h.shape[1]))
N = sigmas_f.shape[0]
for i in range(N):
dx = sigmas_f[i] - x
dz = sigmas_h[i] - z
Pxz += Wc[i] * outer(dx, dz)
return Pxz
@njit(cache=True)
def update(z, P, x, Wc, Wm, sigmas_f, sigmas_h):
"""
Update the UKF with the given measurements. On return,
x and P contain the new mean and covariance of the filter.
"""
# mean and covariance of prediction passed through unscented transform
zp, S = unscented_transform(sigmas_h, Wm, Wc)
# compute cross variance of the state and the measurements
Pxz = cross_variance(Wc, x, zp, sigmas_f, sigmas_h)
# Pxz = Pxz[-5:]
y = z - zp # residual
SI = np.linalg.pinv(S)
K = Pxz @ SI # Kalman gain
# update Gaussian state estimate (x, P)
x += K @ y
P -= K @ S @ K.T
    # also return the innovation covariance S and residual y so callers
    # (e.g. batch_filter) can accumulate the log-likelihood
    return x, P, S, y
@njit(cache=True)
def unscented_transform(sigmas, Wm, Wc, noise_cov=0):
r"""
Computes unscented transform of a set of sigma points and weights.
"""
x = Wm @ sigmas
# new covariance is the sum of the outer product of the residuals times the weights
y = sigmas - x.reshape(1, -1)
P = y.T*Wc @ y
P += noise_cov
return (x, P)
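# Hedged sanity check for unscented_transform(): with three 1-D sigma points
# and uniform weights it reduces to the ordinary weighted mean and variance,
# i.e. x = sum_i Wm[i]*sigma[i] and P = sum_i Wc[i]*(sigma[i]-x)(sigma[i]-x)^T.
# Names below are local to this example.
def _unscented_transform_example():
    sigmas = np.array([[0.0], [1.0], [2.0]])
    Wm = Wc = np.full(3, 1.0 / 3.0)
    x, P = unscented_transform(sigmas, Wm, Wc)
    return x, P  # x == [1.0], P == [[2/3]]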
class UnscentedKalmanFilter(object):
def __init__(self, dim_x, dim_z, hx, fx, points, instant_warning=False):
"""
Create a Kalman filter. You are responsible for setting the
various state variables to reasonable values; the defaults below will
not give you a functional filter.
"""
self.x = empty(dim_x)
self.P = eye(dim_x)
self.Q = eye(dim_x)
self._dim_x = dim_x
self._dim_z = dim_z
self.points_fn = points
self.hx = hx
self.fx = fx
self.instant_warning = instant_warning
self._dim_sig = 0
self.flag = False
def predict(self, fx=None, **fx_args):
r"""
Performs the predict step of the UKF. On return, self.x and
        self.P contain the predicted state (x) and covariance (P).
Important: this MUST be called before update() is called for the first
time.
Parameters
----------
fx : callable f(x, **fx_args), optional
State transition function. If not provided, the default
function passed in during construction will be used.
**fx_args : keyword arguments
optional keyword arguments to be passed into f(x).
"""
# calculate sigma points for given mean and covariance
self.compute_process_sigmas(fx, **fx_args)
# and pass sigmas through the unscented transform to compute prior
self.x, self.P = unscented_transform(
self.sigmas_f, self.Wm, self.Wc, self.Q)
def compute_process_sigmas(self, fx=None, **fx_args):
"""
computes the values of sigmas_f. Normally a user would not call
this, but it is useful if you need to call update more than once
between calls to predict (to update for multiple simultaneous
measurements), so the sigmas correctly reflect the updated state
x, P.
"""
if fx is None:
fx = self.fx
sigmas, dim_sig = self.points_fn.sigma_points(self.x, self.P)
        if dim_sig != self._dim_sig:
self.Wc, self.Wm = self.points_fn.compute_weights(dim_sig)
self._dim_sig = dim_sig
if not hasattr(self, 'sigmas_f'):
self.sigmas_f = empty((sigmas.shape[0], self._dim_x))
self.sigmas_h = empty((sigmas.shape[0], self._dim_z))
elif self.sigmas_f.shape[0] != sigmas.shape[0]:
self.sigmas_f = empty((sigmas.shape[0], self._dim_x))
self.sigmas_h = empty((sigmas.shape[0], self._dim_z))
for i, s in enumerate(sigmas):
x, flag = fx(s, **fx_args)
self.sigmas_f[i] = x
self.sigmas_h[i] = self.hx(x)
if flag:
if self.flag and self.flag is not flag:
self.flag = 3
else:
self.flag = flag
def batch_filter(self, zs, Rs=None):
"""
Performs the UKF filter over the list of measurement in `zs`.
"""
try:
z = zs[0]
except TypeError:
raise TypeError('zs must be list-like')
if self._dim_z == 1:
if not(isscalar(z) or (z.ndim == 1 and len(z) == 1)):
raise TypeError(
'zs must be a list of scalars or 1D, 1 element arrays')
else:
if len(z) != self._dim_z:
raise TypeError(
'each element in zs must be a 1D array of length {}'.format(self._dim_z))
# necessary to re-initialize?
self.x = np.zeros(self._dim_x)
# self.P = eye(self._dim_x)
z_n = np.size(zs, 0)
# mean estimates from Kalman Filter
means = empty((z_n, self._dim_x))
# state covariances from Kalman Filter
covariances = empty((z_n, self._dim_x, self._dim_x))
ll = 0
for i, z in enumerate(zs):
self.predict()
self.x, self.P, S, y = update(
z, self.P, self.x, self.Wc, self.Wm, self.sigmas_f, self.sigmas_h)
means[i, :] = self.x
covariances[i, :, :] = self.P
ll += logpdf(x=y, cov=S)
if self.flag:
warn('Error in transition function during filtering. Code '+str(self.flag))
return (means, covariances, ll)
def rts_smoother(self, Xs, Ps, Qs=None):
"""
        Runs the Rauch-Tung-Striebel Kalman smoother on a set of
means and covariances computed by the UKF. The usual input
would come from the output of `batch_filter()`.
"""
if len(Xs) != len(Ps):
raise ValueError('Xs and Ps must have the same length')
n, dim_x = Xs.shape
self._dim_sig = 0
if Qs is None:
Qs = [self.Q] * n
# smoother gain
Ks = empty((n, dim_x, dim_x))
xs, ps = Xs.copy(), Ps.copy()
for k in reversed(range(n-1)):
# create sigma points from state estimate, pass through state func
# sigmas, Wc, Wm = self.points_fn.sigma_points(xs[k], ps[k])
sigmas, dim_sig = self.points_fn.sigma_points(xs[k], ps[k])
            if dim_sig != self._dim_sig:
Wc, Wm = self.points_fn.compute_weights(dim_sig)
self._dim_sig = dim_sig
num_sigmas = sigmas.shape[0]
sigmas_f = empty((num_sigmas, dim_x))
for i in range(num_sigmas):
sigmas_f[i], flag = self.fx(sigmas[i])
                if flag:
                    if self.instant_warning:
                        warn(
                            'Errors in transition function during smoothing. Code '+str(flag))
                    # keep the first error code seen; 3 signals that different
                    # error codes occurred during smoothing
                    if self.flag and self.flag != flag:
                        self.flag = 3
                    else:
                        self.flag = flag
xb, Pb = unscented_transform(sigmas_f, Wm, Wc, self.Q)
Pxb = cross_variance(Wc, Xs[k], xb, sigmas, sigmas_f)
# compute gain
K = Pxb @ np.linalg.pinv(Pb)
# update the smoothed estimates
y = xs[k+1] - xb
xs[k] += K @ y
ps[k] += K @ (ps[k+1] - Pb) @ K.T
Ks[k] = K
if self.flag:
warn('Errors in transition function during smoothing. Code '+str(self.flag))
return (xs, ps, Ks)
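# Hedged usage sketch (not runnable as-is): the `points` object must provide
# sigma_points() and compute_weights() with the signatures used above, and
# fx/hx must return (state, flag) and a measurement vector respectively.
#
#   ukf = UnscentedKalmanFilter(dim_x, dim_z, hx, fx, points)
#   means, covs, ll = ukf.batch_filter(zs)
#   xs, ps, Ks = ukf.rts_smoother(means, covs)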
|
the-stack_106_15111
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('job', '0008_jobtype_trigger_rule'),
]
operations = [
migrations.AddField(
model_name='jobtype',
name='revision_num',
field=models.IntegerField(default=1),
preserve_default=True,
),
]
|
the-stack_106_15112
|
"""
A python program to implement Minimum Falling Path Sum
Given an n x n array of integers matrix, return the minimum sum of any falling path through matrix.
A falling path starts at any element in the first row and chooses the element in the next row that is either directly below or diagonally left/right. Specifically, the next element from position (row, col) will be (row + 1, col - 1), (row + 1, col), or (row + 1, col + 1).
"""
class Solution(object):
def minFallingPathSum(self, A):
"""
:type A: List[List[int]]
:rtype: int
"""
def children(row, col):
if row < rows - 1:
yield row + 1, col
if col > 0: yield row + 1, col - 1
if col < cols - 1: yield row + 1, col + 1
def search(row, col):
if (row, col) in mem: return mem[row, col]
mem[row, col] = A[row][col]
childs = list(search(r, c) for r, c in children(row, col))
if childs: mem[row, col] += min(childs)
return mem[row, col]
if not A: return 0
rows, cols = len(A), len(A[0])
mem = {}
return min(search(0, c) for c in range(cols))
"""
Sample Input/Output:
Example 1:
Input: matrix = [[2,1,3],[6,5,4],[7,8,9]]
Output: 13
Example 2:
Input: matrix = [[-19,57],[-40,-5]]
Output: -59
"""
|
the-stack_106_15115
|
import time
import joblib
import numpy as np
from sklearn.base import clone
from .base import Explainer
from .parsers import util
class DShap(Explainer):
"""
Explainer that approx. data Shapley values using
the TMC-Shapley algorithm.
Local-Influence Semantics
    - Inf.(x_i, x_t) = Avg. L(y_t, f_{w/o x_i}(x_t)) - L(y_t, f(x_t))
over all possible permutations of the training data.
- Pos. value means a decrease in test loss (a.k.a. proponent, helpful).
- Neg. value means an increase in test loss (a.k.a. opponent, harmful).
Reference
- https://github.com/amiratag/DataShapley
Paper
- http://proceedings.mlr.press/v97/ghorbani19c.html
Note
- Supports both GBDTs and RFs.
- No validation set, we are computing loss on training or ONE test example;
      thus, there is no average loss score and no `tolerance` parameter
      for early truncation.
* However, we can use a hard truncation limit via `trunc_frac`.
"""
def __init__(self, trunc_frac=0.25, n_jobs=1,
check_every=100, random_state=1, logger=None):
"""
Input
trunc_frac: float, fraction of instances to compute marginals for per iter.
n_jobs: int, no. iterations / processes to run in parallel.
check_every: int, no. iterations to run between checking convergence.
random_state: int, random seed to enhance reproducibility.
logger: object, If not None, output to logger.
"""
self.trunc_frac = trunc_frac
self.n_jobs = n_jobs
self.check_every = check_every
self.random_state = random_state
self.logger = logger
def fit(self, model, X, y):
"""
- Convert model to internal standardized tree structures.
- Perform any initialization necessary for the chosen method.
Input
model: tree ensemble.
X: 2d array of train data.
y: 1d array of train targets.
"""
super().fit(model, X, y)
X, y = util.check_data(X, y, objective=self.model_.objective)
self.original_model_ = model
self.objective_ = self.model_.objective
self.n_class_ = self.model_.n_class_
self.X_train_ = X.copy()
self.y_train_ = y.copy()
self.loss_fn_ = util.get_loss_fn(self.objective_, self.n_class_, self.model_.factor)
self.random_loss_ = self._get_random_loss()
return self
def get_local_influence(self, X, y):
"""
- Compute influence of each training instance on the test loss.
Input
X: 2d array of test examples.
y: 1d array of test targets.
Return
- 2d array of shape=(no. train, X.shape[0]).
* Arrays are returned in the same order as the training data.
"""
X, y = util.check_data(X, y, objective=self.model_.objective)
return self._run_tmc_shapley(X_test=X, y_test=y, inf='local')
# private
def _run_tmc_shapley(self, X_test=None, y_test=None, batch=False, inf='global', stability_tol=0.1):
"""
- Run the TMC-Shapley algorithm until marginal contributions converge.
Return
- 2d array of average marginals, shape=(no. train, 1 or X_test.shape[0]).
        * Arrays are returned in the same order as the training data.
"""
# extract parameters
original_model = self.original_model_
X_train = self.X_train_
y_train = self.y_train_
loss_fn = self.loss_fn_
random_loss = self.random_loss_
truncation_frac = self.trunc_frac
objective = self.objective_
n_class = self.n_class_
random_state = self.random_state
check_every = self.check_every
# select no. processes to run in parallel
if self.n_jobs == -1:
n_jobs = joblib.cpu_count()
else:
assert self.n_jobs >= 1
n_jobs = min(self.n_jobs, joblib.cpu_count())
start = time.time()
if self.logger:
self.logger.info('\n[INFO] computing approx. data Shapley values...')
self.logger.info(f'[INFO] no. cpus: {n_jobs:,}...')
# run TMC-Shapley alg. until convergence
with joblib.Parallel(n_jobs=n_jobs) as parallel:
# result container
if inf == 'local':
marginals = np.zeros((0, self.X_train_.shape[0], X_test.shape[0]), dtype=util.dtype_t)
result = np.zeros((self.X_train_.shape[0], X_test.shape[0]), dtype=util.dtype_t)
stable = np.zeros(X_test.shape[0], dtype=util.dtype_t)
else:
assert inf == 'global'
marginals = np.zeros((0, self.X_train_.shape[0], 1), dtype=util.dtype_t) # shape=(no. train, 1)
result = np.zeros((self.X_train_.shape[0], 1), dtype=util.dtype_t)
stable = np.zeros(1, dtype=util.dtype_t)
iteration = 0
while True:
# shape=(check_every, no. train, 1 or no. test)
results = parallel(joblib.delayed(_run_iteration)
(original_model, X_train, y_train, loss_fn,
random_loss, truncation_frac, objective, n_class,
random_state, iteration, i, X_test, y_test,
batch, inf) for i in range(check_every))
iteration += check_every
# synchronization barrier
marginals = np.vstack([marginals, results]) # shape=(check_every + (1), no. train, 1 or X.shape[0])
# check convergence
# - add up all marginals using axis=0, then divide by their iteration
# - diff. between last `check_every` runs and last run, divide by last run, average over all points
errors = np.zeros(marginals.shape[2], dtype=util.dtype_t) # shape=(X.shape[0],)
for i in range(marginals.shape[2]):
divisor = np.arange(1, iteration + 1)[-check_every:].reshape(-1, 1) # shape=(iteration, 1)
v = (np.cumsum(marginals[:, :, i], axis=0)[-check_every:] / divisor) # (check_every, no. train)
errors[i] = np.max(np.mean(np.abs(v - v[-1:]) / (np.abs(v[-1:]) + 1e-12), axis=1))
if self.logger:
cum_time = time.time() - start
self.logger.info(f'[INFO] Iter. {iteration:,}, stability: {errors}, cum. time: {cum_time:.3f}s')
# save last cum. sum of marginals without saving entire history
marginals = np.cumsum(marginals, axis=0)[-1:]
# marginals have converged
idxs = np.where(errors < stability_tol)[0] # shape=(1 or X_test.shape[0],)
if len(idxs) > 0:
stable[idxs] = 1.0
# update results
influence = marginals[-1] / iteration
result[:, idxs] = influence[:, idxs] # shape=(len(idxs), 1 or X_test.shape[0])
if np.all(stable):
break
return result
def _get_random_loss(self):
"""
Input
X: 2d array of data.
y: 1d array of targets.
Return 1d array of losses resulting from a random guess; shape=(X.shape[0],)
"""
if self.model_.objective == 'regression':
loss = 0
elif self.model_.objective == 'binary':
loss = -np.log(0.5)
else:
assert self.model_.objective == 'multiclass'
loss = -np.log(1.0 / self.model_.n_class_)
return loss
def _run_iteration(original_model, X_train, y_train, loss_fn, random_loss,
                   truncation_frac, objective, n_class, random_state,
                   finished_iterations, cur_iter, X_test=None, y_test=None, batch=False, inf='global'):
"""
- Run one iteration of the TMC-Shapley algorithm.
Return
- 1d array of marginals, shape=(no. train, 1) if global influence,
otherwise shape=(no. train, X_test.shape[0]).
Note
- Parallelizable method.
"""
rng = np.random.default_rng(random_state + finished_iterations + cur_iter)
# get order of training examples to add
train_idxs = rng.permutation(y_train.shape[0]) # shape=(no. train,)
train_idxs = train_idxs[:int(len(train_idxs) * truncation_frac)] # truncate examples
# result container
if inf == 'local':
marginals = np.zeros((X_train.shape[0], X_test.shape[0]), dtype=util.dtype_t)
else: # global influence
marginals = np.zeros((X_train.shape[0], 1), dtype=util.dtype_t) # shape=(no. train, 1)
# empty containers
X_batch = np.zeros((0,) + (X_train.shape[1],), dtype=util.dtype_t) # shape=(0, no. feature)
y_batch = np.zeros(0, dtype=np.int32) # shape=(0,)
old_loss = random_loss # tracker
old_model = None
# add training examples one at a time to measure the effect of each one
for train_idx in train_idxs:
# add example to batch of examples
X_batch = np.vstack([X_batch, X_train[train_idx].reshape(1, -1)])
y_batch = np.concatenate([y_batch, y_train[train_idx].reshape(1)])
# skip batches that do not have enough examples
if objective == 'regression' and X_batch.shape[0] < 2:
continue
elif objective == 'binary' and len(np.unique(y_batch)) < 2:
continue
elif objective == 'multiclass' and len(np.unique(y_batch)) < n_class:
continue
# train and score
model = clone(original_model).fit(X_batch, y_batch)
# local influence
if inf == 'local':
loss = _get_loss(loss_fn, model, objective, X=X_test, y=y_test) # shape=(X_test.shape[0],)
marginals[train_idx, :] = old_loss - loss # loss(x_t) w/o x_i - loss(x_t) w/ x_i
old_loss = loss
# global influence
elif inf == 'global' and X_test is not None and batch:
loss = _get_loss(loss_fn, model, objective, X=X_test, y=y_test, batch=batch)
marginals[train_idx, 0] = old_loss - loss # loss(X_test) w/o x_i - loss(X_test) w/ x_i
old_loss = loss
# self influence
else:
assert inf == 'global' and not batch
X_temp = X_train[[train_idx]]
y_temp = y_train[[train_idx]]
if old_model is None:
old_loss = random_loss
else:
old_loss = _get_loss(loss_fn, old_model, objective, X=X_temp, y=y_temp)
loss = _get_loss(loss_fn, model, objective, X=X_temp, y=y_temp)[0]
marginals[train_idx, 0] = old_loss - loss # loss(x_i) w/o x_i - loss(x_i) w/ x_i
old_model = model
return marginals
def _get_loss(loss_fn, model, objective, X, y, batch=False):
"""
Return
- 1d array of individual losses of shape=(X.shape[0],).
Note
- Parallelizable method.
"""
if objective == 'regression':
y_pred = model.predict(X) # shape=(X.shape[0])
elif objective == 'binary':
y_pred = model.predict_proba(X)[:, 1] # 1d arry of pos. probabilities
else:
assert objective == 'multiclass'
y_pred = model.predict_proba(X) # shape=(X.shape[0], no. class)
result = loss_fn(y, y_pred, raw=False, batch=batch) # shape=(X.shape[0],) or single float
return result
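# Hedged usage sketch (not part of the module): the intended call pattern for
# DShap. The tree-ensemble `model` and the objective handling in `parsers.util`
# above are assumed to accept whatever ensembles your build supports
# (e.g. a scikit-learn gradient-boosted classifier).
#
#   explainer = DShap(trunc_frac=0.25, n_jobs=1, check_every=10)
#   explainer.fit(model, X_train, y_train)
#   influence = explainer.get_local_influence(X_test, y_test)  # shape=(n_train, n_test)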
|
the-stack_106_15116
|
from mastodon import Mastodon
import json
from login import login
from out_json import jsoner
from pprint import pprint
import time
'''
return_type is either "json" or "list".
The default is "list".
'''
def fav2(return_type="list", switch=None):
wait_sec = 30
start = time.time()
e = lambda a, b : round(a-b)
print(return_type + " is selected!")
Mastodon = login(switch)
my_cred = Mastodon.account_verify_credentials()
id = my_cred['id']
limit = 40 #40
print("Your id is {0}".format(id))
page = 0
favourites = Mastodon.favourites(None,None,limit)
max_id = favourites[-1]['_pagination_next']['max_id']
print("count: {0} ({1})".format(str(len(favourites)), str(len(favourites))))
while True:
latest_favourites = Mastodon.favourites(max_id,None,limit)
if isinstance(latest_favourites,dict):
pprint("Error code 429:{0} wait {1} sec...".format(latest_favourites['error'],wait_sec))
time.sleep(wait_sec)
            continue
elif len(latest_favourites) < limit:
favourites.extend(latest_favourites)
page += 1
elapsed_time = time.time() - start
print("End fetch your favourites")
print("count: {0} time:{1}sec".format(str(len(favourites)), elapsed_time))
break
else:
max_id = favourites[-1]['_pagination_next']['max_id']
favourites.extend(latest_favourites)
page += 1
previous_favourites = latest_favourites
print("count: {0} ({1}) time:{2}sec".format(str(len(favourites)), str(len(latest_favourites)), e(time.time(),start)))
time.sleep(3)
if return_type == "json":
filename = str(id) + "_fav"
        jsoner(favourites, filename)
else:
return favourites
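# Hedged usage sketch: login(switch) above is assumed to return an
# authenticated Mastodon client, and fetching every favourite pages through
# the API with rate-limit back-off, so this is illustrative only.
#
#   favs = fav2(return_type="list")   # list of favourited statuses
#   fav2(return_type="json")          # writes <account_id>_fav.json via jsoner()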
if __name__ == '__main__':
    fav2()
|
the-stack_106_15117
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jury', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='jury',
options={'verbose_name': 'Juri', 'verbose_name_plural': 'j\xfaris'},
),
]
|
the-stack_106_15120
|
"""Tests for connectors.pony.database.Factoid
"""
from pony import orm
#from papilotte.connectors.pony import database
import pytest
import datetime
import copy
def test_create_from_ipif(db, data1):
Factoid = db.entities['Factoid']
data = data1['factoids'][0]
data['derivedFrom'] = "999"
with orm.db_session:
f = Factoid.create_from_ipif(data)
assert f.createdBy == data['createdBy']
assert f.createdWhen == datetime.datetime.fromisoformat(data['createdWhen'])
assert f.modifiedBy == data.get('modifiedBy', '')
assert f.modifiedWhen == None # datetime.datetime.fromisoformat(f_data.get('modifiedWhen', ''))
assert f.derivedFrom == data['derivedFrom']
assert f.person.id == data['person']['@id']
assert f.source.id == data['source']['@id']
sorted_statements = sorted(f.statements, key=lambda s: s.id)
assert sorted_statements[0].id == data['statements'][0]['@id']
def test_create_from_ipif_with_existing_children(db, data1):
"""Test factoid creation, when person and source have been create before.
Hint: Statements are weak entities, so they cannot exist in before.
Factoid only contains the ids for person, source.
"""
Factoid = db.entities['Factoid']
Person = db.entities['Person']
Source = db.entities['Source']
Statement = db.entities['Statement']
data = data1['factoids'][0]
data['derivedFrom'] = "999"
# replace full child elements by '@ids' only
person_data = data.pop('person')
data['person'] = {'@id': person_data['@id']}
source_data = data.pop('source')
data['source'] = {'@id': source_data['@id']}
#statement_data = data.pop('statements')
#data['statements'] = [{'@id': s['@id']} for s in statement_data]
with orm.db_session:
# Create person, source and statements, so that they already exist when creating
# the whole factoid
p = Person.create_from_ipif(person_data)
s = Source.create_from_ipif(source_data)
#for stmt in statement_data:
# st = Statement.create_from_ipif(stmt)
# create the factoid (person, source and statements should be recycled)
f = Factoid.create_from_ipif(data)
assert f.createdBy == data['createdBy']
assert f.createdWhen == datetime.datetime.fromisoformat(data['createdWhen'])
assert f.modifiedBy == data.get('modifiedBy', '')
assert f.modifiedWhen == None # datetime.datetime.fromisoformat(f_data.get('modifiedWhen', ''))
assert f.derivedFrom == data['derivedFrom']
assert f.person.id == data['person']['@id']
assert f.person.createdBy == p.createdBy
assert f.person.uris == p.uris
assert f.source.id == data['source']['@id']
assert f.source.label == s.label
assert f.source.uris == s.uris
sorted_statements = sorted(f.statements, key=lambda s: s.id)
assert sorted_statements[0].id == data['statements'][0]['@id']
assert sorted_statements[0].role.label == data['statements'][0]['role']['label']
def test_create_from_ipif_without_id(db, data1):
"Create a Factoid with auto generated id."
Factoid = db.entities['Factoid']
data = data1['factoids'][0]
data.pop('@id')
with orm.db_session:
f = Factoid.create_from_ipif(data)
assert f.id
def test_create_from_ipif_double_id(db, data1):
"Creating a Factoid with an existing id should result in exception."
Factoid = db.entities['Factoid']
data = data1['factoids'][0]
with orm.db_session:
Factoid.create_from_ipif(data)
with pytest.raises(orm.CacheIndexError):
Factoid.create_from_ipif(data)
def test_deep_delete(db, data1):
"""Test .safe_delete().
safe_delete() not only deletes the statement, but also the
related memberOf if it would be orphaned after deleting the Statement.
"""
Factoid = db.entities['Factoid']
Person = db.entities['Person']
Source = db.entities['Source']
Statement = db.entities['Statement']
with orm.db_session:
f1_data = data1['factoids'][0]
# basic: only 1 factoid. All related entries should be gone after
# deleting the factoid
f1 = Factoid.create_from_ipif(f1_data)
f1.deep_delete()
assert orm.select(f for f in Factoid).count() == 0
assert orm.select(p for p in Person).count() == 0
assert orm.select(s for s in Source).count() == 0
assert orm.select(s for s in Statement).count() == 0
        # 2 Factoids sharing source and person. Source and person must not be deleted.
# make second factoid with changed ids
f2_data = copy.deepcopy(f1_data)
f2_data['@id'] = 'foo2'
for stmt in f2_data['statements']:
stmt['@id'] = stmt['@id'] + 'a'
f1 = Factoid.create_from_ipif(f1_data)
# create the second factoid (factoid.id and statements are different)
Factoid.create_from_ipif(f2_data)
f1.deep_delete()
assert orm.select(f for f in Factoid).count() == 1
assert orm.select(p for p in Person).count() == 1
assert orm.select(s for s in Source).count() == 1
assert orm.select(s for s in Statement).count() == 2
def test_make_id(db, data1):
"Test if make_id generates unique ids."
Factoid = db.entities['Factoid']
data = data1['factoids'][0]
data.pop('@id')
# we create a Factoid from data, so id is taken
with orm.db_session:
f1 = Factoid.create_from_ipif(data)
# Calling make_id with the same data should result in different id
new_id = Factoid.make_id(data)
assert new_id != f1.id
def test_update_from_ipif(db, data1):
"A simple update on an existing object"
Factoid = db.entities['Factoid']
data = data1['factoids'][0]
data['derivedFrom'] = "999"
with orm.db_session:
f = Factoid.create_from_ipif(data)
# Change simple values. Are they reflected in db?
data['derivedFrom'] = 'xxx'
data['createdBy'] = 'foo foo bar'
data['createdWhen'] = datetime.datetime.now().isoformat()
data['person']['createdBy'] = 'foooooooo'
data['source']['createdBy'] = 'baaaaaaar'
data['statements'][0]['createdBy'] = 'foooobaaaar'
f.update_from_ipif(data)
assert f.id == data['@id']
assert f.createdBy == data['createdBy']
assert f.createdWhen == datetime.datetime.fromisoformat(data['createdWhen'])
assert f.person.id == data['person']['@id']
assert f.person.createdBy == data['person']['createdBy']
assert f.source.id == data['source']['@id']
assert f.source.createdBy == data['source']['createdBy']
sorted_statements = sorted(f.statements, key=lambda s: s.id)
assert sorted_statements[0].id == data['statements'][0]['@id']
assert sorted_statements[0].createdBy == data['statements'][0]['createdBy']
def test_update_from_ipif_with_different_id(db, data1):
"Make sure '@id' is ignored when updating."
Factoid = db.entities['Factoid']
initial_data = copy.deepcopy(data1['factoids'][0])
data = data1['factoids'][0]
initial_data['@id'] = 'abc'
with orm.db_session:
f = Factoid.create_from_ipif(initial_data)
f.update_from_ipif(data)
assert f.id == 'abc'
def test_to_ipif(db, data1):
Factoid = db.entities['Factoid']
data = data1['factoids'][0]
data['derivedFrom'] = '9999'
with orm.db_session:
f = Factoid.create_from_ipif(data)
f_ipif = f.to_ipif()
assert f_ipif['@id'] == data['@id']
assert f_ipif['createdBy'] == data['createdBy']
assert f_ipif['createdWhen'] == data['createdWhen']
assert f_ipif['modifiedBy'] == ''
assert f_ipif['modifiedWhen'] == ""
assert f_ipif['derivedFrom'] == data['derivedFrom']
assert f_ipif['person']['@id'] == data['person']['@id']
assert f_ipif['source']['@id'] == data['source']['@id']
sorted_statements = sorted(f_ipif['statements'], key=lambda s: s['@id'])
assert sorted_statements[0]['@id'] == data['statements'][0]['@id']
assert f_ipif['person-ref']['@id'] == data['person']['@id']
assert f_ipif['source-ref']['@id'] == data['source']['@id']
assert f_ipif['statement-refs'][0]['@id'] == data['statements'][0]['@id']
|
the-stack_106_15123
|
# Listing_16-3.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Drawing a circle
import pygame, sys
pygame.init()
screen = pygame.display.set_mode([640,480])
screen.fill([255,255,255]) # fill screen with white
pygame.draw.circle(screen, [255,0,0],[100,100], 30, 0) # draw red circle
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
|
the-stack_106_15127
|
"""
find GDS labels and write labels into a CSV file
"""
import pathlib
import csv
import klayout.db as pya
from pp import LAYER
def find_labels(gdspath, label_layer=LAYER.LABEL, prefix="opt_"):
""" finds labels and locations from a GDS file """
# Load the layout
gdspath = str(gdspath)
layout = pya.Layout()
layout.read(gdspath)
# Get the top cell and the units, and find out the index of the layer
topcell = layout.top_cell()
dbu = layout.dbu
layer = pya.LayerInfo(label_layer[0], label_layer[1])
layer_index = layout.layer(layer)
# Extract locations
iterator = topcell.begin_shapes_rec(layer_index)
while not (iterator.at_end()):
shape, trans = iterator.shape(), iterator.trans()
iterator.next()
if shape.is_text():
text = shape.text
if text.string.startswith(prefix):
transformed = text.transformed(trans)
yield text.string, transformed.x * dbu, transformed.y * dbu
def write_labels(gdspath, label_layer=LAYER.LABEL, csv_filename=None, prefix="opt_"):
"""Load GDS mask and extracts the labels and coordinates from a GDS file"""
labels = list(find_labels(gdspath, label_layer=label_layer, prefix=prefix))
# Save the coordinates somewhere sensible
if csv_filename is None:
gdspath = pathlib.Path(gdspath)
csv_filename = gdspath.with_suffix(".csv")
with open(csv_filename, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(labels)
print(f"Wrote {csv_filename}")
if __name__ == "__main__":
from pp.config import CONFIG
gdspath = CONFIG["samples_path"] / "mask" / "build" / "mask" / "sample.gds"
write_labels(gdspath)
|
the-stack_106_15128
|
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
# For better print formatting
from __future__ import print_function
# Imports
import networkx as nx
#
# Args class
#
class Args(object):
"""
Creates an object containing all the information from the command line arguments
Attributes:
- dot_file_path : Complete path to the input DOT file
+ type: String
        - output_file_path : Complete path to the output PDF file
+ type: String
"""
_DEFAULT_OUTPUT_EXTENSION = ".pdf"
def __init__(self, dot_file_path=None, output_file_path=None):
"""
Initializes the Args object
:param dot_file_path: Complete path to the input DOT file
+ type: String
:param output_file_path: Complete path to the output PDF file
+ type: String
"""
# Parse DOT file path
import os
if dot_file_path.endswith(".dot"):
self.dot_file_path = os.path.abspath(dot_file_path)
else:
self.dot_file_path = os.path.abspath(dot_file_path + ".dot")
# Parse PDF file path
if output_file_path is None:
# PDF file with same name than DOT file but changing extension
self.output_file_path = self.dot_file_path[:-4] + Args._DEFAULT_OUTPUT_EXTENSION
else:
self.output_file_path = os.path.abspath(output_file_path)
def get_dot_file_path(self):
"""
Returns the complete file path of the input DOT file
:return: The complete file path of the input DOT file
+ type: String
"""
return self.dot_file_path
def get_output_file_path(self):
"""
Returns the complete file path of the output PDF file
:return: The complete file path of the output PDF file
+ type: String
"""
return self.output_file_path
#
# Graph class
#
class Graph(object):
"""
Contains an in memory representation of the graph
Attributes:
- nodes : List of defined nodes
+ type: dict<String> = Tuple(String, String, String)
- edges : List of defined edges
+ type: dict<Tuple(String, String)> = Tuple(String,)
- g : Representation of the graph
+ type: networkx.DiGraph
"""
def __init__(self, dot_file_path=None):
"""
Initializes the Graph object
:param dot_file_path: Complete file path of the input DOT file
+ type: String
"""
if dot_file_path is None:
raise Exception("ERROR: Empty input DOT file path")
self.nodes = {}
self.edges = {}
# Always add Synchro0 starting point
self.nodes["Synchro0"] = ("octagon", "#ff0000", "#FFFFFF")
# Add nodes and edges from DOT file
with open(dot_file_path) as f:
for line in f:
if ("shape" in line) and ("fillcolor" in line) and ("fontcolor" in line):
if "label" in line:
# Line defines a sync
l2 = line.split(",")
s_index = l2[0].index("[")
node_name = l2[0][:s_index]
shape = l2[1][len("shape=") + 1:]
self.nodes[node_name] = (shape, "#ff0000", "#FFFFFF")
else:
# print("Adding node " + line)
# Line defines a node
l2 = line.split(",")
s_index = l2[0].index("[")
node_name = l2[0][:s_index]
shape = l2[0][s_index + 7:]
l3 = l2[1].split()
fillcolor = l3[1][len("fillcolor=\""):-1]
fontcolor = l3[2][len("fontcolor=\""):-3]
self.nodes[node_name] = (shape, fillcolor, fontcolor)
elif "->" in line:
# Line defines an edge
f_index = line.index("->")
node_from = line[:f_index].strip()
if "[" in line:
s_index = line.index("[")
e_index = line.index("]")
node_to = line[f_index + 2:s_index].strip()
label = line[s_index + 9:e_index - 2]
else:
s_index = line.index(";")
node_to = line[f_index + 2:s_index].strip()
label = ""
self.edges[(node_from, node_to)] = (label,)
# if __debug__:
# print("List of Nodes:")
# print(self.nodes)
# print("List of Edges")
# print(self.edges)
# Create the graph
self.g = nx.DiGraph()
for node_name, node_info in self.nodes.items():
self.g.add_node(node_name)
# shape=node_info[0], style="filled", color="black", fillcolor=node_info[1], fontcolor=node_info[2])
for edge, edge_info in self.edges.items():
self.g.add_edge(u_of_edge=edge[0], v_of_edge=edge[1]) # label=edge_info[0])
# if __debug__:
# print("Graph contents - List of Nodes:")
# print(self.g.nodes)
# print("Graph contents - List of Edges:")
# print(self.g.edges)
def render(self, output_file_path=None):
"""
Renders the in-memory graph into the given file path
:param output_file_path: Complete file path of the output PDF file
+ type: String
:return: None
"""
if output_file_path is None:
raise Exception("ERROR: Empty output PDF file path")
# Compute node colors
color_map = []
for node_name in self.g.nodes:
node_info = self.nodes[node_name]
color_map.append(node_info[1])
# if __debug__:
# print("Graph contents - List of Node colors:")
# print(color_map)
# Compute depths
depth_per_node = {}
self._compute_depths(current_node="Synchro0", current_depth=0, depths=depth_per_node)
# if __debug__:
# print("Depth per node:")
# print(depth_per_node)
# Compute nodes on each depth
nodes_per_depth = Graph._compute_nodes_per_depth(depths=depth_per_node)
# if __debug__:
# print("Nodes per depth:")
# print(nodes_per_depth)
# Create layout
pos = Graph._compute_layout(nodes_per_depth=nodes_per_depth,
width=1.0,
vert_gap=0.2,
vert_loc=0.0)
# if __debug__:
# print("Layout:")
# print(pos)
# Draw
import matplotlib.pyplot as plt
nx.draw(self.g,
pos=pos, # Node position
arrows=True, # Draw edge arrows
arrowsize=2, # Edge arrows size
width=0.3, # Edge size
node_size=20, # Node size
node_color=color_map, # Node color
with_labels=True, # Node labels
font_size=1, # Node labels font size
)
plt.savefig(output_file_path)
def _compute_depths(self, current_node=None, current_depth=0, depths=None):
"""
Computes the depths of the current node and its children assuming the given
current_depth
:param current_node: Name of the current node
+ type: String
:param current_depth: Value of the current depth
+ type: Int
:param depths: List of depths per node
+ type: Dict<String> = Int
:return: None
"""
if current_node is None:
return {}
# Process current node
if depths is None:
depths = {current_node: current_depth}
else:
if current_node in depths:
depths[current_node] = max(depths[current_node], current_depth)
else:
depths[current_node] = current_depth
# Iterate over children
current_depth = current_depth + 1
for child in self.g.neighbors(current_node):
self._compute_depths(child, current_depth, depths)
@staticmethod
def _compute_nodes_per_depth(depths=None):
"""
From a list of depths per node builds a list of nodes per depth
:param depths: List of depths per node
+ type: Dict<String> = Int
:return: List of nodes per depth
+ type: Dict<Int> = String
"""
nodes_per_depth = {}
for node, depth in depths.items():
if depth in nodes_per_depth:
nodes_per_depth[depth].append(node)
else:
nodes_per_depth[depth] = [node]
return nodes_per_depth
@staticmethod
def _compute_layout(nodes_per_depth=None, width=1.0, vert_gap=0.2, vert_loc=0.0):
"""
Given a list of nodes per depth and some visual sizes, computes the layout of
all the nodes of the graph
:param nodes_per_depth: List of nodes per depth
+ type: Dict<Int> = String
:param width: Layout width
+ type: double
:param vert_gap: Space between rows
+ type: double
:param vert_loc: Starting vertical point
+ type: double
:return: The layout of the graph
+ type: Dict<String> = (x, y)
"""
pos = {}
for depth in sorted(nodes_per_depth.keys()):
nodes = nodes_per_depth[depth]
dx = width / len(nodes)
horz_loc = 0
for node in nodes:
horz_loc += dx
pos[node] = (horz_loc, vert_loc)
vert_loc = vert_loc - vert_gap
return pos
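# Hedged example of the static helpers above (names are local to this sketch):
# depths {'A': 0, 'B': 1, 'C': 1} become {0: ['A'], 1: ['B', 'C']}, and the
# layout places 'A' at (1.0, 0.0) and spreads 'B' and 'C' across the next row
# at (0.5, -0.2) and (1.0, -0.2).
def _example_graph_layout():
    nodes_per_depth = Graph._compute_nodes_per_depth(depths={'A': 0, 'B': 1, 'C': 1})
    return Graph._compute_layout(nodes_per_depth=nodes_per_depth,
                                 width=1.0, vert_gap=0.2, vert_loc=0.0)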
############################################
# HELPER METHODS
############################################
def parse_arguments(cmd_args):
"""
Parses command line arguments and returns an object containing the application information
:param cmd_args: Command line arguments
+ type: List
:return: Object containing the application information
+ type: Args
"""
if len(cmd_args) == 1:
dot_file_path = cmd_args[0]
output_file_path = None
elif len(cmd_args) == 2:
dot_file_path = cmd_args[0]
output_file_path = cmd_args[1]
else:
raise Exception("ERROR: Invalid number of parameters")
return Args(dot_file_path=dot_file_path, output_file_path=output_file_path)
def process_graph(args):
"""
Construct an in-memory representation of the given graph
:param args: Application information
+ type: Args
:return: An object containing the graph representation
+ type: Graph
"""
dot_file_path = args.get_dot_file_path()
return Graph(dot_file_path)
def render_graph(graph, args):
"""
Render the given graph to the output location
:param graph: Object containing the graph representation
+ type: Graph
:param args: Application information
+ type: Args
:return: None
"""
output_file_path = args.get_output_file_path()
graph.render(output_file_path)
############################################
# MAIN
############################################
def main():
print("Starting Graph rendering...")
# Import libraries
import time
import sys
# Parse arguments
if __debug__:
print("[DEBUG] Parsing arguments...")
time_start = time.time()
args = parse_arguments(sys.argv[1:])
time_arguments_end = time.time()
if __debug__:
print("[DEBUG] Arguments parsed")
time_arguments = time_arguments_end - time_start
print("[DEBUG] Arguments parsing time: " + str(time_arguments))
# Process graph
if __debug__:
print("[DEBUG] Processing graph...")
time_process_start = time.time()
graph = process_graph(args)
time_process_end = time.time()
if __debug__:
print("[DEBUG] Graph processed")
time_process = time_process_end - time_process_start
print("[DEBUG] Graph processing time: " + str(time_process))
# Render graph
if __debug__:
print("[DEBUG] Rendering graph...")
time_render_start = time.time()
render_graph(graph, args)
time_render_end = time.time()
if __debug__:
print("[DEBUG] Graph rendered")
time_render = time_render_end - time_render_start
print("[DEBUG] Graph rendering time: " + str(time_render))
# END
time_end = time.time()
time_total = time_end - time_start
print("Elapsed time: " + str(time_total))
print("Graph rendering finished")
if __name__ == "__main__":
main()
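# Illustrative usage (assumed invocation; the script name below is hypothetical):
#   python dot_renderer.py input.dot             # parse a DOT file, default output handling
#   python dot_renderer.py input.dot out.png     # parse a DOT file and render to out.png
# parse_arguments() accepts exactly one or two positional arguments, as shown above.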
|
the-stack_106_15129
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from polyaxon.utils.date_utils import parse_datetime
from polyaxon.utils.tz_utils import now
class V1Stages(polyaxon_sdk.V1Stages):
"""Stage is the information that represents the current stage of an entity's version.
You can describe what stage a component or a model version is at.
The supported stages by Polyaxon.
Enum:
TESTING: "testing"
STAGING: "staging"
PRODUCTION: "production"
DISABLED: "disabled"
"""
pass
class V1Statuses(polyaxon_sdk.V1Statuses):
"""Status is the information that represents the current state of a run.
By examining a run status and/or the history of its statuses,
you can learn what stage the run is at, and what stages are left.
The supported statuses by Polyaxon.
Enum:
CREATED: "created"
ON_SCHEDULE: "on_schedule"
RESUMING: "resuming"
WARNING: "warning"
UNSCHEDULABLE: "unschedulable"
COMPILED: "compiled"
QUEUED: "queued"
SCHEDULED: "scheduled"
STARTING: "starting"
RUNNING: "running"
SUCCEEDED: "succeeded"
FAILED: "failed"
UPSTREAM_FAILED: "upstream_failed"
STOPPING: "stopping"
STOPPED: "stopped"
SKIPPED: "skipped"
RETRYING: "retrying"
UNKNOWN: "unknown"
"""
allowable_hook_values = [
polyaxon_sdk.V1Statuses.FAILED,
polyaxon_sdk.V1Statuses.STOPPED,
polyaxon_sdk.V1Statuses.SUCCEEDED,
polyaxon_sdk.V1Statuses.SKIPPED,
polyaxon_sdk.V1Statuses.UPSTREAM_FAILED,
polyaxon_sdk.V1Statuses.DONE,
]
class StatusColor:
"""The statuses colors.
Enum:
GREEN: #1aaa55
RED: #aa310f
BLUE: #2e77aa
YELLOW: #aa9e4a
GREY: #485563
"""
GREEN = "#1aaa55"
RED = "#aa310f"
BLUE = "#2e77aa"
YELLOW = "#aa9e4a"
GREY = "#485563"
@classmethod
def get_color(cls, status: str) -> str:
if status in [V1Statuses.FAILED, V1Statuses.UPSTREAM_FAILED]:
return cls.RED
if status in [V1Statuses.SUCCEEDED, V1Statuses.SKIPPED]:
return cls.GREEN
if status == V1Statuses.STOPPED:
return cls.GREY
if LifeCycle.is_done(status):
return cls.GREY
return cls.YELLOW
class LifeCycle:
"""The Run LifeCycle is state machine for status transition."""
WARNING_VALUES = {V1Statuses.UNSCHEDULABLE, V1Statuses.WARNING}
ALL_WARNING_VALUES = WARNING_VALUES | {V1Statuses.UNKNOWN}
SAFE_STOP_VALUES = {
V1Statuses.CREATED,
V1Statuses.ON_SCHEDULE,
V1Statuses.RESUMING,
V1Statuses.COMPILED,
}
PENDING_VALUES = {
V1Statuses.CREATED,
V1Statuses.ON_SCHEDULE,
V1Statuses.RESUMING,
}
COMPILABLE_VALUES = PENDING_VALUES | {V1Statuses.RETRYING}
ALL_PENDING_VALUES = COMPILABLE_VALUES | {V1Statuses.COMPILED, V1Statuses.QUEUED}
RUNNING_VALUES = {
V1Statuses.SCHEDULED,
V1Statuses.STARTING,
V1Statuses.PROCESSING,
V1Statuses.RUNNING,
}
ON_K8S_VALUES = RUNNING_VALUES | ALL_WARNING_VALUES
DONE_VALUES = {
V1Statuses.FAILED,
V1Statuses.UPSTREAM_FAILED,
V1Statuses.STOPPED,
V1Statuses.SKIPPED,
V1Statuses.SUCCEEDED,
}
DONE_OR_IN_PROGRESS_VALUES = DONE_VALUES | {
V1Statuses.STOPPING,
}
@classmethod
def can_check_heartbeat(cls, status: str) -> bool:
"""Checks if a run with this status is in a state that allows to check for a heartbeat."""
return status in cls.RUNNING_VALUES
@classmethod
def is_unschedulable(cls, status: str) -> bool:
"""Checks if a run with this status is unschedulable."""
return status == V1Statuses.UNSCHEDULABLE
@classmethod
def is_processing(cls, status: str) -> bool:
"""Checks if a run with this status is processing."""
return status == V1Statuses.PROCESSING
@classmethod
def is_warning(cls, status: str) -> bool:
"""Checks if a run with this status is in a warning status."""
return status in cls.WARNING_VALUES
@classmethod
def is_pending(cls, status: str) -> bool:
"""Checks if a run with this status is in a pending status."""
return status in cls.PENDING_VALUES
@classmethod
def is_compiled(cls, status: str) -> bool:
"""Checks if a run with this status is compiled."""
return status == V1Statuses.COMPILED
@classmethod
def is_compilable(cls, status: str) -> bool:
"""Checks if a run with this status is compilable."""
return status in cls.COMPILABLE_VALUES
@classmethod
def is_queued(cls, status: str) -> bool:
"""Checks if a run with this status is queued."""
return status == V1Statuses.QUEUED
@classmethod
def is_starting(cls, status: str) -> bool:
"""Checks if a run with this status is starting."""
return status == V1Statuses.STARTING
@classmethod
def is_running(cls, status: str) -> bool:
"""Checks if a run with this status is running."""
return status in cls.RUNNING_VALUES
@classmethod
def is_unknown(cls, status: str) -> bool:
"""Checks if a run with this status is in an unknown state."""
return status == V1Statuses.UNKNOWN
@classmethod
def is_safe_stoppable(cls, status: str) -> bool:
"""Checks if a run with this status is an be stopped without operator."""
return status in cls.SAFE_STOP_VALUES
@classmethod
def is_k8s_stoppable(cls, status: str) -> bool:
"""Checks if a run with this status is scheduled on k8s and is stoppable."""
return status in cls.ON_K8S_VALUES
@classmethod
def is_stoppable(cls, status: str) -> bool:
"""Checks if a run with this status is stoppable."""
return not cls.is_done(status)
@classmethod
def is_stopping(cls, status: str) -> bool:
"""Checks if a run with this status is stopping."""
return status == V1Statuses.STOPPING
@classmethod
def is_done(cls, status: str, progressing: bool = False) -> bool:
"""Checks if a run with this status is done."""
if progressing:
return status in cls.DONE_OR_IN_PROGRESS_VALUES
return status in cls.DONE_VALUES
@classmethod
def failed(cls, status: str) -> bool:
"""Checks if a run with this status is failed."""
return status == V1Statuses.FAILED or status == V1Statuses.UPSTREAM_FAILED
@classmethod
def succeeded(cls, status: str) -> bool:
"""Checks if a run with this status is succeeded."""
return status == V1Statuses.SUCCEEDED
@classmethod
def stopped(cls, status: str) -> bool:
"""Checks if a run with this status is stopped."""
return status == V1Statuses.STOPPED
@classmethod
def skipped(cls, status: str) -> bool:
"""Checks if a run with this status is skipped."""
return status == V1Statuses.SKIPPED
@classmethod
def set_started_at(cls, entity) -> bool:
        # Do not override started_at once it has been set
if entity.started_at is not None:
return False
if cls.is_running(entity.status):
entity.started_at = now()
# Update wait_time
if entity.wait_time is None:
entity.wait_time = int(
(entity.started_at - entity.created_at).total_seconds()
)
return True
return False
@classmethod
def set_finished_at(cls, entity) -> bool:
if cls.is_done(entity.status) and entity.finished_at is None:
entity.finished_at = now()
if entity.started_at is None: # We should not have this case
entity.started_at = entity.created_at
# Update duration
if entity.duration is None:
entity.duration = int(
(entity.finished_at - entity.started_at).total_seconds()
)
return True
return False
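# Minimal usage sketch (illustrative only; the statuses come from V1Statuses above).
# LifeCycle exposes pure classmethod predicates, so no run object is needed to call them.
def _lifecycle_demo():
    assert LifeCycle.is_pending(V1Statuses.CREATED)
    assert LifeCycle.is_running(V1Statuses.RUNNING)
    assert LifeCycle.is_done(V1Statuses.FAILED)
    # STOPPING only counts as done when the progressing flag is set
    assert not LifeCycle.is_done(V1Statuses.STOPPING)
    assert LifeCycle.is_done(V1Statuses.STOPPING, progressing=True)
    # failed() covers both direct and upstream failures
    assert LifeCycle.failed(V1Statuses.UPSTREAM_FAILED)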
class ConditionMixin:
@classmethod
def get_condition(
cls,
type=None, # noqa
status=None,
last_update_time=None,
last_transition_time=None,
reason=None,
message=None,
):
current_time = now()
last_update_time = last_update_time or current_time
last_transition_time = last_transition_time or current_time
return cls(
type=type.lower() if type else type,
status=status,
last_update_time=last_update_time,
last_transition_time=last_transition_time,
reason=reason,
message=message,
)
def __eq__(self, other):
return self.type == other.type and self.status == other.status
@classmethod
def get_last_update_time(cls, value):
return parse_datetime(value)
class V1StatusCondition(ConditionMixin, polyaxon_sdk.V1StatusCondition):
pass
class V1StageCondition(ConditionMixin, polyaxon_sdk.V1StageCondition):
pass
class V1ProjectVersionKind(polyaxon_sdk.V1ProjectVersionKind):
pass
class V1ProjectFeature(V1ProjectVersionKind):
RUNTIME = "runtime"
allowable_values = V1ProjectVersionKind.allowable_values + [RUNTIME]
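# Illustrative sketch (assumption: the underlying polyaxon_sdk condition model accepts the
# keyword arguments that ConditionMixin.get_condition forwards to it, as implied above).
def _condition_demo():
    cond = V1StatusCondition.get_condition(
        type=V1Statuses.SUCCEEDED,
        status=True,
        reason="JobCompleted",
        message="Run finished without errors",
    )
    # Conditions compare on (type, status) only -- see ConditionMixin.__eq__
    same = V1StatusCondition.get_condition(type=V1Statuses.SUCCEEDED, status=True)
    return cond == same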
|
the-stack_106_15130
|
from .category import Category
class Reptile(Category):
_name = "reptile"
_values = [
"adder",
"alligator",
"anaconda",
"boa",
"chameleon",
"cobra",
"crocodile",
"frog",
"gecko",
"iguana",
"lizard",
"mamba",
"python",
"rattlesnake",
"salamander",
"skink",
"snake",
"toad",
"tortoise",
"treefrog",
"turtle",
"viper",
]
|
the-stack_106_15131
|
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from mongoengine.queryset import Q
from st2common import log as logging
from st2api.controllers.resource import BaseResourceIsolationControllerMixin
from st2api.controllers.resource import ContentPackResourceController
from st2common.models.api.rule import RuleViewAPI
from st2common.models.system.common import ResourceReference
from st2common.persistence.action import Action
from st2common.persistence.rule import Rule
from st2common.persistence.trigger import TriggerType, Trigger
from st2common.rbac.types import PermissionType
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
__all__ = ['RuleViewController']
class RuleViewController(BaseResourceIsolationControllerMixin, ContentPackResourceController):
"""
Add some extras to a Rule object to make it easier for UI to render a rule. The additions
do not necessarily belong in the Rule itself but are still valuable augmentations.
:Example:
{
"action": {
"description": "Action that executes an arbitrary Linux command on the localhost.",
"parameters": {
"cmd": "echo \"{{trigger.executed_at}}\""
},
"ref": "core.local"
},
"criteria": {},
"description": "Sample rule using an Interval Timer.",
"enabled": false,
"id": "55ea221832ed35759cf3b312",
"name": "sample.with_timer",
"pack": "examples",
"ref": "examples.sample.with_timer",
"tags": [],
"trigger": {
"description": "Triggers on specified intervals. e.g. every 30s, 1week etc.",
"parameters": {
"delta": 5,
"unit": "seconds"
},
"ref": "core.4ad65602-6fb4-4c89-b0f2-b990d7b68bad",
"type": "core.st2.IntervalTimer"
},
"uid": "rule:examples:sample.with_timer"
}
The `description` fields in action and trigger are augmented properties.
"""
model = RuleViewAPI
access = Rule
supported_filters = {
'name': 'name',
'pack': 'pack',
'user': 'context.user'
}
query_options = {
'sort': ['pack', 'name']
}
mandatory_include_fields_retrieve = ['pack', 'name', 'trigger']
def get_all(self, exclude_attributes=None, include_attributes=None, sort=None, offset=0,
limit=None, requester_user=None, **raw_filters):
rules = super(RuleViewController, self)._get_all(exclude_fields=exclude_attributes,
include_fields=include_attributes,
sort=sort,
offset=offset,
limit=limit,
raw_filters=raw_filters,
requester_user=requester_user)
result = self._append_view_properties(rules.json)
rules.json = result
return rules
def get_one(self, ref_or_id, requester_user):
from_model_kwargs = {'mask_secrets': True}
rule = self._get_one(ref_or_id, permission_type=PermissionType.RULE_VIEW,
requester_user=requester_user, from_model_kwargs=from_model_kwargs)
result = self._append_view_properties([rule.json])[0]
rule.json = result
return rule
def _append_view_properties(self, rules):
action_by_refs, trigger_by_refs, trigger_type_by_refs = self._get_referenced_models(rules)
for rule in rules:
action_ref = rule.get('action', {}).get('ref', None)
trigger_ref = rule.get('trigger', {}).get('ref', None)
trigger_type_ref = rule.get('trigger', {}).get('type', None)
action_db = action_by_refs.get(action_ref, None)
if 'action' in rule:
rule['action']['description'] = action_db.description if action_db else ''
if 'trigger' in rule:
rule['trigger']['description'] = ''
trigger_db = trigger_by_refs.get(trigger_ref, None)
if trigger_db:
rule['trigger']['description'] = trigger_db.description
# If description is not found in trigger get description from TriggerType
if 'trigger' in rule and not rule['trigger']['description']:
trigger_type_db = trigger_type_by_refs.get(trigger_type_ref, None)
if trigger_type_db:
rule['trigger']['description'] = trigger_type_db.description
return rules
def _get_referenced_models(self, rules):
"""
Reduces the number of queries to be made to the DB by creating sets of Actions, Triggers
and TriggerTypes.
"""
action_refs = set()
trigger_refs = set()
trigger_type_refs = set()
for rule in rules:
action_ref = rule.get('action', {}).get('ref', None)
trigger_ref = rule.get('trigger', {}).get('ref', None)
trigger_type_ref = rule.get('trigger', {}).get('type', None)
if action_ref:
action_refs.add(action_ref)
if trigger_ref:
trigger_refs.add(trigger_ref)
if trigger_type_ref:
trigger_type_refs.add(trigger_type_ref)
action_by_refs = {}
trigger_by_refs = {}
trigger_type_by_refs = {}
# The functions that will return args that can used to query.
def ref_query_args(ref):
return {'ref': ref}
def name_pack_query_args(ref):
resource_ref = ResourceReference.from_string_reference(ref=ref)
return {'name': resource_ref.name, 'pack': resource_ref.pack}
action_dbs = self._get_entities(model_persistence=Action,
refs=action_refs,
query_args=ref_query_args)
for action_db in action_dbs:
action_by_refs[action_db.ref] = action_db
trigger_dbs = self._get_entities(model_persistence=Trigger,
refs=trigger_refs,
query_args=name_pack_query_args)
for trigger_db in trigger_dbs:
trigger_by_refs[trigger_db.get_reference().ref] = trigger_db
trigger_type_dbs = self._get_entities(model_persistence=TriggerType,
refs=trigger_type_refs,
query_args=name_pack_query_args)
for trigger_type_db in trigger_type_dbs:
trigger_type_by_refs[trigger_type_db.get_reference().ref] = trigger_type_db
return (action_by_refs, trigger_by_refs, trigger_type_by_refs)
def _get_entities(self, model_persistence, refs, query_args):
"""
Returns all the entities for the supplied refs. model_persistence is the persistence
object that will be used to get to the correct query method and the query_args function
to return the ref specific query argument.
This is such a weirdly specific method that it is likely better only in this context.
"""
q = None
for ref in refs:
if not q:
q = Q(**query_args(ref))
else:
q |= Q(**query_args(ref))
if q:
return model_persistence._get_impl().model.objects(q)
return []
rule_view_controller = RuleViewController()
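# Illustrative sketch (not part of st2): the query-building pattern used by _get_entities
# above -- OR-ing one mongoengine Q object per ref into a single query.
def _build_or_query_demo(refs, query_args):
    q = None
    for ref in refs:
        term = Q(**query_args(ref))
        q = term if q is None else (q | term)
    return q  # None when refs is empty; _get_entities returns [] in that case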
|
the-stack_106_15134
|
# -*- coding: utf-8 -*-
"""
eve.render
~~~~~~~~~~
Implements proper, automated rendering for Eve responses.
:copyright: (c) 2014 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import time
import datetime
import simplejson as json
from werkzeug import utils
from functools import wraps
from eve.methods.common import get_rate_limit
from eve.utils import date_to_str, config, request_method, debug_error_message
from flask import make_response, request, Response, current_app as app, abort
# mapping between supported mime types and render functions.
_MIME_TYPES = [
{'mime': ('application/json',), 'renderer': 'render_json', 'tag': 'JSON'},
{'mime': ('application/xml', 'text/xml', 'application/x-xml',),
'renderer': 'render_xml', 'tag': 'XML'}]
def raise_event(f):
""" Raises both general and resource-level events after the decorated
function has been executed. Returns both the flask.request object and the
response payload to the callback.
.. versionchanged:: 0.2
Renamed 'on_<method>' hooks to 'on_post_<method>' for coherence
with new 'on_pre_<method>' hooks.
.. versionchanged:: 0.1.0
Support for PUT.
.. versionchanged:: 0.0.9
To emphasize the fact that they are tied to a method, in `on_<method>`
events, <method> is now uppercase.
.. versionadded:: 0.0.6
"""
@wraps(f)
def decorated(*args, **kwargs):
r = f(*args, **kwargs)
method = request_method()
if method in ('GET', 'POST', 'PATCH', 'DELETE', 'PUT'):
event_name = 'on_post_' + method
resource = args[0] if args else None
# general hook
getattr(app, event_name)(resource, request, r)
if resource:
# resource hook
getattr(app, event_name + '_' + resource)(request, r)
return r
return decorated
@raise_event
def send_response(resource, response):
""" Prepares the response for the client.
:param resource: the resource involved.
:param response: either a flask.Response object or a tuple. The former will
simply be forwarded to the client. If the latter a proper
response will be prepared, according to directives within
the tuple.
.. versionchanged:: 0.0.6
Support for HEAD requests.
.. versionchanged:: 0.0.5
Handling the case where response is None. Happens when the request
method is 'OPTIONS', most likely while processing a CORS 'preflight'
request.
.. versionchanged:: 0.0.4
Now a simple dispatcher. Moved the response preparation logic to
``_prepare_response``.
"""
if isinstance(response, Response):
return response
else:
return _prepare_response(resource, *response if response else [None])
def _prepare_response(resource, dct, last_modified=None, etag=None,
status=200):
""" Prepares the response object according to the client request and
available renderers, making sure that all accessory directives (caching,
etag, last-modified) are present.
:param resource: the resource involved.
:param dct: the dict that should be sent back as a response.
:param last_modified: Last-Modified header value.
:param etag: ETag header value.
:param status: response status.
.. versionchanged:: 0.3
Support for X_MAX_AGE.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
.. versionchanged:: 0.0.9
Support for Python 3.3.
.. versionchanged:: 0.0.7
Support for Rate-Limiting.
.. versionchanged:: 0.0.6
Support for HEAD requests.
.. versionchanged:: 0.0.5
Support for Cross-Origin Resource Sharing (CORS).
.. versionadded:: 0.0.4
"""
if request.method == 'OPTIONS':
resp = app.make_default_options_response()
else:
# obtain the best match between client's request and available mime
# types, along with the corresponding render function.
mime, renderer = _best_mime()
# invoke the render function and obtain the corresponding rendered item
rendered = globals()[renderer](dct)
        # build the main WSGI response object
resp = make_response(rendered, status)
resp.mimetype = mime
# cache directives
if request.method in ('GET', 'HEAD'):
if resource:
cache_control = config.DOMAIN[resource]['cache_control']
expires = config.DOMAIN[resource]['cache_expires']
else:
cache_control = config.CACHE_CONTROL
expires = config.CACHE_EXPIRES
if cache_control:
resp.headers.add('Cache-Control', cache_control)
if expires:
resp.expires = time.time() + expires
# etag and last-modified
if etag:
resp.headers.add('ETag', etag)
if last_modified:
resp.headers.add('Last-Modified', date_to_str(last_modified))
# CORS
if 'Origin' in request.headers and config.X_DOMAINS is not None:
if isinstance(config.X_DOMAINS, str):
domains = [config.X_DOMAINS]
else:
domains = config.X_DOMAINS
if config.X_HEADERS is None:
headers = []
elif isinstance(config.X_HEADERS, str):
headers = [config.X_HEADERS]
else:
headers = config.X_HEADERS
methods = app.make_default_options_response().headers['allow']
resp.headers.add('Access-Control-Allow-Origin', ', '.join(domains))
resp.headers.add('Access-Control-Allow-Headers', ', '.join(headers))
resp.headers.add('Access-Control-Allow-Methods', methods)
resp.headers.add('Access-Control-Allow-Max-Age', config.X_MAX_AGE)
# Rate-Limiting
limit = get_rate_limit()
if limit and limit.send_x_headers:
resp.headers.add('X-RateLimit-Remaining', str(limit.remaining))
resp.headers.add('X-RateLimit-Limit', str(limit.limit))
resp.headers.add('X-RateLimit-Reset', str(limit.reset))
return resp
def _best_mime():
""" Returns the best match between the requested mime type and the
    ones supported by Eve. Along with the mime type, the corresponding
    render function is also returned.
.. versionchanged:: 0.3
Support for optional renderers via XML and JSON configuration keywords.
"""
supported = []
renders = {}
for mime in _MIME_TYPES:
# only mime types that have not been disabled via configuration
if app.config.get(mime['tag'], True):
for mime_type in mime['mime']:
supported.append(mime_type)
renders[mime_type] = mime['renderer']
if len(supported) == 0:
abort(500, description=debug_error_message(
'Configuration error: no supported mime types')
)
best_match = request.accept_mimetypes.best_match(supported) or \
supported[0]
return best_match, renders[best_match]
def render_json(data):
""" JSON render function
.. versionchanged:: 0.2
Json encoder class is now inferred by the active data layer, allowing
for customized, data-aware JSON encoding.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
"""
return json.dumps(data, cls=app.data.json_encoder_class)
def render_xml(data):
""" XML render function.
:param data: the data stream to be rendered as xml.
.. versionchanged:: 0.2
Use the new ITEMS configuration setting.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
.. versionchanged:: 0.0.3
Support for HAL-like hyperlinks and resource descriptors.
"""
if isinstance(data, list):
data = {config.ITEMS: data}
xml = ''
if data:
xml += xml_root_open(data)
xml += xml_add_links(data)
xml += xml_add_items(data)
xml += xml_root_close()
return xml
def xml_root_open(data):
""" Returns the opening tag for the XML root node. If the datastream
    includes information about resource endpoints (href, title), they will
be added as node attributes. The resource endpoint is then removed to allow
for further processing of the datastream.
:param data: the data stream to be rendered as xml.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
.. versionchanged:: 0.0.6
Links are now properly escaped.
.. versionadded:: 0.0.3
"""
links = data.get(config.LINKS)
href = title = ''
if links and 'self' in links:
self_ = links.pop('self')
href = ' href="%s" ' % utils.escape(self_['href'])
if 'title' in self_:
title = ' title="%s" ' % self_['title']
return '<resource%s%s>' % (href, title)
def xml_add_links(data):
""" Returns as many <link> nodes as there are in the datastream. The links
are then removed from the datastream to allow for further processing.
:param data: the data stream to be rendered as xml.
.. versionchanged:: 0.0.6
Links are now properly escaped.
.. versionadded:: 0.0.3
"""
xml = ''
chunk = '<link rel="%s" href="%s" title="%s" />'
links = data.pop(config.LINKS, {})
for rel, link in links.items():
if isinstance(link, list):
xml += ''.join([chunk % (rel, utils.escape(d['href']), d['title'])
for d in link])
else:
xml += ''.join(chunk % (rel, utils.escape(link['href']),
link['title']))
return xml
def xml_add_items(data):
""" When this function is called the datastream can only contain a `_items`
    list, or a dictionary. If a list, each item is a resource which is rendered as
XML. If a dictionary, it will be rendered as XML.
:param data: the data stream to be rendered as xml.
.. versionadded:: 0.0.3
"""
try:
xml = ''.join([xml_item(item) for item in data[config.ITEMS]])
except:
xml = xml_dict(data)
return xml
def xml_item(item):
""" Represents a single resource (member of a collection) as XML.
:param data: the data stream to be rendered as xml.
.. versionadded:: 0.0.3
"""
xml = xml_root_open(item)
xml += xml_add_links(item)
xml += xml_dict(item)
xml += xml_root_close()
return xml
def xml_root_close():
""" Returns the closing tag of the XML root node.
.. versionadded:: 0.0.3
"""
return '</resource>'
def xml_dict(data):
""" Renders a dict as XML.
:param data: the data stream to be rendered as xml.
.. versionchanged:: 0.2
Leaf values are now properly escaped.
.. versionadded:: 0.0.3
"""
xml = ''
for k, v in data.items():
if isinstance(v, datetime.datetime):
v = date_to_str(v)
elif isinstance(v, (datetime.time, datetime.date)):
v = v.isoformat()
if not isinstance(v, list):
v = [v]
for value in v:
if isinstance(value, dict):
links = xml_add_links(value)
xml += "<%s>" % k
xml += xml_dict(value)
xml += links
xml += "</%s>" % k
else:
xml += "<%s>%s</%s>" % (k, utils.escape(value), k)
return xml
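def _xml_dict_demo():
    """Illustrative sketch, not part of Eve: roughly what xml_dict produces for a plain
    payload -- scalar values become single nodes and lists become repeated nodes."""
    payload = {"name": "widget", "tags": ["a", "b"]}
    return xml_dict(payload)
    # -> '<name>widget</name><tags>a</tags><tags>b</tags>'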
|
the-stack_106_15137
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import Variable
fluid.load_op_library('models/ext_op/src/rrpn_lib.so')
def rrpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
gt_boxes,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for rotated region proposal network (RRPN).**
    Given the Intersection-over-Union (IoU) overlap between anchors and
    ground truth boxes, this layer assigns classification and regression
    targets to each anchor; these target labels are used to train the RPN.
    The classification target is a binary class label (of being
    an object or not). Following the paper of RRPN, the positive labels
are two kinds of anchors: (i) the anchor/anchors with the highest IoU
overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap(0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. A non-positive anchor is when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 5] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has five coordinate values and the layout
is [x, y, w, h, angle]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
frontground and background sigmoid, M is number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 5] holds M boxes,
each box is represented as [x, y, w, h, angle],
[x, y] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [w, h] is the right bottom
coordinate of the anchor box, angle is the rotation angle of box.
The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 5], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
use_random(bool): Whether to sample randomly when sampling.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox) is returned. The predicted_scores
and predicted_location is the predicted result of the RPN.
The target_label and target_bbox is the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 5], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
anchors, the F and B is depends on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 5].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 5], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 5], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 5], dtype='float32')
im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target = rrpn_target_assign(
bbox_pred, cls_logits, anchor_box, gt_boxes, im_info)
"""
helper = LayerHelper('rrpn_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rrpn_target_assign",
inputs={'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'ImInfo': im_info},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
cls_logits = fluid.layers.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = fluid.layers.reshape(x=bbox_pred, shape=(-1, 5))
predicted_cls_logits = fluid.layers.gather(cls_logits, score_index)
predicted_bbox_pred = fluid.layers.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox
def rotated_anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
angles=None,
variance=[1.0, 1.0, 1.0, 1.0, 1.0],
stride=None,
offset=0.5,
name=None):
"""
**Rotated Anchor generator operator**
Generate anchors for RRPN algorithm.
    Each position of the input produces N anchors, N =
    size(anchor_sizes) * size(aspect_ratios) * size(angles).
    The generated anchors are ordered by looping over aspect_ratios
    first and then anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
angle(list|tuple): Rotated angle of prior boxes. The data type is float32.
variance(list|tuple): The variances to be used in box
regression deltas. The data type is float32, [1.0, 1.0, 1.0, 1.0, 1.0] by
default.
stride(list|tuple): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32): Prior boxes center offset. 0.5 by default.
name(str): Name of this layer. None by default.
Returns:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 5].
H is the height of input, W is the width of input,
num_anchors is the box count of each position. Each anchor is
in (x, y, w, h, angle) format.
Variances(Variable): The expanded variances of anchors with a layout of
[H, W, num_priors, 5]. H is the height of input,
W is the width of input num_anchors is the box count
of each position. Each variance is in (x, y, w, h, angle) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = rotated_anchor_generator(
input=conv1,
anchor_sizes=[128, 256, 512],
aspect_ratios=[0.2, 0.5, 1.0],
variance=[1.0, 1.0, 1.0, 1.0, 1.0],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("rotated_anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not _is_list_or_tuple_(angles):
angles = [angles]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple '
                         'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
angles = list(map(float, angles))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'angles': angles,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="rotated_anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def rrpn_box_coder(prior_box, prior_box_var, target_box, name=None):
"""
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 5] holds M boxes and data type is float32 or float64. Each box
is represented as [x, y, w, h, angle], [x, y] is the
center coordinate of the anchor box, [w, h] is the width and height
of the anchor box, angle is rotated angle of prior_box.
prior_box_var(List|Variable|None): "prior_box_var is a 2-D Tensor with
shape [M, 5] holds M group of variance."
target_box(Variable): This input can be a 2-D LoDTensor with shape
[M, 5]. Each box is represented as [x, y, w, h, angle]. The data
type is float32 or float64.
name(str): Name of this layer. None by default.
Returns:
Variable:
output_box(Variable): The output tensor of rrpn_box_coder_op with shape [N, 5] representing the
result of N target boxes encoded with N Prior boxes and variances.
N represents the number of box and 5 represents [x, y, w, h ,angle].
Examples:
.. code-block:: python
import paddle.fluid as fluid
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 5],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 5],
dtype='float32')
output_decode = rrpn_box_coder(prior_box=prior_box_decode,
prior_box_var=[10, 10, 5, 5, 1],
target_box=target_box_decode)
"""
helper = LayerHelper("rrpn_box_coder", **locals())
if name is None:
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
else:
output_box = helper.create_variable(
name=name, dtype=prior_box.dtype, persistable=False)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError(
"Input variance of rrpn_box_coder must be Variable or list")
helper.append_op(
type="rrpn_box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
def rotated_roi_align(input,
rois,
pooled_height=1,
pooled_width=1,
spatial_scale=1.0,
name=None):
"""
**RotatedRoIAlign Operator**
Rotated Region of interest align (also known as Rotated RoI align) is to perform
bilinear interpolation on inputs of nonuniform sizes to obtain
fixed-size feature maps (e.g. 7*7)
Dividing each region proposal into equal-sized sections with
the pooled_width and pooled_height. Location remains the origin
result.
    Each RoI bin is transformed to become horizontal by a perspective transformation, and
    values in each RoI bin are computed directly through bilinear interpolation. The output is
    the mean of all values,
    which avoids the misalignment problem.
"""
helper = LayerHelper('rrpn_rotated_roi_align', **locals())
dtype = helper.input_dtype()
align_out = helper.create_variable_for_type_inference(dtype)
cx = helper.create_variable_for_type_inference('float32')
cy = helper.create_variable_for_type_inference('float32')
helper.append_op(
type="rrpn_rotated_roi_align",
inputs={"X": input,
"ROIs": rois},
outputs={"Out": align_out,
"ConIdX": cx,
"ConIdY": cy},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale,
})
return align_out
def rotated_generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False):
"""
**Rotated Generate Proposal Labels**
    Given the bounding boxes output by RotatedGenerateProposalOp and the groundtruth,
    this operator samples foreground and background boxes and computes the loss targets.
    RpnRois are the output boxes of the RPN processed by rotated_generate_proposal_op; these boxes
    are combined with groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
    An instance with a groundtruth overlap greater than fg_thresh is considered a foreground sample.
    An instance with a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi
    is considered a background sample.
    After all foreground and background boxes are chosen (the so-called RoIs),
    random sampling is applied to make sure
    the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in Rois, we assign the classification (class label) and regression targets (box label) to it.
Finally BboxInsideWeights and BboxOutsideWeights are used to specify whether it would contribute to training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 5]. N is the number of the RotatedGenerateProposalOp's output, each element is a bounding box with [x, y, w, h, angle] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 5]. M is the number of groundtruth, each element is a bounding box with [x, y, w, h, angle] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): bbox regression use class agnostic simply which only represent fg and bg boxes.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 5]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 5 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 5 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 5 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 5], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = rotated_generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('rrpn_generate_proposal_labels', **locals())
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="rrpn_generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def rotated_generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
name=None):
"""
**Rotated Generate proposal**
    This operation proposes Rotated RoIs according to each box's
    probability of being a foreground object; the boxes are calculated from the anchors.
    bbox_deltas and scores are the outputs of the RPN. Final proposals can be used to
    train the detection net. For generating proposals, this operation performs the following steps:
1. Transposes and resizes scores and bbox_deltas in size of
(H*W*A, 1) and (H*W*A, 5)
2. Calculate box locations as proposals candidates.
3. Remove predicted boxes with small area.
4. Apply NMS to get final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 5*A, H, W]
            represents the difference between the predicted box location and the
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Info contains height, width and scale
between origin image size and the size of feature map.
The data type must be int32.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 5]. H and W are height and width of the feature map,
num_anchors is the box count of each position. Each anchor is
in (x, y, w, h, angle) format. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 5]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
Returns:
tuple:
A tuple with format ``(rrpn_rois, rrpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 5]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 20, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 5], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 5], dtype='float32')
rrois, rroi_probs = fluid.layers.rotated_generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('rrpn_generate_proposals', **locals())
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
helper.append_op(
type="rrpn_generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size
},
outputs={'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
return rpn_rois, rpn_roi_probs
|
the-stack_106_15138
|
import os
import threading
import time
import traceback
from splunktalib.common import log
logger = log.Logs().get_logger("util")
class OrphanProcessChecker(object):
def __init__(self, callback=None):
"""
Only work for Linux platform. On Windows platform, is_orphan is always
False
"""
if os.name == "nt":
self._ppid = 0
else:
self._ppid = os.getppid()
self._callback = callback
def is_orphan(self):
if os.name == "nt":
return False
res = self._ppid != os.getppid()
if res:
logger.warn("Process=%s has become orphan", os.getpid())
return res
def check_orphan(self):
res = self.is_orphan()
if res and self._callback:
self._callback()
return res
class OrphanProcessMonitor(object):
def __init__(self, callback):
self._checker = OrphanProcessChecker(callback)
self._thr = threading.Thread(target=self._do_monitor)
self._thr.daemon = True
self._started = False
def start(self):
if self._started:
return
self._started = True
self._thr.start()
def stop(self):
self._started = False
def _do_monitor(self):
while self._started:
try:
res = self._checker.check_orphan()
if res:
break
time.sleep(1)
except Exception:
logger.error("Failed to monitor orphan process, reason=%s",
traceback.format_exc())
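# Minimal usage sketch (illustrative): wire the monitor to a shutdown callback so a
# long-running worker exits when its parent process goes away.
def _run_with_orphan_guard(do_work, request_shutdown):
    monitor = OrphanProcessMonitor(callback=request_shutdown)
    monitor.start()
    try:
        do_work()
    finally:
        monitor.stop()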
|
the-stack_106_15139
|
# -*- coding: utf-8 -*-
"""
flask.debughelpers
~~~~~~~~~~~~~~~~~~
Various helpers to make the development experience better.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
import os
from warnings import warn
from ._compat import implements_to_string
from ._compat import text_type
from .app import Flask
from .blueprints import Blueprint
from .globals import _request_ctx_stack
class UnexpectedUnicodeError(AssertionError, UnicodeError):
"""Raised in places where we want some better error reporting for
unexpected unicode or binary data.
"""
@implements_to_string
class DebugFilesKeyError(KeyError, AssertionError):
"""Raised from request.files during debugging. The idea is that it can
provide a better error message than just a generic KeyError/BadRequest.
"""
def __init__(self, request, key):
form_matches = request.form.getlist(key)
buf = [
'You tried to access the file "%s" in the request.files '
"dictionary but it does not exist. The mimetype for the request "
'is "%s" instead of "multipart/form-data" which means that no '
"file contents were transmitted. To fix this error you should "
'provide enctype="multipart/form-data" in your form.'
% (key, request.mimetype)
]
if form_matches:
buf.append(
"\n\nThe browser instead transmitted some file names. "
"This was submitted: %s" % ", ".join('"%s"' % x for x in form_matches)
)
self.msg = "".join(buf)
def __str__(self):
return self.msg
class FormDataRoutingRedirect(AssertionError):
"""This exception is raised by Flask in debug mode if it detects a
redirect caused by the routing system when the request method is not
GET, HEAD or OPTIONS. Reasoning: form data will be dropped.
"""
def __init__(self, request):
exc = request.routing_exception
buf = [
"A request was sent to this URL (%s) but a redirect was "
'issued automatically by the routing system to "%s".'
% (request.url, exc.new_url)
]
# In case just a slash was appended we can be extra helpful
if request.base_url + "/" == exc.new_url.split("?")[0]:
buf.append(
" The URL was defined with a trailing slash so "
"Flask will automatically redirect to the URL "
"with the trailing slash if it was accessed "
"without one."
)
buf.append(
" Make sure to directly send your %s-request to this URL "
"since we can't make browsers or HTTP clients redirect "
"with form data reliably or without user interaction." % request.method
)
buf.append("\n\nNote: this exception is only raised in debug mode")
AssertionError.__init__(self, "".join(buf).encode("utf-8"))
def attach_enctype_error_multidict(request):
"""Since Flask 0.8 we're monkeypatching the files object in case a
request is detected that does not use multipart form data but the files
object is accessed.
"""
oldcls = request.files.__class__
class newcls(oldcls):
def __getitem__(self, key):
try:
return oldcls.__getitem__(self, key)
except KeyError:
if key not in request.form:
raise
raise DebugFilesKeyError(request, key)
newcls.__name__ = oldcls.__name__
newcls.__module__ = oldcls.__module__
request.files.__class__ = newcls
def _dump_loader_info(loader):
yield "class: %s.%s" % (type(loader).__module__, type(loader).__name__)
for key, value in sorted(loader.__dict__.items()):
if key.startswith("_"):
continue
if isinstance(value, (tuple, list)):
if not all(isinstance(x, (str, text_type)) for x in value):
continue
yield "%s:" % key
for item in value:
yield " - %s" % item
continue
elif not isinstance(value, (str, text_type, int, float, bool)):
continue
yield "%s: %r" % (key, value)
def explain_template_loading_attempts(app, template, attempts):
"""This should help developers understand what failed"""
info = ['Locating template "%s":' % template]
total_found = 0
blueprint = None
reqctx = _request_ctx_stack.top
if reqctx is not None and reqctx.request.blueprint is not None:
blueprint = reqctx.request.blueprint
for idx, (loader, srcobj, triple) in enumerate(attempts):
if isinstance(srcobj, Flask):
src_info = 'application "%s"' % srcobj.import_name
elif isinstance(srcobj, Blueprint):
src_info = 'blueprint "%s" (%s)' % (srcobj.name, srcobj.import_name)
else:
src_info = repr(srcobj)
info.append("% 5d: trying loader of %s" % (idx + 1, src_info))
for line in _dump_loader_info(loader):
info.append(" %s" % line)
if triple is None:
detail = "no match"
else:
detail = "found (%r)" % (triple[1] or "<string>")
total_found += 1
info.append(" -> %s" % detail)
seems_fishy = False
if total_found == 0:
info.append("Error: the template could not be found.")
seems_fishy = True
elif total_found > 1:
info.append("Warning: multiple loaders returned a match for the template.")
seems_fishy = True
if blueprint is not None and seems_fishy:
info.append(
" The template was looked up from an endpoint that "
'belongs to the blueprint "%s".' % blueprint
)
info.append(" Maybe you did not place a template in the right folder?")
info.append(" See http://flask.pocoo.org/docs/blueprints/#templates")
app.logger.info("\n".join(info))
def explain_ignored_app_run():
if os.environ.get("WERKZEUG_RUN_MAIN") != "true":
warn(
Warning(
"Silently ignoring app.run() because the "
"application is run from the flask command line "
"executable. Consider putting app.run() behind an "
'if __name__ == "__main__" guard to silence this '
"warning."
),
stacklevel=3,
)
|
the-stack_106_15141
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseRedirect
from .forms import FaceIdentifyForm
from .models import FacePhoto
from django.conf import settings
from pprint import pprint
import requests
# Create your views here.
def face_identify(request):
url = 'https://api.findface.pro/v1/identify'
header = {'Authorization': 'TOK: m3MLGHi8SgbJOFQkC3-h-S2mpoajCRtO'}
form = FaceIdentifyForm(request.POST or None, request.FILES)
if form.is_valid():
username_share = form['user_name'].value()
# photo_share = request.FILES['photo']
if form['threshold'].value() == '':
threshold = 0
else:
threshold = form['threshold'].value()
if form['res_n'].value() == '':
res_n = 1
else:
res_n = form['res_n'].value()
data = {
'mf_selector': 'all',
'threshold': threshold,
'n': res_n,
}
instance = form.save()
id = instance.pk
print('\n\n')
print(instance.pk)
print(id)
print('\n\n')
print(instance.photo.path)
# path = settings.BASE_DIR + 'media_cdn/None/' + instance.photo.filename()
path = instance.photo.path
print('\n\n')
print(path)
files = {
'photo': open(path, 'rb')
}
request.session['username_share'] = username_share
request.session['instance_pk'] = instance.pk
# image = photo_share
r = requests.post(url, headers=header, data=data, files=files)
f = r.json()
pprint(f)
d = {}
for key, value in f['results'].items(): # faces can be multiple
print(key)
d[key] = {}
face_num = 0
for info in value:
# pprint(info)
face_num += 1
face_name = 'face_ex_' + str(face_num)
d[key].update({face_name: {}})
print('\n')
pprint(info['confidence'])
d[key][face_name].update({'confidence': info['confidence']})
print(info['face']['meta'])
                ex_info = eval(info['face']['meta'])  # surname, first name, patronymic
print(ex_info['name'])
d[key][face_name].update({'name': ex_info['name']})
print(info['face']['normalized']) # face shortcut
d[key][face_name].update({'normalized_url': info['face']['normalized']})
print(info['face']['thumbnail']) # person photo
d[key][face_name].update({'thumbnail_url': info['face']['thumbnail']})
print('\n\n')
pprint(d)
request.session['faces_data'] = d
# return redirect("face_identify:fi_result", id=id)
# return redirect("face_identify_result", id=id)
return HttpResponseRedirect("result")
# face_identify_result(request, id=id)
context = {
"form": form,
}
return render(request, "posts_form.html", context)
def face_identify_result(request):
id = request.session.get('instance_pk')
instance = get_object_or_404(FacePhoto, id=id)
username_share = request.session.get('username_share')
faces_data = request.session.get('faces_data')
photo_share = instance.photo
context = {
"instance": instance,
"photo": photo_share,
"username": username_share,
"faces_data": faces_data,
}
return render(request, "result.html", context)
|
the-stack_106_15146
|
import os
from xml.dom import minidom
import libvirt
from . import tools
from .constant import Constant
from .log import Log
class Box():
# pylint: disable=no-member
def __init__(self, settings):
self.libvirt_use_ssh = settings.libvirt_use_ssh
self.libvirt_private_key_file = settings.libvirt_private_key_file
self.libvirt_user = settings.libvirt_user
self.libvirt_host = settings.libvirt_host
self.libvirt_storage_pool = (
settings.libvirt_storage_pool if settings.libvirt_storage_pool else 'default'
)
self.libvirt_conn = None
self.libvirt_uri = None
self.pool = None
self._populate_box_list()
def _build_libvirt_uri(self):
uri = None
if self.libvirt_use_ssh:
uri = 'qemu+ssh://'
if self.libvirt_user:
uri += "{}@".format(self.libvirt_user)
assert self.libvirt_host, "Cannot use qemu+ssh without a host"
uri += "{}/system".format(self.libvirt_host)
if self.libvirt_private_key_file:
if '/' not in self.libvirt_private_key_file:
self.libvirt_private_key_file = os.path.join(
os.path.expanduser('~'),
'.ssh',
self.libvirt_private_key_file
)
uri += '?keyfile={}'.format(self.libvirt_private_key_file)
else:
uri = 'qemu:///system'
self.libvirt_uri = uri
def _populate_box_list(self):
self.all_possible_boxes = Constant.OS_BOX_MAPPING.keys()
self.boxes = []
cmd = ["vagrant", "box", "list"]
output = tools.run_sync(cmd)
lines = output.split('\n')
for line in lines:
if 'libvirt' in line:
box_name = line.split()[0]
if box_name in self.all_possible_boxes:
self.boxes.append(box_name)
def exists(self, box_name):
return box_name in self.boxes
def get_image_by_box(self, box_name):
#
# open connection to libvirt server
self.open_libvirt_connection()
#
# verify that the corresponding image exists in libvirt storage pool
self.pool = self.libvirt_conn.storagePoolLookupByName(self.libvirt_storage_pool)
removal_candidates = []
for removal_candidate in self.pool.listVolumes():
if str(removal_candidate).startswith('{}_vagrant_box_image'.format(box_name)):
removal_candidates.append(removal_candidate)
if len(removal_candidates) == 0:
return None
if len(removal_candidates) == 1:
return removal_candidates[0]
#
# bad news - multiple images match the box name
print("Images matching Vagrant Box ->{}<-".format(box_name))
print("===================================================")
for candidate in removal_candidates:
print(candidate)
print()
assert False, \
(
"Too many matching images. Don't know which one to remove. "
"This should not happen - please raise a bug!"
)
return None
def get_images_by_deployment(self, dep_id):
self.open_libvirt_connection()
self.pool = self.libvirt_conn.storagePoolLookupByName(self.libvirt_storage_pool)
matching_images = []
for removal_candidate in self.pool.listVolumes():
if str(removal_candidate).startswith(dep_id):
matching_images.append(removal_candidate)
return matching_images
def get_networks_by_deployment(self, dep_id):
self.open_libvirt_connection()
domains = [x for x in self.libvirt_conn.listAllDomains() if
x.name().startswith(dep_id)]
Log.debug("libvirt matching domains: {}".format(domains))
networks = set()
for domain in domains:
xml = minidom.parseString(domain.XMLDesc())
sources_lst = xml.getElementsByTagName("source")
ifaces = [source for source in sources_lst if
source.parentNode.nodeName == "interface" and
source.parentNode.hasAttribute("type") and
source.parentNode.getAttribute("type") == "network"]
Log.debug("libvirt domain's interfaces: {}".format(ifaces))
for iface in ifaces:
name = iface.getAttribute("network")
if name == "vagrant-libvirt":
continue
networks.add(name)
return list(networks)
def list(self):
for box in self.boxes:
print(box)
def open_libvirt_connection(self):
if self.libvirt_conn:
return None
self._build_libvirt_uri()
# print("Opening libvirt connection to ->{}<-".format(self.libvirt_uri))
self.libvirt_conn = libvirt.open(self.libvirt_uri)
return None
def remove_image(self, image_name):
image = self.pool.storageVolLookupByName(image_name)
image.delete()
@staticmethod
def remove_box(box_name):
tools.run_sync(["vagrant", "box", "remove", box_name])
def destroy_network(self, name):
self.open_libvirt_connection()
try:
network = self.libvirt_conn.networkLookupByName(name)
except libvirt.libvirtError:
Log.warning("Unable to find network '{}' for removal".format(name))
return False
try:
network.destroy()
except libvirt.libvirtError:
Log.error("Something went wrong destroying network '{}'".format(name))
return False
return True
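# Usage sketch (not part of the original module): list the Vagrant boxes known
# locally and look up the libvirt image backing one of them. Assumes `settings`
# provides the libvirt_* attributes read in __init__, that the `vagrant` CLI is
# installed, and that a libvirt daemon is reachable; the box name is only an
# example.
def _example_usage(settings):
    box = Box(settings)
    box.list()
    example_box = "opensuse/Leap-15.2.x86_64"
    if box.exists(example_box):
        print(box.get_image_by_box(example_box))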
|
the-stack_106_15148
|
# Discord Cryptocurrency bot.
# The bot scans for a period then searches for the cryptocurrency symbol given by the user.
# Example: .btc is a valid input.
from requests import Request, Session
import discord
import json
import os
# Hidden keys
my_secret = os.environ['Token']
my_secret2 = os.environ['API_KEY']
client = discord.Client()
# url for Coinmarketcap.com latest crypto updates.
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
@client.event
async def on_ready():
# Verifies that the bot is on.
print('The Bot is on!'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
# The bot is scanning for the period symbol, before any cryptocurrency symbol is given.
if message.content.startswith('.'):
name = message.content
# The first character is removed, (the period).
name = name[1:]
parameters = {
'symbol' : name,
'convert' : 'USD'
}
headers = {
'Accepts' : 'application/json',
'X-CMC_PRO_API_KEY' : my_secret2 # API key
}
session = Session()
session.headers.update(headers)
response = session.get(url, params=parameters)
# This If-statement will check if the cryptocurrency symbol is listed on Coinmarketcap.com,
# and if not it will return the error message to the discord server.
if response:
# Using the try command to test the block of code for anymore errors.
try:
# Sets the price of the cryptocurrency to the variable.
price = (json.loads(response.text)['data'][name.upper()]['quote']['USD']['price'])
except KeyError:
# Error message for a cryptocurrency that is listed on Coinmarketcap.com but doesn't have any data available for it.
await message.channel.send("Data for " + name.upper() + " isn't available on Coinmarketcap.com")
return
# Rounds to 2 decimals places if the price is greater than or equal to 0.1
if (price >= 0.1):
price = str(round(price, 2))
# Prints the price of the Cryptocurrency.
await message.channel.send("Price of " + name.upper() + " is:" + " ${0}".format(price))
else:
# Error message for a Cryptocurrency not listed on Coinmarketcap.com
await message.channel.send("This isn't a Cryptocurrency!\nOr it's not listed on Coinmarketcap.com")
return
client.run(my_secret) # Discord Token
|
the-stack_106_15149
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
from airflow.exceptions import AirflowException
from tests.providers.google.cloud.operators.test_cloud_sql_system_helper import CloudSqlQueryTestHelper
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_CLOUDSQL_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'project-id')
SQL_QUERY_TEST_HELPER = CloudSqlQueryTestHelper()
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_CLOUDSQL_KEY)
class CloudSqlExampleDagsIntegrationTest(GoogleSystemTest):
@provide_gcp_context(GCP_CLOUDSQL_KEY)
def tearDown(self):
# Delete instances just in case the test failed and did not clean up after itself
with self.authentication():
SQL_QUERY_TEST_HELPER.delete_instances(instance_suffix="-failover-replica")
SQL_QUERY_TEST_HELPER.delete_instances(instance_suffix="-read-replica")
SQL_QUERY_TEST_HELPER.delete_instances()
SQL_QUERY_TEST_HELPER.delete_instances(instance_suffix="2")
SQL_QUERY_TEST_HELPER.delete_service_account_acls()
super().tearDown()
@provide_gcp_context(GCP_CLOUDSQL_KEY)
def test_run_example_dag_cloudsql(self):
try:
self.run_dag('example_gcp_sql', CLOUD_DAG_FOLDER)
except AirflowException as e:
self.log.warning(
"In case you see 'The instance or operation is not in an appropriate "
"state to handle the request' error - you "
"can remove '.random' file from airflow folder and re-run "
"the test. This will generate random name of the database for next run "
"(the problem is that Cloud SQL keeps names of deleted instances in "
"short-term cache).")
raise e
|
the-stack_106_15150
|
from django.core.management.base import BaseCommand
import sys
class Command(BaseCommand):
help = 'Import imputation data. 2 arguments - a csv pointing pourpoint ids at nearest neighbor ids, and a csv for looking up ppt/scenario by delta FC'
def add_arguments(self, parser):
parser.add_argument('ppt_imputation', type=str)
parser.add_argument('nn_lookup', type=str)
def handle(self, *args, **options):
import sys
import csv
from ucsrb.models import PourPoint, ScenarioNNLookup
# Check out Input
try:
ppt_impute_in = options['ppt_imputation']
except IndexError:
self.stdout.write('--- ERROR: You must provide the csv pointing pourpoint ids at nearest neighbor ids! ---')
sys.exit()
try:
nn_lookup_in = options['nn_lookup']
except IndexError:
# self.stdout.write('--- ERROR: You must provide the csv for looking up ppt/scenario by delta FC! ---')
# sys.exit()
nn_lookup_in = False
self.stdout.write('--- NO NN_LOOKUP_GIVEN: Creating new lookups will be skipped ---')
with open(ppt_impute_in) as csvfile:
csvReader = csv.DictReader(csvfile)
for row in csvReader:
try:
ppt = PourPoint.objects.get(id=int(row['ppt_ID']))
ppt.imputed_ppt = PourPoint.objects.get(id=int(row['imputedPpt']))
ppt.streammap_id = int(row['strmMap_id'])
if 'conf' in row.keys():
ppt.confidence = int(row['conf'])
if row['wshed_name'] == 'Upper Methow': #Methow
ppt.watershed_id = 'met'
elif row['wshed_name'] == 'Upper Entiat': #Entiat
ppt.watershed_id = 'ent'
elif row['wshed_name'] == 'Chiwawa': #Wenatchee
ppt.watershed_id = 'wen'
ppt.save()
except Exception:
# There's some junk data in the CSV. Just skip it when the ppt doesn't exist.
pass
if nn_lookup_in:
with open(nn_lookup_in) as csvfile:
csvReader = csv.DictReader(csvfile)
for row in csvReader:
record_dict = {
'ppt_id': int(row['ppt_ID']),
'scenario_id': int(row['scen']),
'treatment_target': int(row['treatment']),
'fc_delta': float(row['delta']),
}
record, created = ScenarioNNLookup.objects.get_or_create(**record_dict)
record.save()
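# Illustrative invocation (not part of the original command). The actual command
# name comes from this module's file name, shown here as a hypothetical
# `import_imputation`:
#
#   python manage.py import_imputation ppt_imputation.csv nn_lookup.csv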
|
the-stack_106_15151
|
#!/usr/bin/env python
# Import stuff for compatibility between python 2 and 3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from future import standard_library
import pyspextools.messages as message
import numpy as np
import astropy.io.fits as fits
standard_library.install_aliases()
class Arf:
"""Class to read OGIP ARF files. The variable naming is made consistent with the HEASOFT HEASP module by
Keith Arnaud."""
def __init__(self):
self.LowEnergy = np.array([],dtype=float) #: Low Energy of bin
self.HighEnergy = np.array([],dtype=float) #: High Energy of bin
self.EffArea = np.array([],dtype=float) #: Effective Area of bin
self.EnergyUnits = 'keV' #: Energy units
self.ARFUnits = 'cm2'
self.Order = 0 #: Grating order (for grating arrays, else 0)
self.Grating = 0 #: Grating instrument (if available, 1 = HEG, 2 = MEG, 3 = LEG)
def read(self,arffile):
"""Read the effective area from an OGIP ARF file."""
(data, header) = fits.getdata(arffile,'SPECRESP',header=True)
self.LowEnergy = data['ENERG_LO']
self.HighEnergy = data['ENERG_HI']
self.EffArea = data['SPECRESP']
self.EnergyUnits = header['TUNIT1']
if header['TUNIT3'] == 'cm**2':
self.ARFUnits = 'cm2'
elif header['TUNIT3'] == 'cm2':
self.ARFUnits = 'cm2'
else:
message.warning("ARF units are not recognized.")
try:
self.Order = header['TG_M']
self.Grating = header['TG_PART']
except KeyError:
self.Order = 0
self.Grating = 0
# Check for NULL values
nans = np.isnan(self.EffArea)
if np.any(nans):
for i in np.arange(self.EffArea.size):
if nans[i]:
self.EffArea[i] = 0.0
return 0
def write(self, arffile, telescop=None, instrume=None, filter=None, overwrite=False):
'''Write an OGIP compatible ARF file (Non-grating format).'''
# Write the ARF arrays into FITS column format
col1 = fits.Column(name='ENERG_LO', format='D', unit=self.EnergyUnits, array=self.LowEnergy)
col2 = fits.Column(name='ENERG_HI', format='D', unit=self.EnergyUnits, array=self.HighEnergy)
col3 = fits.Column(name='SPECRESP', format='D', unit=self.ARFUnits, array=self.EffArea)
hdu = fits.BinTableHDU.from_columns([col1, col2, col3])
hdr = hdu.header
hdr.set('EXTNAME','SPECRESP')
# Set the TELESCOP keyword (optional)
if telescop is None:
hdr.set('TELESCOP','None','Telescope name')
else:
hdr.set('TELESCOP',telescop,'Telescope name')
# Set the INSTRUME keyword (optional)
if instrume is None:
hdr.set('INSTRUME','None','Instrument name')
else:
hdr.set('INSTRUME',instrume,'Instrument name')
# Set the FILTER keyword (optional)
if filter is None:
hdr.set('FILTER','None','Filter setting')
else:
hdr.set('FILTER',filter,'Filter setting')
hdr.set('DETNAM','None')
hdr.set('HDUCLASS','OGIP')
hdr.set('HDUCLAS1','RESPONSE')
hdr.set('HDUCLAS2','SPECRESP')
hdr.set('HDUVERS','1.1.0')
hdr.set('ORIGIN','SRON')
hdu.header['HISTORY'] = 'Created by pyspextools:'
hdu.header['HISTORY'] = 'https://github.com/spex-xray/pyspextools'
try:
hdu.writeto(arffile, overwrite=overwrite)
except IOError:
message.error("File {0} already exists. I will not overwrite it!".format(arffile))
return 1
return 0
def check(self):
"""Check if the basic information is read in."""
if self.LowEnergy.size <= 0:
message.error("Energy array has zero length.")
return 1
if self.EffArea.size <= 0:
message.error("Effective area array has zero length.")
return 1
return 0
def disp(self):
"""Display a summary of the ARF object."""
print("ARF effective area:")
print("LowEnergy array: {0} Low Energy of bin".format(self.LowEnergy.size))
print("HighEnergy array: {0} High Energy of bin".format(self.HighEnergy.size))
print("EffArea array: {0} Effective Area of bin".format(self.EffArea.size))
print("Energy units: {0} Energy units".format(self.EnergyUnits))
print("Area units: {0} Area units".format(self.ARFUnits))
return
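# Usage sketch (not part of the original module): read an ARF, print a summary
# and write a copy. The file name and the telescope/instrument keywords are
# placeholder assumptions, not values fixed by this module.
def _example_usage(arffile="example.arf"):
    arf = Arf()
    if arf.read(arffile) != 0 or arf.check() != 0:
        message.error("Could not load {0}".format(arffile))
        return 1
    arf.disp()
    return arf.write("copy_" + arffile, telescop="XMM-Newton", instrume="EPIC",
                     overwrite=True)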
|
the-stack_106_15152
|
import functools
import pytest
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.shortcuts import reverse
from byro.common.models.configuration import Configuration
@pytest.fixture
def configuration():
config = Configuration.get_solo()
config.name = "Der Verein e.V."
config.backoffice_mail = "[email protected]"
config.mail_from = "[email protected]"
config.save()
return config
@pytest.fixture
def full_testdata(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
call_command("make_testdata")
@pytest.fixture
def user():
user = get_user_model().objects.create(
username="charlotte", last_name="Charlotte Holmes", is_staff=True
)
user.set_password("test_password")
user.save()
yield user
user.delete()
@pytest.fixture
def client(live_server, selenium, user, configuration):
selenium.implicitly_wait(10)
def go_to(url, *args, **kwargs):
return selenium.get(live_server.url + reverse(url, *args, **kwargs))
selenium.go_to = go_to
return selenium
@pytest.fixture
def logged_in_client(live_server, client, user):
client.go_to("common:login")
client.implicitly_wait(10)
client.find_element_by_css_selector("form input[name=username]").send_keys(
user.username
)
client.find_element_by_css_selector("form input[name=password]").send_keys(
"test_password"
)
client.find_element_by_css_selector("form button[type=submit]").click()
return client
@pytest.fixture
def chrome_options(chrome_options):
chrome_options.add_argument("headless")
chrome_options.add_argument("window-size=1488x837")
return chrome_options
@pytest.fixture(autouse=True)
def hide_data():
import byro.common.utils
byro.common.utils.get_version = lambda: None
import byro.common.context_processors
orig = byro.common.context_processors.byro_information
@functools.wraps(orig)
def byro_information(request):
ctx = orig(request)
ctx["log_end"] = {
"auth_hash": "blake2b:428c6368597439c4fd935d9941c3554e29ad6c675a4aa20163dbb79a242bf8f6c6e76a5f7ba484f048cc916d1072bc2f5eea754cfb6f994e8b2a03f0c02cfa30"
}
return ctx
byro.common.context_processors.byro_information = byro_information
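# Usage sketch (not part of the original conftest): a test module in the same
# suite could exercise the fixtures above roughly like this. The URL name and
# the asserted text are assumptions; rename with a `test_` prefix inside a test
# module for pytest to collect it.
def _example_dashboard(logged_in_client):
    logged_in_client.go_to("office:dashboard")
    assert "Der Verein e.V." in logged_in_client.page_source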
|
the-stack_106_15155
|
"""Incompressible Navier-Stokes integrator on periodic two-dimensional domain.
Based on methods proposed in
> Stam, Jos. A simple fluid solver based on the FFT.
> Journal of graphics tools 6.2 (2001): 43-52.
>
> Kim, ByungMoon, Yingjie Liu, Ignacio Llamas, and Jarek Rossignac.
> FlowFixer: using BFECC for fluid simulation.
> Proceedings of the First Eurographics conference on Natural Phenomena.
> Eurographics Association, 2005.
"""
from typing import Tuple
import numpy as np
try:
import pyfftw.interfaces.numpy_fft as fft
PYFFTW_AVAILABLE = True
except ImportError:
import numpy.fft as fft
PYFFTW_AVAILABLE = False
from dapy.integrators.interpolate import batch_bilinear_interpolate
class FourierNavierStokesIntegrator:
"""Incompressible Navier-Stokes fluid simulation on 2D periodic grid.
Simulates evolution of an incompressible fluid velocity field on a 2-torus
using a finite difference based implementation of the incompressible
Navier-Stokes equations in two-dimensions. To enforce the
incompressibility condition the velocity field is parameterised as a
scalar vorticity field corresponding to the curl of the velocity field. A
semi-Lagrangian method is used for the advection updates and a FFT-based
method used to implement the viscous diffusion.
A second-order accurate BFECC (back and forward error correction and
compensation) method is used to simulate the advection steps [1] which
decreases numerical dissipation of vorticity.
References:
1. Kim, ByungMoon, Yingjie Liu, Ignacio Llamas, and Jarek Rossignac.
FlowFixer: using BFECC for fluid simulation. Proceedings of the First
Eurographics conference on Natural Phenomena. Eurographics
Association, 2005.
"""
def __init__(
self,
mesh_shape: Tuple[int, int],
domain_size: Tuple[float, float] = (2.0, 2.0),
time_step: float = 0.05,
viscous_diffusion_coeff: float = 1e-4,
max_num_thread: int = 1,
):
"""
Incompressible Navier-Stokes fluid simulation on 2D periodic grid.
Args:
mesh_shape: Mesh dimensions as a 2-tuple `(dim_0, dim_1)`.
domain_size: Spatial domain size a 2-tuple `(size_0, size_1)`.
time_step: Integrator time-step.
viscous_diffusion_coeff: Velocity viscous diffusion coefficient.
max_num_thread: Maximum number of threads to use for FFT and
interpolation operations.
"""
self.mesh_shape = mesh_shape
self.domain_size = domain_size
self.dim_state = mesh_shape[0] * mesh_shape[1]
self.viscous_diffusion_coeff = viscous_diffusion_coeff
self.time_step = time_step
self.max_num_thread = max_num_thread
# Calculate spatial size of each cell in mesh.
self.mesh_cell_size = np.array(
[
self.domain_size[0] / self.mesh_shape[0],
self.domain_size[1] / self.mesh_shape[1],
]
)
# Coordinate indices of mesh cell corners.
self.mesh_corner_indices = np.array(
np.meshgrid(
np.arange(self.mesh_shape[0]),
np.arange(self.mesh_shape[1]),
indexing="ij",
)
)
# Spatial angular frequency values for rfft2 frequency grid layout i.e.
# FFT along axis 0 and RFFT along axis 1
# Always use numpy.fft module here as pyfftw interface does not provide
# fftfreq functions
freq_grid_0 = np.fft.fftfreq(mesh_shape[0], self.mesh_cell_size[0]) * 2 * np.pi
freq_grid_1 = np.fft.rfftfreq(mesh_shape[1], self.mesh_cell_size[1]) * 2 * np.pi
# Squared wavenumbers
self.wavnums_sq = freq_grid_0[:, None] ** 2 + freq_grid_1[None, :] ** 2
# Kernel in frequency space to simulate viscous diffusion term.
# Corresponds to solving diffusion equation in 2D exactly in time with
# spectral method to approximate second-order spatial derivatives.
self.viscous_diffusion_kernel = np.exp(
-viscous_diffusion_coeff * time_step * self.wavnums_sq
)
# For first derivative expressions zero Nyquist frequency for even
# number of grid points:
# > Notes on FFT-based differentiation.
# > Steven G. Johnson, MIT Applied Mathematics.
# > http://math.mit.edu/~stevenj/fft-deriv.pdf
grad_0_kernel = freq_grid_0 * 1j
grad_1_kernel = freq_grid_1 * 1j
if mesh_shape[0] % 2 == 0:
grad_0_kernel[mesh_shape[0] // 2] = 0
if mesh_shape[1] % 2 == 0:
grad_1_kernel[mesh_shape[1] // 2] = 0
# Clip zero wave number square values to small positive constant to
# avoid divide by zero warnings.
wavnums_sq_clip = np.maximum(self.wavnums_sq, 1e-8)
# Coefficients of vector field frequency components to solve Poisson's
# equation to project to divergence-free field.
self.fft_vel_coeff_0 = grad_1_kernel[None, :] / wavnums_sq_clip
self.fft_vel_coeff_1 = -grad_0_kernel[:, None] / wavnums_sq_clip
def rfft2(self, field):
"""Convenience wrapper for real-valued 2D FFT."""
if PYFFTW_AVAILABLE:
num_thread = min(field.shape[0], self.max_num_thread)
return fft.rfft2(field, norm="ortho", threads=num_thread)
else:
return fft.rfft2(field, norm="ortho")
def irfft2(self, field):
"""Convenience wrapper for inverse real-valued 2D FFT."""
if PYFFTW_AVAILABLE:
num_thread = min(field.shape[0], self.max_num_thread)
return fft.irfft2(field, norm="ortho", threads=num_thread)
else:
return fft.irfft2(field, norm="ortho")
def velocity_from_fft_vorticity(self, fft_vorticity):
"""Compute velocity vector field from FFT of vorticity scalar field."""
# Solve for velocity field in terms of vorticity in frequency space.
fft_velocity = np.stack(
[
self.fft_vel_coeff_0 * fft_vorticity,
self.fft_vel_coeff_1 * fft_vorticity,
],
axis=-3,
)
# Perform inverse 2D real-valued FFT to map back to spatial fields.
return self.irfft2(fft_velocity)
def semi_lagrangian_advect(self, field, velocity):
"""Use semi-Lagrangian method to advect a given field a single step."""
# Set number of threads to parallelise interpolation across
# conservatively such that never more than number of independent fields
num_thread = min(field.shape[0], self.max_num_thread)
# Estimate mesh coordinates of particles which end up at mesh corner
# points when following current velocity field.
particle_centers = self.mesh_corner_indices[None] - (
velocity * self.time_step / self.mesh_cell_size[None, :, None, None]
)
# Calculate advected field values by bilinearly interpolating field
# values at back traced particle locations.
return batch_bilinear_interpolate(field, particle_centers, num_thread)
def bfecc_advect(self, field, velocity):
"""Use BFECC method to advect a given field a single step."""
# Initial forwards step
field_1 = self.semi_lagrangian_advect(field, velocity)
# Backwards step from field_1
field_2 = self.semi_lagrangian_advect(field_1, -velocity)
# Compute error corrected original field
field_3 = (3 * field - field_2) / 2.0
# Final forwards step
return self.semi_lagrangian_advect(field_3, velocity)
def step(self, fft_vorticity):
"""Perform single time step update of FFT of vorticity field."""
# Diffuse vorticity in spectral domain
fft_vorticity *= self.viscous_diffusion_kernel
# Calculate velocity and vorticity in spatial domain
velocity = self.velocity_from_fft_vorticity(fft_vorticity)
vorticity = self.irfft2(fft_vorticity)
# Advect vorticity
vorticity = self.bfecc_advect(vorticity, velocity)
return self.rfft2(vorticity)
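# Usage sketch (not part of the original module): advance a random initial
# vorticity field a few steps and return the final spatial vorticity. The mesh
# size, the leading batch axis of one field and the step count are arbitrary
# example values.
def _example_simulation(num_steps=10):
    rng = np.random.RandomState(0)
    integrator = FourierNavierStokesIntegrator(mesh_shape=(64, 64), time_step=0.05)
    # Single vorticity field with a leading batch axis, matching how step()
    # broadcasts its kernels and interpolation calls.
    vorticity = rng.standard_normal((1, 64, 64))
    fft_vorticity = integrator.rfft2(vorticity)
    for _ in range(num_steps):
        fft_vorticity = integrator.step(fft_vorticity)
    return integrator.irfft2(fft_vorticity)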
|
the-stack_106_15156
|
from tkinter import *
from pygame import mixer
root=Tk()
mixer.init()
def play():
mixer.music.load('alan-walker-julie-bergan-seungri-Y4MZ59Tj.mp3')
mixer.music.play()
def stop():
mixer.music.stop()
def volume(val):
volume=int(val)/100
mixer.music.set_volume(volume)
Button(root,text='play',command=play).pack()
Button(root,text='stop',command=stop).pack()
scale=Scale(root,from_=0,to=100,orient=HORIZONTAL,command=volume)
scale.set(50)
scale.pack()
root.mainloop()
|
the-stack_106_15157
|
import os
import re
import codecs
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read_file(filename, encoding='utf8'):
"""Read unicode from given file."""
with codecs.open(filename, encoding=encoding) as fd:
return fd.read()
# read version number from the script
here = os.path.abspath(os.path.dirname(__file__))
script_path = os.path.join(here, 'certsrv.py')
version = dict(re.findall(r"""__([a-z]+)__ = "([^"]+)""", read_file(script_path)))['version']
readme = read_file(os.path.join(here, 'README.rst'))
setup(
name='certsrv',
description='A Python client for the Microsoft AD Certificate Services web page',
long_description=readme,
author='Magnus Watn',
license='MIT',
url='https://github.com/magnuswatn/certsrv',
keywords='ad adcs certsrv pki certificate',
version=version,
py_modules=['certsrv'],
install_requires=[
'requests',
],
extras_require={
'ntlm': ['requests_ntlm'],
'gssapi': ['requests-gssapi'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Systems Administration',
],
)
|
the-stack_106_15161
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/rstcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
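# Illustrative example (not part of the original script) of the 'po' entries
# parse_po() consumes. xgettext emits blocks such as
#
#   msgid "Hello"
#   msgstr ""
#
# which parse_po() returns as [(['"Hello"'], ['""'])]: msgid/msgstr values are
# kept as lists of the quoted source lines rather than decoded strings.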
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *rstcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("rstcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
the-stack_106_15162
|
from __future__ import unicode_literals
from base64 import b64decode
import datetime
import xmltodict
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds
from moto.core import ACCOUNT_ID
from moto.sts.utils import (
random_access_key_id,
random_secret_access_key,
random_session_token,
random_assumed_role_id,
)
class Token(BaseModel):
def __init__(self, duration, name=None, policy=None):
now = datetime.datetime.utcnow()
self.expiration = now + datetime.timedelta(seconds=duration)
self.name = name
self.policy = policy
@property
def expiration_ISO8601(self):
return iso_8601_datetime_with_milliseconds(self.expiration)
class AssumedRole(BaseModel):
def __init__(self, role_session_name, role_arn, policy, duration, external_id):
self.session_name = role_session_name
self.role_arn = role_arn
self.policy = policy
now = datetime.datetime.utcnow()
self.expiration = now + datetime.timedelta(seconds=duration)
self.external_id = external_id
self.access_key_id = "ASIA" + random_access_key_id()
self.secret_access_key = random_secret_access_key()
self.session_token = random_session_token()
self.assumed_role_id = "AROA" + random_assumed_role_id()
@property
def expiration_ISO8601(self):
return iso_8601_datetime_with_milliseconds(self.expiration)
@property
def user_id(self):
return self.assumed_role_id + ":" + self.session_name
@property
def arn(self):
return (
"arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format(
account_id=ACCOUNT_ID,
role_name=self.role_arn.split("/")[-1],
session_name=self.session_name,
)
)
class STSBackend(BaseBackend):
def __init__(self):
self.assumed_roles = []
def get_session_token(self, duration):
token = Token(duration=duration)
return token
def get_federation_token(self, name, duration, policy):
token = Token(duration=duration, name=name, policy=policy)
return token
def assume_role(self, **kwargs):
role = AssumedRole(**kwargs)
self.assumed_roles.append(role)
return role
def get_assumed_role_from_access_key(self, access_key_id):
for assumed_role in self.assumed_roles:
if assumed_role.access_key_id == access_key_id:
return assumed_role
return None
def assume_role_with_web_identity(self, **kwargs):
return self.assume_role(**kwargs)
def assume_role_with_saml(self, **kwargs):
del kwargs["principal_arn"]
saml_assertion_encoded = kwargs.pop("saml_assertion")
saml_assertion_decoded = b64decode(saml_assertion_encoded)
saml_assertion = xmltodict.parse(saml_assertion_decoded.decode("utf-8"))
kwargs["duration"] = int(
saml_assertion["samlp:Response"]["Assertion"]["AttributeStatement"][
"Attribute"
][2]["AttributeValue"]
)
kwargs["role_session_name"] = saml_assertion["samlp:Response"]["Assertion"][
"AttributeStatement"
]["Attribute"][0]["AttributeValue"]
kwargs["external_id"] = None
kwargs["policy"] = None
role = AssumedRole(**kwargs)
self.assumed_roles.append(role)
return role
sts_backend = STSBackend()
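# Usage sketch (not part of the original module): create an assumed role on the
# in-memory backend and look it up again by its generated access key. The ARN
# and session name are arbitrary example values.
def _example_assume_role():
    role = sts_backend.assume_role(
        role_session_name="example-session",
        role_arn="arn:aws:iam::123456789012:role/example-role",
        policy=None,
        duration=3600,
        external_id=None,
    )
    return sts_backend.get_assumed_role_from_access_key(role.access_key_id) is role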
|
the-stack_106_15163
|
import numpy as np
from bokeh.io import output_file
from bokeh.plotting import figure, show
from bokeh.models import ColumnDataSource, PrintfTickFormatter, HoverTool
from bokeh.layouts import gridplot
def generate_fig(results, title, series, x_axis_label, y_axis_label):
results_cds = ColumnDataSource(results)
fig = figure(
title=title,
plot_height=350,
plot_width=700,
x_range=(results["x"].min(), results["x"].max()),
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
)
fig.yaxis[0].formatter = PrintfTickFormatter(format="%4.1e")
for s in series:
x, y, legend, color, line_dash = s
fig.line(
x, y, legend=legend, color=color, line_dash=line_dash, source=results_cds
)
return fig
# def generate_buckle_fig(results):
# buckle_fig = figure(
# title="Buckling Force",
# plot_height=300,
# plot_width=700,
# x_range=(60, 140),
# y_axis_type="log",
# x_axis_label="Minimum Buckle Length [m]",
# y_axis_label="Buckling Force [N]",
# )
# buckle_fig.yaxis[0].formatter = PrintfTickFormatter(format="%4.1e")
# x = np.linspace(60, 140, 100)
# colors = ("red", "blue", "purple", "pink")
# for mode in np.arange(1, 5):
# P_bucs = np.array([buckle_force(L, mode) for L in x])
# color = colors[mode - 1]
# buckle_fig.line(x=x, y=P_bucs, legend=f"Mode {mode}", color=color)
# # buckle_fig.line(x=x, y=-P_max, legend="P_max", color="black", line_dash="dashed")
# buckle_fig.legend.location = "top_left"
# return buckle_fig
def generate_plots(results):
temp_fig = generate_fig(
results,
"Temperature Profile",
[
("x", "T", "Internal Temperature", "blue", []),
("x", "delta_T", "Temperature Difference", "red", []),
],
"KP [m]",
"Temperature [degC]",
)
force_fig = generate_fig(
results,
"Fully Restrained Effective Axial Force",
[("x", "F_eff", None, "blue", [])],
"KP [m]",
"Axial Force [N]",
)
friction_fig = generate_fig(
results,
"Friction Force",
[("x", "F_f", None, "blue", [])],
"KP [m]",
"Friction Force [N]",
)
resultant_fig = generate_fig(
results,
"Resultant Effective Axial Force",
[("x", "F_res", None, "blue", [])],
"KP [m]",
"Axial Force [N]",
)
all_fig = generate_fig(
results,
"Pipeline Axial Force",
[
("x", "F_actual", "Resultant", "blue", []),
("x", "F_res", "EAF", "grey", "dashed"),
("x", "F_b", "BIF", "red", "dashed"),
],
"KP [m]",
"Axial Force [N]",
)
return gridplot(
[[temp_fig], [force_fig], [friction_fig], [resultant_fig], [all_fig]]
)
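# Usage sketch (not part of the original module): build a synthetic results
# table carrying the columns the figures above expect ("x", "T", "delta_T",
# "F_eff", "F_f", "F_res", "F_actual", "F_b") and render the grid. All values
# below are made up for demonstration only.
def _example_plot():
    x = np.linspace(0.0, 1000.0, 200)
    results = {
        "x": x,
        "T": 60.0 - 0.02 * x,
        "delta_T": 50.0 - 0.02 * x,
        "F_eff": -1.0e6 * np.ones_like(x),
        "F_f": 1.0e3 * x,
        "F_res": -1.0e6 + 1.0e3 * x,
        "F_actual": np.minimum(-1.0e6 + 1.0e3 * x, -2.0e5),
        "F_b": -2.0e5 * np.ones_like(x),
    }
    output_file("pipeline_forces.html")
    show(generate_plots(results))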
|
the-stack_106_15164
|
import pandas as pd
import numpy as np
from keras.utils import to_categorical
from keras.layers import Dense,Conv2D, BatchNormalization,Dropout, Input,normalization,Activation,add,MaxPooling2D, Flatten,GaussianNoise
from keras import backend as K
from keras.optimizers import Adam, RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, load_model
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from sklearn.model_selection import train_test_split
import os
from hyperas import optim
from hyperopt import Trials, STATUS_OK, tpe
from hyperas.distributions import choice, uniform
import matplotlib.pyplot as plt
def data():
train = pd.read_csv(r'L:\Users\Ang\Documents\data\kaggle\digital\data\train.csv')
label = train['label']
data = train.drop(labels=['label'],axis=1)
label = label.to_numpy()
label = to_categorical(label,num_classes = 10)
data = data.to_numpy()
data=data.reshape(-1,28,28)
data=data.reshape(-1,28,28,1)
data = data/255.0
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range = 0.1, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
X_train, X_val, Y_train, Y_val = train_test_split(data, label, test_size = 0.1)
datagen.fit(X_train)
return datagen, X_train, Y_train, X_val, Y_val
def test_data():
test = pd.read_csv(r'L:\Users\Ang\Documents\data\kaggle\digital\data\test.csv')
test_data= test.to_numpy()
test_data=test_data.reshape(-1,28,28)
test_data=test_data.reshape(-1,28,28,1)
test_data = test_data/255.0
return test_data
def model(datagen,X_train,Y_train,X_val,Y_val):
num_layers1 = {{choice([48, 64, 96])}}
num_layers2 = {{choice([96, 128, 192])}}
num_layers3 = {{choice([192, 256, 512])}}
lrate = {{choice([0.0001, 0.0004,0.0008])}}
epochs = 60
batch_size = 64
inputs = Input((28,28,1))
nois=GaussianNoise(0.2)(inputs)
conv1 = Conv2D(num_layers1, (3, 3), activation=None, padding='same')(nois)
conv1 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(conv1)
conv1 = Activation('relu')(conv1)
conv2 = Conv2D(num_layers1, (3, 3), activation=None, padding='same')(conv1)
conv2 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(conv2)
conv2 = Activation('relu')(conv2)
conv3 = Conv2D(num_layers1, (3, 3), activation=None, padding='same')(conv2)
conv3 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(conv3)
conv3 = Activation('relu')(conv3)
conv3= MaxPooling2D(pool_size=(2, 2))(conv3)
conv3= Dropout({{uniform(0,0.5)}})(conv3)
conv4 = Conv2D(num_layers2, (3, 3), activation=None, padding='same')(conv3)
conv4 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(conv4)
conv4 = Activation('relu')(conv4)
conv5 = Conv2D(num_layers2, (3, 3), activation=None, padding='same')(conv4)
conv5 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(conv5)
conv5 = Activation('relu')(conv5)
conv6 = Conv2D(num_layers2, (3, 3), activation=None, padding='same')(conv5)
conv6 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(conv6)
conv6 = Activation('relu')(conv6)
conv6= MaxPooling2D(pool_size=(2, 2))(conv6)
conv6= Dropout({{uniform(0,0.5)}})(conv6)
conv7 = Conv2D(num_layers3, (3, 3), activation=None, padding='same')(conv6)
conv7 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(conv7)
conv7 = Activation('relu')(conv7)
conv8 = Conv2D(num_layers3, (3, 3), activation=None, padding='same')(conv7)
conv8 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(conv8)
conv8 = Activation('relu')(conv8)
conv9 = Conv2D(num_layers3, (3, 3), activation=None, padding='same')(conv8)
conv9 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(conv9)
conv9 = Activation('relu')(conv9)
conv9= MaxPooling2D(pool_size=(2, 2))(conv9)
conv9= Dropout({{uniform(0,0.5)}})(conv9)
conv9=Flatten()(conv9)
dout1= Dense(256,activation = 'relu')(conv9)
dout1 = normalization.BatchNormalization(epsilon=2e-05, axis=-1, momentum=0.9, weights=None,
beta_initializer='zero', gamma_initializer='one')(dout1)
dout1= Dropout({{uniform(0,0.5)}})(dout1)
dout2 =Dense(10,activation = 'softmax')(dout1)
model = Model(inputs=inputs, outputs=dout2)
optimizer=Adam(lr=lrate, beta_1=0.9, beta_2=0.95, epsilon=None, decay=0.0, amsgrad=False)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
save_path=os.getcwd()
checkpointer = []
#checkpointer.append(ModelCheckpoint(filepath=os.path.join(save_path,'best_model.hdf5'), verbose=1, save_best_only=True))
checkpointer.append(ReduceLROnPlateau(monitor='val_acc', patience=8, verbose=1, factor=0.5, min_lr=0.00001))
checkpointer.append(EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=0, mode='auto', baseline=None, restore_best_weights=True))
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data = (X_val,Y_val),
verbose = 32, steps_per_epoch=X_train.shape[0] // batch_size
,callbacks=checkpointer)
score, acc = model.evaluate(X_val, Y_val, verbose=0)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def main():
datagen, X_train, Y_train, X_test, Y_test = data()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=30,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
tdata=test_data()
results = best_model.predict(tdata)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name='label')
submission = pd.concat([pd.Series(range(1,28001),name = 'ImageId'),results],axis = 1)
submission.to_csv('cnn_mnist_submission.csv',index=False)
if __name__ == '__main__':
main()
|
the-stack_106_15166
|
import syft as sy
from syft.workers.abstract import AbstractWorker
import weakref
from syft.generic.tensor import AbstractTensor
from syft.generic.tensor import initialize_tensor
from syft.messaging.promise import Promise
from syft.generic.frameworks.hook import hook_args
class PromiseTensor(AbstractTensor, Promise):
def __init__(
self, shape, owner=None, id=None, tensor_type=None, plans=None, tags=None, description=None
):
"""Initializes a PromiseTensor
Args:
shape: the shape that should have the tensors keeping the promise.
owner: an optional BaseWorker object to specify the worker on which
the tensor is located.
id: an optional string or integer id of the PromiseTensor.
tensor_type: the type that should have the tensors keeping the promise.
plans: the ids of the plans waiting for the promise to be kept. When the promise is
kept, all the plans corresponding to these ids will be executed if the other
promises they were waiting for are also kept.
tags: an optional set of hashtags corresponding to this tensor
which this tensor should be searchable for.
description: an optional string describing the purpose of the
tensor.
"""
if owner is None:
owner = sy.local_worker
# constructors for AbstractTensor and Promise
AbstractTensor.__init__(self, id=id, owner=owner, tags=tags, description=description)
Promise.__init__(self, owner=owner, obj_type=tensor_type, plans=plans)
self._shape = shape
del self.child
def torch_type(self):
return self.obj_type
@property
def shape(self):
return self._shape
@property
def grad(self):
return None
# if not hasattr(self, "_grad"):
# self._grad = PromiseTensor(shape=self._shape, tensor_type=self.torch_type()).wrap()
#
# return self._grad
def __str__(self):
return f"[PromiseTensor({self.owner.id}:{self.id}) -future-> {self.obj_type.split('.')[-1]} -blocking-> {len(self.plans)} plans]"
def __repr__(self):
return self.__str__()
@staticmethod
def simplify(worker: AbstractWorker, tensor: "PromiseTensor") -> tuple:
"""Takes the attributes of a PromiseTensor and saves them in a tuple.
Args:
tensor: a PromiseTensor.
Returns:
tuple: a tuple holding the unique attributes of the Promise tensor.
"""
return (
sy.serde.msgpack.serde._simplify(worker, tensor.id),
sy.serde.msgpack.serde._simplify(worker, tensor.shape),
sy.serde.msgpack.serde._simplify(worker, tensor.obj_type),
sy.serde.msgpack.serde._simplify(worker, tensor.plans),
sy.serde.msgpack.serde._simplify(worker, tensor.tags),
sy.serde.msgpack.serde._simplify(worker, tensor.description),
)
@staticmethod
def detail(worker: AbstractWorker, tensor_tuple: tuple) -> "PromiseTensor":
"""
This function reconstructs a PromiseTensor given it's attributes in form of a tuple.
Args:
worker: the worker doing the deserialization
tensor_tuple: a tuple holding the attributes of the PromiseTensor
Returns:
PromiseTensor: a PromiseTensor
Examples:
shared_tensor = detail(data)
"""
id, shape, tensor_type, plans, tags, description = tensor_tuple
id = sy.serde.msgpack.serde._detail(worker, id)
shape = sy.serde.msgpack.serde._detail(worker, shape)
tensor_type = sy.serde.msgpack.serde._detail(worker, tensor_type)
plans = sy.serde.msgpack.serde._detail(worker, plans)
tags = sy.serde.msgpack.serde._detail(worker, tags)
description = sy.serde.msgpack.serde._detail(worker, description)
tensor = PromiseTensor(
owner=worker,
id=id,
shape=shape,
tensor_type=tensor_type,
plans=plans,
tags=tags,
description=description,
)
return tensor
### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(PromiseTensor)
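# Usage sketch (not part of the original module): round-trip an existing
# PromiseTensor through its msgpack simplify/detail hooks. Assumes `worker` is
# an initialised syft worker and `promise_tensor` a PromiseTensor it owns.
def _example_roundtrip(worker, promise_tensor):
    simplified = PromiseTensor.simplify(worker, promise_tensor)
    restored = PromiseTensor.detail(worker, simplified)
    return restored.id == promise_tensor.id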
|
the-stack_106_15167
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class VolumeMountSpec(object):
def __init__(self, category, autoDelete=None, mountPath=None, readOnly=None, cloudDiskSpec=None, cloudDiskId=None, fsType=None, formatVolume=None):
"""
:param category: Disk category. cloud: a volume backed by a cloud disk; the root volume can only be of type cloud
:param autoDelete: (Optional) Automatically delete this volume when the container is deleted, defaults to True; only supported when the disk is a cloud disk
:param mountPath: (Optional) Mount path inside the container; not required for the root volume, whose mount path is (/); required for data volumes; must be an absolute path and must not contain (:)
:param readOnly: (Optional) Read only, defaults to false; only effective for data volumes; the root volume is false, i.e. readable and writable
:param cloudDiskSpec: (Optional) Cloud disk specification; a cloud disk created automatically with the container is not partitioned, only its file system is formatted
:param cloudDiskId: (Optional) Cloud disk ID; when using an existing cloud disk, partition and fsType must be specified
:param fsType: (Optional) Volume file system type, currently [xfs, ext4] are supported; a newly created disk is formatted as xfs by default when no file system type is specified
:param formatVolume: (Optional) A new disk created automatically with the container is formatted to the specified file system type; an existing disk is not formatted by default and is only mounted with the specified fsType; set this field to true to force formatting
"""
self.category = category
self.autoDelete = autoDelete
self.mountPath = mountPath
self.readOnly = readOnly
self.cloudDiskSpec = cloudDiskSpec
self.cloudDiskId = cloudDiskId
self.fsType = fsType
self.formatVolume = formatVolume
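# Illustrative sketch (not part of the generated file): a data volume backed by
# a newly created cloud disk, mounted read-write at /data. The field values are
# example assumptions consistent with the parameter descriptions above.
#
#   spec = VolumeMountSpec(category="cloud", autoDelete=True,
#                          mountPath="/data", readOnly=False, fsType="xfs")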
|
the-stack_106_15169
|
"""Lazy imports that may apply across the xonsh package."""
import os
import importlib
from xonsh.platform import ON_WINDOWS, ON_DARWIN
from xonsh.lazyasd import LazyObject, lazyobject
pygments = LazyObject(
lambda: importlib.import_module("pygments"), globals(), "pygments"
)
pyghooks = LazyObject(
lambda: importlib.import_module("xonsh.pyghooks"), globals(), "pyghooks"
)
@lazyobject
def pty():
if ON_WINDOWS:
return
else:
return importlib.import_module("pty")
@lazyobject
def termios():
if ON_WINDOWS:
return
else:
return importlib.import_module("termios")
@lazyobject
def fcntl():
if ON_WINDOWS:
return
else:
return importlib.import_module("fcntl")
@lazyobject
def tty():
if ON_WINDOWS:
return
else:
return importlib.import_module("tty")
@lazyobject
def _winapi():
if ON_WINDOWS:
import _winapi as m
else:
m = None
return m
@lazyobject
def msvcrt():
if ON_WINDOWS:
import msvcrt as m
else:
m = None
return m
@lazyobject
def winutils():
if ON_WINDOWS:
import xonsh.winutils as m
else:
m = None
return m
@lazyobject
def macutils():
if ON_DARWIN:
import xonsh.macutils as m
else:
m = None
return m
@lazyobject
def terminal256():
return importlib.import_module("pygments.formatters.terminal256")
@lazyobject
def html():
return importlib.import_module("pygments.formatters.html")
@lazyobject
def os_listxattr():
def dummy_listxattr(*args, **kwargs):
return []
return getattr(os, "listxattr", dummy_listxattr)
|
the-stack_106_15170
|
import pickle
import os
def check_didemo(data_root, split):
pairs = []
if split == 'train':
subfolder = 'challenge-release-1'
vids = [l.rstrip('\n') for l in open(os.path.join(data_root, subfolder, 'train_list.txt'))] \
+ [l.rstrip('\n') for l in open(os.path.join(data_root, subfolder, 'val_list.txt'))]
elif split == 'val':
subfolder = 'challenge-release-1'
vids = [l.rstrip('\n') for l in open(os.path.join(data_root, subfolder, 'public_server_val.txt'))]
else:
subfolder = 'challenge-release-2'
vids = [l.rstrip('\n') for l in open(os.path.join(data_root, subfolder, 'public_server_test.txt'))]
captions = pickle.load(open(os.path.join(data_root, subfolder, 'processed-captions.pkl'), 'rb'))
sent2tree = pickle.load(open(os.path.join(data_root, subfolder, 'non_binary_tree.pkl'), 'rb'))
for vid in vids:
sentences = captions[vid]
for tokens in sentences:
if len(tokens) < 20 or split != 'train':
sent = ' '.join(tokens)
tree, span, label = sent2tree[sent]['tree'], sent2tree[sent]['span'], sent2tree[sent]['label']
pairs.append({'video_id': vid, 'sentence': sent, 'tree': tree, 'span': span, 'label': label})
pairs = sorted(pairs, key=lambda x: len(x['sentence'].split(' ')))[::-1]
return pairs
if __name__ == '__main__':
data_root = "data/YouCook2"
split = 'train'
pairs = check_didemo(data_root, split)
print(len(pairs))
|
the-stack_106_15175
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import numpy as np
from collections import deque
class Monitor:
""" Initialize the monitoring class.
Args:
env: instance of OpenAI Gym's environment
agent: agent that will interact with the environment.
nb_episodes: number of episodes of agent-environment interaction
window: number of episodes to consider when calculating average rewards.
Attributes:
env: instance of OpenAI Gym's environment
agent: agent that will interact with the environment.
nb_episodes: number of episodes of agent-environment interaction
window: number of episodes to consider when calculating average rewards.
"""
def __init__(self, env, agent, nb_episodes=20000, window=100):
self.env = env
self.agent = agent
self.nb_episodes = nb_episodes
self.window = window
self.avg_rewards = deque(maxlen=nb_episodes)
self.sample_rewards = deque(maxlen=window)
self.results = {
"best_average_reward": np.NINF,
"average_rewards": [],
"epsilon": [],
}
def print_progress(self, i_episode):
""" Monitor progress.
"""
print("\rEpisode: {}/{} || Best average reward: {} || Average reward: {}"
.format(i_episode + 1, self.nb_episodes, self.results["best_average_reward"], self.results["average_rewards"][-1]), end="")
sys.stdout.flush()
def step(self, state):
action = self.agent.select_action(state)
# agent performs the selected action
next_state, reward, done, info = self.env.step(action)
# agent performs internal updates based on sampled experience
self.agent.step(state, action, reward, next_state, done)
return next_state, reward, done, info
def episode(self):
# begin the episode
state = self.env.reset()
# initialize the sampled reward
samp_reward = 0
while True:
next_state, reward, done, _ = self.step(state)
# update the sampled reward
samp_reward += reward
# update the state (s <- s') to next time step
state = next_state
if done:
# save final sampled reward
self.sample_rewards.append(samp_reward)
break
def interact(self):
""" Monitor agent's performance.
Returns
results: information of the training score and average results
"""
for i_episode in range(0, self.nb_episodes):
self.episode()
if (i_episode % self.window) == 0:
# get average reward from last 100 episodes
avg_reward = np.mean(self.sample_rewards)
self.results["average_rewards"].append(avg_reward)
# update best average reward
if avg_reward > self.results["best_average_reward"]:
self.results["best_average_reward"] = avg_reward
self.print_progress(i_episode)
print('\n')
self.results["average_rewards"] = np.array(self.results["average_rewards"])[1:]
return self.results
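# Usage sketch (not part of the original module): run the monitor with a
# placeholder random agent on a Gym environment. Assumes the classic Gym API
# (reset() returning the state, step() returning a 4-tuple) and that the
# environment id below is installed; a real agent would learn inside step().
class _RandomAgent:
    def __init__(self, action_space):
        self.action_space = action_space

    def select_action(self, state):
        return self.action_space.sample()

    def step(self, state, action, reward, next_state, done):
        pass  # no learning; placeholder only


if __name__ == "__main__":
    import gym

    env = gym.make("Taxi-v3")
    monitor = Monitor(env, _RandomAgent(env.action_space), nb_episodes=500)
    results = monitor.interact()
    print("Best average reward:", results["best_average_reward"])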
|
the-stack_106_15176
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.wi.views.user.admin
@author Piotr Wójcik
@date 31.01.2014
"""
import re
from django.contrib.sites.models import RequestSite
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from wi import settings as wi_settings
from wi.forms.user import CMAuthenticationForm
from wi.utils import REDIRECT_FIELD_NAME
from wi.utils.auth import cm_login, cm_logout
from wi.utils.decorators import django_view, user_permission
from wi.utils.views import prep_data
@django_view
@user_permission
def cma_login(request, template_name='admin_cm/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=CMAuthenticationForm):
"""
CM panel login page handling.
"""
rest_data = prep_data({'cms': 'guest/cluster/list_names/'}, request.session)
redirect_to = request.REQUEST.get(redirect_field_name, '')
if request.method == 'POST':
form = authentication_form(request, data=request.POST, rest_data=rest_data)
if form.is_valid():
if not redirect_to or ' ' in redirect_to:
redirect_to = wi_settings.LOGIN_REDIRECT_URL
# Heavier security check -- redirects to http://example.com should
# not be allowed, but things like /view/?param=http://example.com
# should be allowed. This regex checks if there is a '//' *before*
# a question mark.
elif '//' in redirect_to and re.match(r'[^\?]*//', redirect_to):
redirect_to = wi_settings.LOGIN_REDIRECT_URL
# Okay, security checks complete. Log the user in.
cm_passwd = form.cleaned_data['password']
cm_id = form.cleaned_data['cm']
cm_login(request.session, cm_passwd, cm_id)
if redirect_to == '/':
redirect_to = '/admin_cm/'
return HttpResponseRedirect(redirect_to)
else:
form = authentication_form(request, rest_data=rest_data)
request.session.set_test_cookie()
current_site = RequestSite(request)
return render_to_response(template_name,
{'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}, context_instance=RequestContext(request))
@django_view
@user_permission
def cma_logout(request, next_page=None,
template_name='admin_cm/logged_out.html',
redirect_field_name=REDIRECT_FIELD_NAME):
"""
Logs out and redirects to the right next page (\c next_page).
"""
cm_logout(request.session)
if next_page is None:
redirect_to = request.REQUEST.get(redirect_field_name, '')
if redirect_to:
return HttpResponseRedirect(redirect_to)
else:
return render_to_response(template_name,
{'title': _('Logged out')},
context_instance=RequestContext(request))
else:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page or request.path)
|
the-stack_106_15178
|
"""
The main module for :mod:`once`.
This defines the basics of memoizing code and saving the memoized results to disk.
"""
__version__ = "0.1.0"
import contextlib
import functools
import pickle
import typing
def unique_name(obj: typing.Any) -> str:
"""Come up with a unique string name for any Python object."""
qualname = getattr(obj, "__qualname__", None) or ""
name = getattr(obj, "__name__", None) or ""
type_name = type(obj).__name__
actual_obj_name = qualname or name or type_name
module = getattr(obj, "__module__", None) or ""
if not module:
return actual_obj_name
return ".".join((module, actual_obj_name))
class FunctionCall(typing.NamedTuple):
"""
Immutable data structure that describes a function call.
This data structure does not store return values of functions.
It is appropriate to be used as the key in a dictionary
that stores the result of function calls as the value.
"""
function_name: str
args: typing.Tuple
kwargs: typing.Tuple[typing.Tuple[str, typing.Any], ...]
@classmethod
def from_args(
cls,
function: typing.Callable,
args: typing.Iterable,
kwargs: typing.Dict[str, typing.Any],
):
"""
Construct a FunctionCall object from a function and its arguments.
This method handles the translation from a callable object to
a unique(?) string that represents its value.
"""
return cls(
function_name=unique_name(function),
args=tuple(args),
kwargs=tuple(kwargs.items()),
)
class FunctionReturn(typing.NamedTuple):
"""Contains the two outputs of a function--a return value or an exception."""
retval: typing.Any
exception: typing.Optional[BaseException]
_CACHE_TYPE = typing.Dict[FunctionCall, FunctionReturn]
_CENSOR_TYPE = typing.Callable[
[typing.Callable, typing.Iterable, typing.Dict], FunctionCall
]
def _default_censor(function: typing.Callable, *args, **kwargs,) -> FunctionCall:
"""
Does not censor anything. Returns a FunctionCall as given.
"""
return FunctionCall.from_args(function=function, args=args, kwargs=kwargs,)
class Memoize:
"""
Creates a common cache written to by any functions we wrap.
"""
def __init__(self, cache: _CACHE_TYPE = None):
"""Initialize the memoize object."""
self.cache: _CACHE_TYPE = cache or {}
@classmethod
def load_from_file(cls, filename: str, *, empty=False):
"""Unpickle a cache from a filename."""
try:
            with open(filename, "rb") as handle:  # pickle data must be read in binary mode
return cls.load(handle)
        except FileNotFoundError:
            if empty:
                return cls(cache=None)
            raise
@classmethod
def load(cls, handle):
"""Unpickle a cache object from a file handle. Returns a Memoize object."""
cache = pickle.load(handle)
return cls(cache=cache)
@classmethod
def loads(cls, serialized):
"""Unpickle a cache object from a string or a bytes-like object. Returns a Memoize object."""
cache = pickle.loads(serialized)
return cls(cache=cache)
def dumps(self):
"""Dump the pickled cache to a string."""
return pickle.dumps(self.cache)
def dump(self, handle):
"""Dump a pickled cache to a file handle."""
return pickle.dump(self.cache, handle)
def wrap( # pylint: disable=no-self-use
self, function: typing.Callable, censor: _CENSOR_TYPE = _default_censor,
):
"""Wraps a function to cache results for subsequent calls."""
class Wrapper:
"""
Class that wraps a function, preserving its `__repr__()` method.
This is similar to :func:`functools.wraps`, but
:func:`functools.wraps` does not preserve `__repr__()`
This is based on the following StackOverflow answer:
https://stackoverflow.com/a/10875517/
"""
def __init__(wrap_self): # pylint: disable=no-self-argument
functools.update_wrapper(wrap_self, function)
def __repr__(wrap_self): # pylint: disable=no-self-argument
"""Patch the wrapper function's repr() with the wrapped repr()."""
return repr(function)
def __call__(
wrap_self, *args, **kwargs
): # pylint: disable=no-self-argument
"""Call the wrapped function."""
call = censor(function, *args, **kwargs)
if call in self.cache:
if self.cache[call].exception:
raise self.cache[call].exception
return self.cache[call].retval
try:
retval = function(*args, **kwargs)
except BaseException as exc:
self.cache[call] = FunctionReturn(retval=None, exception=exc,)
raise
else:
self.cache[call] = FunctionReturn(retval=retval, exception=None,)
return retval
return Wrapper()
class MemoizeContext:
"""A context manager that manages the loading and saving of state to/from disk."""
def __init__(self, filename: str):
"""Initialize the context manager."""
self.filename = filename
self.memoizer = None
def __enter__(self):
"""Read the memoizer cache from a file, creating it if needed."""
try:
with open(self.filename, "rb") as handle:
self.memoizer = Memoize.load(handle)
except FileNotFoundError:
self.memoizer = Memoize()
return self.memoizer
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Save the memoizer cache back to the file we read the cache from."""
with open(self.filename, "wb") as handle:
self.memoizer.dump(handle)
class MemoizeClass:
"""Memoize a list of methods from a given class."""
def __new__(cls, input_cls, method_list, memoizer):
cls._memoizer = memoizer
for method_name in method_list:
setattr(
cls, method_name, cls._memoizer.wrap(getattr(input_cls, method_name)),
)
return super().__new__(cls)
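

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): memoize a function in memory with
    # Memoize.wrap, then persist a cache across runs with MemoizeContext. The cache file name
    # "once_cache.pickle" is an arbitrary choice for this example.
    memo = Memoize()

    @memo.wrap
    def slow_square(x):
        print("computing", x)
        return x * x

    print(slow_square(3))  # computed and stored in the cache
    print(slow_square(3))  # served from the cache, no "computing" output

    with MemoizeContext("once_cache.pickle") as memoizer:
        cached_square = memoizer.wrap(slow_square)
        print(cached_square(4))  # cache is written to disk when the context manager exits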
|
the-stack_106_15181
|
import numpy as np
class Config(object):
input_dir = r'../data'
train_dir = r'../data/train'
test_dir = r'../data/test'
valid_dir = r'../data/valid'
filenames = 'filenames.txt'
model_type = 'segnetCustomized'
dim = 128
epochs = 50
specific_name = 'simple_binary'
batch_size = 20
input_channels = 1
num_classes = 1
final_activation = 'sigmoid'
crop_labels = False
normalized = True
normalization_type = 'local'
pretrained = False
pretrained_file = 'files/pth.h5'
evaluation_file = 'evaluation.csv'
confidence_threshold = 0.80
keep_percent = 0.1 * dim * dim
iterations = 50
output_shp_file = 'segnetCustomized'
input_dtm = r'../data/dtms/test_dtm.tif'
def __init__(self):
"""Set values of computed attributes."""
        # Derived file paths for the trained model and its training history
self.model_path = 'files/{}_{}_{}_{}.h5'.format(self.specific_name,self.model_type, self.dim, self.epochs)
self.hist_path = 'files/{}_{}_{}_{}.csv'.format(self.specific_name,self.model_type, self.dim, self.epochs)
def reset_attributes(self):
self.model_path = 'files/{}_{}_{}_{}.h5'.format(self.specific_name, self.model_type, self.dim, self.epochs)
self.hist_path = 'files/{}_{}_{}_{}.csv'.format(self.specific_name, self.model_type, self.dim, self.epochs)
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
|
the-stack_106_15182
|
from setuptools import setup, find_packages
def get_version():
with open("arrlio/__init__.py", "r") as f:
for line in f.readlines():
if line.startswith("__version__ = "):
return line.split("=")[1].strip().strip('"')
raise Exception("Can't read version")
setup(
name="arrlio",
version=get_version(),
author="Roma Koshel",
author_email="[email protected]",
license="MIT",
py_modules=["arrlio"],
packages=find_packages(
exclude=(
"docs",
"examples",
"tests",
)
),
include_package_data=True,
install_requires=[
"aiormq>=5.2.0",
"cryptography",
"pydantic>=1.9.0",
"yarl",
"roview",
"siderpy[hiredis]",
],
dependency_links=[
"git+git://github.com/levsh/roview.git#egg=roview",
],
python_requires=">=3.8",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Topic :: System :: Distributed Computing",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
],
)
|
the-stack_106_15183
|
# coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imagenet datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import tarfile
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = '''\
ILSVRC 2012, aka ImageNet is an image dataset organized according to the
WordNet hierarchy. Each meaningful concept in WordNet, possibly described by
multiple words or word phrases, is called a "synonym set" or "synset". There are
more than 100,000 synsets in WordNet, majority of them are nouns (80,000+). In
ImageNet, we aim to provide on average 1000 images to illustrate each synset.
Images of each concept are quality-controlled and human-annotated. In its
completion, we hope ImageNet will offer tens of millions of cleanly sorted
images for most of the concepts in the WordNet hierarchy.
'''
# Web-site is asking to cite paper from 2015.
# http://www.image-net.org/challenges/LSVRC/2012/index#cite
_CITATION = '''\
@article{ILSVRC15,
Author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
Title = {{ImageNet Large Scale Visual Recognition Challenge}},
Year = {2015},
journal = {International Journal of Computer Vision (IJCV)},
doi = {10.1007/s11263-015-0816-y},
volume={115},
number={3},
pages={211-252}
}
'''
_LABELS_FNAME = 'image/imagenet2012_labels.txt'
# This file contains the validation labels, in the alphabetic order of
# corresponding image names (and not in the order they have been added to the
# tar file).
_VALIDATION_LABELS_FNAME = 'image/imagenet2012_validation_labels.txt'
class Imagenet2012(tfds.core.GeneratorBasedBuilder):
"""Imagenet 2012, aka ILSVRC 2012."""
VERSION = tfds.core.Version('2.0.0')
# 1.0.0 to 2.0.0: fix validation labels.
def _info(self):
names_file = tfds.core.get_tfds_path(_LABELS_FNAME)
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(),
'label': tfds.features.ClassLabel(names_file=names_file),
'file_name': tfds.features.Text(), # Eg: 'n15075141_54.JPEG'
}),
supervised_keys=('image', 'label'),
urls=['http://image-net.org/'],
citation=_CITATION,
)
@staticmethod
def _get_validation_labels(val_path):
"""Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
"""
labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)
with tf.io.gfile.GFile(labels_path) as labels_f:
labels = labels_f.read().strip().split('\n')
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels))
def _split_generators(self, dl_manager):
train_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_train.tar')
val_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_val.tar')
if not tf.io.gfile.exists(train_path) or not tf.io.gfile.exists(val_path):
msg = 'You must download the dataset files manually and place them in: '
msg += ', '.join([train_path, val_path])
raise AssertionError(msg)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=1000,
gen_kwargs={
'archive': dl_manager.iter_archive(train_path),
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=5,
gen_kwargs={
'archive': dl_manager.iter_archive(val_path),
'validation_labels': self._get_validation_labels(val_path),
},
),
]
def _generate_examples(self, archive, validation_labels=None):
if validation_labels: # Validation split
for example in self._generate_examples_validation(archive,
validation_labels):
yield example
# Training split. Main archive contains archives names after a synset noun.
# Each sub-archive contains pictures associated to that synset.
for fname, fobj in archive:
label = fname[:-4] # fname is something like 'n01632458.tar'
# TODO(b/117643231): in py3, the following lines trigger tarfile module
# to call `fobj.seekable()`, which Gfile doesn't have. We should find an
# alternative, as this loads ~150MB in RAM.
fobj_mem = io.BytesIO(fobj.read())
for image_fname, image_fobj in tfds.download.iter_archive(
fobj_mem, tfds.download.ExtractMethod.TAR):
yield {
'file_name': image_fname,
'image': image_fobj,
'label': label,
}
def _generate_examples_validation(self, archive, labels):
for fname, fobj in archive:
yield {
'file_name': fname,
'image': fobj,
'label': labels[fname],
}
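

# Minimal usage sketch (not part of the original module). It assumes the two ILSVRC2012 tar files
# have already been placed in the builder's manual download directory, as required above; the
# data_dir below is an arbitrary placeholder.
#
#   import tensorflow_datasets as tfds
#   builder = tfds.builder('imagenet2012', data_dir='~/tensorflow_datasets')
#   builder.download_and_prepare()
#   train_ds = builder.as_dataset(split=tfds.Split.TRAIN)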
|
the-stack_106_15184
|
#!/usr/bin/env python3
import sys
import threading
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3")
import argparse
import json
import os
import time
import traceback
from functools import cmp_to_key
from itertools import cycle
from pathlib import Path
import platform
if platform.machine() == 'aarch64': # Jetson
os.environ['OPENBLAS_CORETYPE'] = "ARMV8"
from depthai_helpers.app_manager import App
if __name__ == "__main__":
if '--app' in sys.argv:
try:
app = App(appName=sys.argv[sys.argv.index('--app') + 1])
app.createVenv()
app.runApp()
sys.exit(0)
except KeyboardInterrupt:
sys.exit(0)
try:
import cv2
import depthai as dai
import numpy as np
except Exception as ex:
print("Third party libraries failed to import: {}".format(ex))
print("Run \"python3 install_requirements.py\" to install dependencies or visit our installation page for more details - https://docs.luxonis.com/projects/api/en/latest/install/")
sys.exit(42)
from log_system_information import make_sys_report
from depthai_helpers.supervisor import Supervisor
from depthai_helpers.arg_manager import parseArgs
from depthai_helpers.config_manager import ConfigManager, DEPTHAI_ZOO, DEPTHAI_VIDEOS
from depthai_helpers.metrics import MetricManager
from depthai_helpers.version_check import checkRequirementsVersion
from depthai_sdk import FPSHandler, loadModule, getDeviceInfo, downloadYTVideo, Previews, createBlankFrame
from depthai_sdk.managers import NNetManager, PreviewManager, PipelineManager, EncodingManager, BlobManager
args = parseArgs()
if args.noSupervisor and args.guiType == "qt":
if "QT_QPA_PLATFORM_PLUGIN_PATH" in os.environ:
os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH")
if "QT_QPA_FONTDIR" in os.environ:
os.environ.pop("QT_QPA_FONTDIR")
if not args.noSupervisor:
print('Using depthai module from: ', dai.__file__)
print('Depthai version installed: ', dai.__version__)
if not args.skipVersionCheck and platform.machine() not in ['armv6l', 'aarch64']:
checkRequirementsVersion()
sentryEnabled = False
try:
import sentry_sdk
sentry_sdk.init(
"https://[email protected]/6114622",
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0,
with_locals=False,
)
sentry_sdk.set_context("syslog", make_sys_report(anonymous=True, skipUsb=True, skipPackages=True))
sentryEnabled = True
except Exception as ex:
print("Logging and crash reporting disabled! {}".format(ex))
class Trackbars:
instances = {}
@staticmethod
def createTrackbar(name, window, minVal, maxVal, defaultVal, callback):
def fn(value):
if Trackbars.instances[name][window] != value:
callback(value)
for otherWindow, previousValue in Trackbars.instances[name].items():
if otherWindow != window and previousValue != value:
Trackbars.instances[name][otherWindow] = value
cv2.setTrackbarPos(name, otherWindow, value)
cv2.createTrackbar(name, window, minVal, maxVal, fn)
Trackbars.instances[name] = {**Trackbars.instances.get(name, {}), window: defaultVal}
cv2.setTrackbarPos(name, window, defaultVal)
noop = lambda *a, **k: None
class Demo:
DISP_CONF_MIN = int(os.getenv("DISP_CONF_MIN", 0))
DISP_CONF_MAX = int(os.getenv("DISP_CONF_MAX", 255))
SIGMA_MIN = int(os.getenv("SIGMA_MIN", 0))
SIGMA_MAX = int(os.getenv("SIGMA_MAX", 250))
LRCT_MIN = int(os.getenv("LRCT_MIN", 0))
LRCT_MAX = int(os.getenv("LRCT_MAX", 10))
def run_all(self, conf):
if conf.args.app is not None:
app = App(appName=conf.args.app)
self.onAppSetup(app)
app.createVenv()
self.onAppStart(app)
app.runApp(shouldRun=self.shouldRun)
else:
self.setup(conf)
self.run()
def __init__(self, displayFrames=True, onNewFrame = noop, onShowFrame = noop, onNn = noop, onReport = noop, onSetup = noop, onTeardown = noop, onIter = noop, onAppSetup = noop, onAppStart = noop, shouldRun = lambda: True, showDownloadProgress=None, collectMetrics=False):
self._openvinoVersion = None
self._displayFrames = displayFrames
self.toggleMetrics(collectMetrics)
self.onNewFrame = onNewFrame
self.onShowFrame = onShowFrame
self.onNn = onNn
self.onReport = onReport
self.onSetup = onSetup
self.onTeardown = onTeardown
self.onIter = onIter
self.shouldRun = shouldRun
self.showDownloadProgress = showDownloadProgress
self.onAppSetup = onAppSetup
self.onAppStart = onAppStart
def setCallbacks(self, onNewFrame=None, onShowFrame=None, onNn=None, onReport=None, onSetup=None, onTeardown=None, onIter=None, onAppSetup=None, onAppStart=None, shouldRun=None, showDownloadProgress=None):
if onNewFrame is not None:
self.onNewFrame = onNewFrame
if onShowFrame is not None:
self.onShowFrame = onShowFrame
if onNn is not None:
self.onNn = onNn
if onReport is not None:
self.onReport = onReport
if onSetup is not None:
self.onSetup = onSetup
if onTeardown is not None:
self.onTeardown = onTeardown
if onIter is not None:
self.onIter = onIter
if shouldRun is not None:
self.shouldRun = shouldRun
if showDownloadProgress is not None:
self.showDownloadProgress = showDownloadProgress
if onAppSetup is not None:
self.onAppSetup = onAppSetup
if onAppStart is not None:
self.onAppStart = onAppStart
def toggleMetrics(self, enabled):
if enabled:
self.metrics = MetricManager()
else:
self.metrics = None
def setup(self, conf: ConfigManager):
print("Setting up demo...")
self._conf = conf
self._rgbRes = conf.getRgbResolution()
self._monoRes = conf.getMonoResolution()
if self._conf.args.openvinoVersion:
self._openvinoVersion = getattr(dai.OpenVINO.Version, 'VERSION_' + self._conf.args.openvinoVersion)
self._deviceInfo = getDeviceInfo(self._conf.args.deviceId)
if self._conf.args.reportFile:
reportFileP = Path(self._conf.args.reportFile).with_suffix('.csv')
reportFileP.parent.mkdir(parents=True, exist_ok=True)
self._reportFile = reportFileP.open('a')
self._pm = PipelineManager(openvinoVersion=self._openvinoVersion)
if self._conf.args.xlinkChunkSize is not None:
self._pm.setXlinkChunkSize(self._conf.args.xlinkChunkSize)
self._nnManager = None
if self._conf.useNN:
self._blobManager = BlobManager(
zooDir=DEPTHAI_ZOO,
zooName=self._conf.getModelName(),
progressFunc=self.showDownloadProgress
)
self._nnManager = NNetManager(inputSize=self._conf.inputSize)
if self._conf.getModelDir() is not None:
configPath = self._conf.getModelDir() / Path(self._conf.getModelName()).with_suffix(f".json")
self._nnManager.readConfig(configPath)
self._nnManager.countLabel(self._conf.getCountLabel(self._nnManager))
self._pm.setNnManager(self._nnManager)
self._device = dai.Device(self._pm.pipeline.getOpenVINOVersion(), self._deviceInfo, usb2Mode=self._conf.args.usbSpeed == "usb2")
if sentryEnabled:
try:
from sentry_sdk import set_user
set_user({"mxid": self._device.getMxId()})
except:
pass
if self.metrics is not None:
self.metrics.reportDevice(self._device)
if self._deviceInfo.desc.protocol == dai.XLinkProtocol.X_LINK_USB_VSC:
print("USB Connection speed: {}".format(self._device.getUsbSpeed()))
self._conf.adjustParamsToDevice(self._device)
self._conf.adjustPreviewToOptions()
if self._conf.lowBandwidth:
self._pm.enableLowBandwidth(poeQuality=self._conf.args.poeQuality)
self._cap = cv2.VideoCapture(self._conf.args.video) if not self._conf.useCamera else None
self._fps = FPSHandler() if self._conf.useCamera else FPSHandler(self._cap)
if self._conf.useCamera or self._conf.args.sync:
self._pv = PreviewManager(display=self._conf.args.show, nnSource=self._conf.getModelSource(), colorMap=self._conf.getColorMap(),
dispMultiplier=self._conf.dispMultiplier, mouseTracker=True, lowBandwidth=self._conf.lowBandwidth,
scale=self._conf.args.scale, sync=self._conf.args.sync, fpsHandler=self._fps, createWindows=self._displayFrames,
depthConfig=self._pm._depthConfig)
if self._conf.leftCameraEnabled:
self._pm.createLeftCam(self._monoRes, self._conf.args.monoFps,
orientation=self._conf.args.cameraOrientation.get(Previews.left.name),
xout=Previews.left.name in self._conf.args.show and (self._conf.getModelSource() != "left" or not self._conf.args.sync))
if self._conf.rightCameraEnabled:
self._pm.createRightCam(self._monoRes, self._conf.args.monoFps,
orientation=self._conf.args.cameraOrientation.get(Previews.right.name),
xout=Previews.right.name in self._conf.args.show and (self._conf.getModelSource() != "right" or not self._conf.args.sync))
if self._conf.rgbCameraEnabled:
self._pm.createColorCam(self._nnManager.inputSize if self._conf.useNN else self._conf.previewSize, self._rgbRes, self._conf.args.rgbFps,
orientation=self._conf.args.cameraOrientation.get(Previews.color.name),
fullFov=not self._conf.args.disableFullFovNn,
xout=Previews.color.name in self._conf.args.show and (self._conf.getModelSource() != "color" or not self._conf.args.sync))
if self._conf.useDepth:
self._pm.createDepth(
self._conf.args.disparityConfidenceThreshold,
self._conf.getMedianFilter(),
self._conf.args.sigma,
self._conf.args.stereoLrCheck,
self._conf.args.lrcThreshold,
self._conf.args.extendedDisparity,
self._conf.args.subpixel,
useDepth=Previews.depth.name in self._conf.args.show or Previews.depthRaw.name in self._conf.args.show,
useDisparity=Previews.disparity.name in self._conf.args.show or Previews.disparityColor.name in self._conf.args.show,
useRectifiedLeft=Previews.rectifiedLeft.name in self._conf.args.show and (
self._conf.getModelSource() != "rectifiedLeft" or not self._conf.args.sync),
useRectifiedRight=Previews.rectifiedRight.name in self._conf.args.show and (
self._conf.getModelSource() != "rectifiedRight" or not self._conf.args.sync),
)
self._encManager = None
if len(self._conf.args.encode) > 0:
self._encManager = EncodingManager(self._conf.args.encode, self._conf.args.encodeOutput)
self._encManager.createEncoders(self._pm)
if len(self._conf.args.report) > 0:
self._pm.createSystemLogger()
if self._conf.useNN:
self._nn = self._nnManager.createNN(
pipeline=self._pm.pipeline, nodes=self._pm.nodes, source=self._conf.getModelSource(),
blobPath=self._blobManager.getBlob(shaves=self._conf.shaves, openvinoVersion=self._nnManager.openvinoVersion),
useDepth=self._conf.useDepth, minDepth=self._conf.args.minDepth, maxDepth=self._conf.args.maxDepth,
sbbScaleFactor=self._conf.args.sbbScaleFactor, fullFov=not self._conf.args.disableFullFovNn,
)
self._pm.addNn(
nn=self._nn, sync=self._conf.args.sync, xoutNnInput=Previews.nnInput.name in self._conf.args.show,
useDepth=self._conf.useDepth, xoutSbb=self._conf.args.spatialBoundingBox and self._conf.useDepth
)
def run(self):
self._device.startPipeline(self._pm.pipeline)
self._pm.createDefaultQueues(self._device)
if self._conf.useNN:
self._nnManager.createQueues(self._device)
self._sbbOut = self._device.getOutputQueue("sbb", maxSize=1, blocking=False) if self._conf.useNN and self._conf.args.spatialBoundingBox else None
self._logOut = self._device.getOutputQueue("systemLogger", maxSize=30, blocking=False) if len(self._conf.args.report) > 0 else None
if self._conf.useDepth:
self._medianFilters = cycle([item for name, item in vars(dai.MedianFilter).items() if name.startswith('KERNEL_') or name.startswith('MEDIAN_')])
for medFilter in self._medianFilters:
# move the cycle to the current median filter
if medFilter == self._pm._depthConfig.postProcessing.median:
break
else:
self._medianFilters = []
if self._conf.useCamera:
cameras = self._device.getConnectedCameras()
if dai.CameraBoardSocket.LEFT in cameras and dai.CameraBoardSocket.RIGHT in cameras:
self._pv.collectCalibData(self._device)
self._cameraConfig = {
"exposure": self._conf.args.cameraExposure,
"sensitivity": self._conf.args.cameraSensitivity,
"saturation": self._conf.args.cameraSaturation,
"contrast": self._conf.args.cameraContrast,
"brightness": self._conf.args.cameraBrightness,
"sharpness": self._conf.args.cameraSharpness
}
if any(self._cameraConfig.values()):
self._updateCameraConfigs()
self._pv.createQueues(self._device, self._createQueueCallback)
if self._encManager is not None:
self._encManager.createDefaultQueues(self._device)
elif self._conf.args.sync:
self._hostOut = self._device.getOutputQueue(Previews.nnInput.name, maxSize=1, blocking=False)
self._seqNum = 0
self._hostFrame = None
self._nnData = []
self._sbbRois = []
self.onSetup(self)
try:
while self.shouldRun():
self._fps.nextIter()
self.onIter(self)
self.loop()
except StopIteration:
pass
except Exception as ex:
if sentryEnabled:
from sentry_sdk import capture_exception
capture_exception(ex)
raise
finally:
self.stop()
def stop(self):
print("Stopping demo...")
self._device.close()
del self._device
self._pm.closeDefaultQueues()
if self._conf.useCamera:
self._pv.closeQueues()
if self._encManager is not None:
self._encManager.close()
if self._nnManager is not None:
self._nnManager.closeQueues()
if self._sbbOut is not None:
self._sbbOut.close()
if self._logOut is not None:
self._logOut.close()
self._fps.printStatus()
self.onTeardown(self)
def loop(self):
if self._conf.useCamera:
self._pv.prepareFrames(callback=self.onNewFrame)
if self._encManager is not None:
self._encManager.parseQueues()
if self._sbbOut is not None:
sbb = self._sbbOut.tryGet()
if sbb is not None:
self._sbbRois = sbb.getConfigData()
depthFrames = [self._pv.get(Previews.depthRaw.name), self._pv.get(Previews.depth.name)]
for depthFrame in depthFrames:
if depthFrame is None:
continue
for roiData in self._sbbRois:
roi = roiData.roi.denormalize(depthFrame.shape[1], depthFrame.shape[0])
topLeft = roi.topLeft()
bottomRight = roi.bottomRight()
# Display SBB on the disparity map
cv2.rectangle(depthFrame, (int(topLeft.x), int(topLeft.y)), (int(bottomRight.x), int(bottomRight.y)), self._nnManager._bboxColors[0], 2)
else:
readCorrectly, rawHostFrame = self._cap.read()
if not readCorrectly:
raise StopIteration()
self._nnManager.sendInputFrame(rawHostFrame, self._seqNum)
self._seqNum += 1
if not self._conf.args.sync:
self._hostFrame = rawHostFrame
self._fps.tick('host')
if self._nnManager is not None:
inNn = self._nnManager.outputQueue.tryGet()
if inNn is not None:
self.onNn(inNn)
if not self._conf.useCamera and self._conf.args.sync:
self._hostFrame = Previews.nnInput.value(self._hostOut.get())
self._nnData = self._nnManager.decode(inNn)
self._fps.tick('nn')
if self._conf.useCamera:
if self._nnManager is not None:
self._nnManager.draw(self._pv, self._nnData)
self._pv.showFrames(callback=self._showFramesCallback)
elif self._hostFrame is not None:
debugHostFrame = self._hostFrame.copy()
if self._nnManager is not None:
self._nnManager.draw(debugHostFrame, self._nnData)
self._fps.drawFps(debugHostFrame, "host")
if self._displayFrames:
cv2.imshow("host", debugHostFrame)
if self._logOut:
logs = self._logOut.tryGetAll()
for log in logs:
self._printSysInfo(log)
if self._displayFrames:
key = cv2.waitKey(1)
if key == ord('q'):
raise StopIteration()
elif key == ord('m'):
nextFilter = next(self._medianFilters)
self._pm.updateDepthConfig(self._device, median=nextFilter)
if self._conf.args.cameraControlls:
update = True
if key == ord('t'):
self._cameraConfig["exposure"] = 10000 if self._cameraConfig["exposure"] is None else 500 if self._cameraConfig["exposure"] == 1 else min(self._cameraConfig["exposure"] + 500, 33000)
if self._cameraConfig["sensitivity"] is None:
self._cameraConfig["sensitivity"] = 800
elif key == ord('g'):
self._cameraConfig["exposure"] = 10000 if self._cameraConfig["exposure"] is None else max(self._cameraConfig["exposure"] - 500, 1)
if self._cameraConfig["sensitivity"] is None:
self._cameraConfig["sensitivity"] = 800
elif key == ord('y'):
self._cameraConfig["sensitivity"] = 800 if self._cameraConfig["sensitivity"] is None else min(self._cameraConfig["sensitivity"] + 50, 1600)
if self._cameraConfig["exposure"] is None:
self._cameraConfig["exposure"] = 10000
elif key == ord('h'):
self._cameraConfig["sensitivity"] = 800 if self._cameraConfig["sensitivity"] is None else max(self._cameraConfig["sensitivity"] - 50, 100)
if self._cameraConfig["exposure"] is None:
self._cameraConfig["exposure"] = 10000
elif key == ord('u'):
self._cameraConfig["saturation"] = 0 if self._cameraConfig["saturation"] is None else min(self._cameraConfig["saturation"] + 1, 10)
elif key == ord('j'):
self._cameraConfig["saturation"] = 0 if self._cameraConfig["saturation"] is None else max(self._cameraConfig["saturation"] - 1, -10)
elif key == ord('i'):
self._cameraConfig["contrast"] = 0 if self._cameraConfig["contrast"] is None else min(self._cameraConfig["contrast"] + 1, 10)
elif key == ord('k'):
self._cameraConfig["contrast"] = 0 if self._cameraConfig["contrast"] is None else max(self._cameraConfig["contrast"] - 1, -10)
elif key == ord('o'):
self._cameraConfig["brightness"] = 0 if self._cameraConfig["brightness"] is None else min(self._cameraConfig["brightness"] + 1, 10)
elif key == ord('l'):
self._cameraConfig["brightness"] = 0 if self._cameraConfig["brightness"] is None else max(self._cameraConfig["brightness"] - 1, -10)
elif key == ord('p'):
self._cameraConfig["sharpness"] = 0 if self._cameraConfig["sharpness"] is None else min(self._cameraConfig["sharpness"] + 1, 4)
elif key == ord(';'):
self._cameraConfig["sharpness"] = 0 if self._cameraConfig["sharpness"] is None else max(self._cameraConfig["sharpness"] - 1, 0)
else:
update = False
if update:
self._updateCameraConfigs()
def _createQueueCallback(self, queueName):
if self._displayFrames and queueName in [Previews.disparityColor.name, Previews.disparity.name, Previews.depth.name, Previews.depthRaw.name]:
Trackbars.createTrackbar('Disparity confidence', queueName, self.DISP_CONF_MIN, self.DISP_CONF_MAX, self._conf.args.disparityConfidenceThreshold,
lambda value: self._pm.updateDepthConfig(self._device, dct=value))
if queueName in [Previews.depthRaw.name, Previews.depth.name]:
Trackbars.createTrackbar('Bilateral sigma', queueName, self.SIGMA_MIN, self.SIGMA_MAX, self._conf.args.sigma,
lambda value: self._pm.updateDepthConfig(self._device, sigma=value))
if self._conf.args.stereoLrCheck:
Trackbars.createTrackbar('LR-check threshold', queueName, self.LRCT_MIN, self.LRCT_MAX, self._conf.args.lrcThreshold,
lambda value: self._pm.updateDepthConfig(self._device, lrcThreshold=value))
def _updateCameraConfigs(self):
parsedConfig = {}
for configOption, values in self._cameraConfig.items():
if values is not None:
for cameraName, value in values:
newConfig = {
**parsedConfig.get(cameraName, {}),
configOption: value
}
if cameraName == "all":
parsedConfig[Previews.left.name] = newConfig
parsedConfig[Previews.right.name] = newConfig
parsedConfig[Previews.color.name] = newConfig
else:
parsedConfig[cameraName] = newConfig
if hasattr(self, "_device"):
if self._conf.leftCameraEnabled and Previews.left.name in parsedConfig:
self._pm.updateLeftCamConfig(self._device, **parsedConfig[Previews.left.name])
if self._conf.rightCameraEnabled and Previews.right.name in parsedConfig:
self._pm.updateRightCamConfig(self._device, **parsedConfig[Previews.right.name])
if self._conf.rgbCameraEnabled and Previews.color.name in parsedConfig:
self._pm.updateColorCamConfig(self._device, **parsedConfig[Previews.color.name])
def _showFramesCallback(self, frame, name):
returnFrame = self.onShowFrame(frame, name)
return returnFrame if returnFrame is not None else frame
def _printSysInfo(self, info):
m = 1024 * 1024 # MiB
if not hasattr(self, "_reportFile"):
if "memory" in self._conf.args.report:
print(f"Drr used / total - {info.ddrMemoryUsage.used / m:.2f} / {info.ddrMemoryUsage.total / m:.2f} MiB")
print(f"Cmx used / total - {info.cmxMemoryUsage.used / m:.2f} / {info.cmxMemoryUsage.total / m:.2f} MiB")
print(f"LeonCss heap used / total - {info.leonCssMemoryUsage.used / m:.2f} / {info.leonCssMemoryUsage.total / m:.2f} MiB")
print(f"LeonMss heap used / total - {info.leonMssMemoryUsage.used / m:.2f} / {info.leonMssMemoryUsage.total / m:.2f} MiB")
if "temp" in self._conf.args.report:
t = info.chipTemperature
print(f"Chip temperature - average: {t.average:.2f}, css: {t.css:.2f}, mss: {t.mss:.2f}, upa0: {t.upa:.2f}, upa1: {t.dss:.2f}")
if "cpu" in self._conf.args.report:
print(f"Cpu usage - Leon OS: {info.leonCssCpuUsage.average * 100:.2f}%, Leon RT: {info.leonMssCpuUsage.average * 100:.2f} %")
print("----------------------------------------")
else:
data = {}
if "memory" in self._conf.args.report:
data = {
**data,
"ddrUsed": info.ddrMemoryUsage.used,
"ddrTotal": info.ddrMemoryUsage.total,
"cmxUsed": info.cmxMemoryUsage.used,
"cmxTotal": info.cmxMemoryUsage.total,
"leonCssUsed": info.leonCssMemoryUsage.used,
"leonCssTotal": info.leonCssMemoryUsage.total,
"leonMssUsed": info.leonMssMemoryUsage.used,
"leonMssTotal": info.leonMssMemoryUsage.total,
}
if "temp" in self._conf.args.report:
data = {
**data,
"tempAvg": info.chipTemperature.average,
"tempCss": info.chipTemperature.css,
"tempMss": info.chipTemperature.mss,
"tempUpa0": info.chipTemperature.upa,
"tempUpa1": info.chipTemperature.dss,
}
if "cpu" in self._conf.args.report:
data = {
**data,
"cpuCssAvg": info.leonCssCpuUsage.average,
"cpuMssAvg": info.leonMssCpuUsage.average,
}
if self._reportFile.tell() == 0:
print(','.join(data.keys()), file=self._reportFile)
self.onReport(data)
print(','.join(map(str, data.values())), file=self._reportFile)
def prepareConfManager(in_args):
confManager = ConfigManager(in_args)
confManager.linuxCheckApplyUsbRules()
if not confManager.useCamera:
if str(confManager.args.video).startswith('https'):
confManager.args.video = downloadYTVideo(confManager.args.video, DEPTHAI_VIDEOS)
print("Youtube video downloaded.")
if not Path(confManager.args.video).exists():
raise ValueError("Path {} does not exists!".format(confManager.args.video))
return confManager
def runQt():
from gui.main import DemoQtGui
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import QObject, pyqtSignal, QRunnable, QThreadPool
class WorkerSignals(QObject):
updateConfSignal = pyqtSignal(list)
updateDownloadProgressSignal = pyqtSignal(int, int)
updatePreviewSignal = pyqtSignal(np.ndarray)
setDataSignal = pyqtSignal(list)
exitSignal = pyqtSignal()
errorSignal = pyqtSignal(str)
class Worker(QRunnable):
def __init__(self, instance, parent, conf, selectedPreview=None):
super(Worker, self).__init__()
self.running = False
self.selectedPreview = selectedPreview
self.instance = instance
self.parent = parent
self.conf = conf
self.callback_module = loadModule(conf.args.callback)
self.file_callbacks = {
callbackName: getattr(self.callback_module, callbackName)
for callbackName in ["shouldRun", "onNewFrame", "onShowFrame", "onNn", "onReport", "onSetup", "onTeardown", "onIter"]
if callable(getattr(self.callback_module, callbackName, None))
}
self.instance.setCallbacks(**self.file_callbacks)
self.signals = WorkerSignals()
self.signals.exitSignal.connect(self.terminate)
self.signals.updateConfSignal.connect(self.updateConf)
def run(self):
self.running = True
self.signals.setDataSignal.emit(["restartRequired", False])
self.instance.setCallbacks(shouldRun=self.shouldRun, onShowFrame=self.onShowFrame, onSetup=self.onSetup, onAppSetup=self.onAppSetup, onAppStart=self.onAppStart, showDownloadProgress=self.showDownloadProgress)
self.conf.args.bandwidth = "auto"
if self.conf.args.deviceId is None:
devices = dai.Device.getAllAvailableDevices()
if len(devices) > 0:
defaultDevice = next(map(
lambda info: info.getMxId(),
filter(lambda info: info.desc.protocol == dai.XLinkProtocol.X_LINK_USB_VSC, devices)
), None)
if defaultDevice is None:
defaultDevice = devices[0].getMxId()
self.conf.args.deviceId = defaultDevice
if Previews.color.name not in self.conf.args.show:
self.conf.args.show.append(Previews.color.name)
if Previews.nnInput.name not in self.conf.args.show:
self.conf.args.show.append(Previews.nnInput.name)
if Previews.depth.name not in self.conf.args.show and Previews.disparityColor.name not in self.conf.args.show:
self.conf.args.show.append(Previews.depth.name)
if Previews.depthRaw.name not in self.conf.args.show and Previews.disparity.name not in self.conf.args.show:
self.conf.args.show.append(Previews.depthRaw.name)
if Previews.left.name not in self.conf.args.show:
self.conf.args.show.append(Previews.left.name)
if Previews.rectifiedLeft.name not in self.conf.args.show:
self.conf.args.show.append(Previews.rectifiedLeft.name)
if Previews.right.name not in self.conf.args.show:
self.conf.args.show.append(Previews.right.name)
if Previews.rectifiedRight.name not in self.conf.args.show:
self.conf.args.show.append(Previews.rectifiedRight.name)
try:
self.instance.run_all(self.conf)
except KeyboardInterrupt:
sys.exit(0)
except Exception as ex:
self.onError(ex)
def terminate(self):
self.running = False
self.signals.setDataSignal.emit(["restartRequired", False])
def updateConf(self, argsList):
self.conf.args = argparse.Namespace(**dict(argsList))
def onError(self, ex: Exception):
self.signals.errorSignal.emit(''.join(traceback.format_tb(ex.__traceback__) + [str(ex)]))
self.signals.setDataSignal.emit(["restartRequired", True])
def shouldRun(self):
if "shouldRun" in self.file_callbacks:
return self.running and self.file_callbacks["shouldRun"]()
return self.running
def onShowFrame(self, frame, source):
if "onShowFrame" in self.file_callbacks:
self.file_callbacks["onShowFrame"](frame, source)
if source == self.selectedPreview:
self.signals.updatePreviewSignal.emit(frame)
def onAppSetup(self, app):
setupFrame = createBlankFrame(500, 500)
cv2.putText(setupFrame, "Preparing {} app...".format(app.appName), (150, 250), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255), 4, cv2.LINE_AA)
cv2.putText(setupFrame, "Preparing {} app...".format(app.appName), (150, 250), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
self.signals.updatePreviewSignal.emit(setupFrame)
def onAppStart(self, app):
setupFrame = createBlankFrame(500, 500)
cv2.putText(setupFrame, "Running {} app... (check console)".format(app.appName), (100, 250), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255), 4, cv2.LINE_AA)
cv2.putText(setupFrame, "Running {} app... (check console)".format(app.appName), (100, 250), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
self.signals.updatePreviewSignal.emit(setupFrame)
def showDownloadProgress(self, curr, total):
self.signals.updateDownloadProgressSignal.emit(curr, total)
def onSetup(self, instance):
if "onSetup" in self.file_callbacks:
self.file_callbacks["onSetup"](instance)
self.signals.updateConfSignal.emit(list(vars(self.conf.args).items()))
self.signals.setDataSignal.emit(["previewChoices", self.conf.args.show])
devices = [self.instance._deviceInfo.getMxId()] + list(map(lambda info: info.getMxId(), dai.Device.getAllAvailableDevices()))
self.signals.setDataSignal.emit(["deviceChoices", devices])
if instance._nnManager is not None:
self.signals.setDataSignal.emit(["countLabels", instance._nnManager._labels])
else:
self.signals.setDataSignal.emit(["countLabels", []])
self.signals.setDataSignal.emit(["depthEnabled", self.conf.useDepth])
self.signals.setDataSignal.emit(["statisticsAccepted", self.instance.metrics is not None])
self.signals.setDataSignal.emit(["modelChoices", sorted(self.conf.getAvailableZooModels(), key=cmp_to_key(lambda a, b: -1 if a == "mobilenet-ssd" else 1 if b == "mobilenet-ssd" else -1 if a < b else 1))])
class GuiApp(DemoQtGui):
def __init__(self):
super().__init__()
self.confManager = prepareConfManager(args)
self.running = False
self.selectedPreview = self.confManager.args.show[0] if len(self.confManager.args.show) > 0 else "color"
self.useDisparity = False
self.dataInitialized = False
self.appInitialized = False
self.threadpool = QThreadPool()
self._demoInstance = Demo(displayFrames=False)
def updateArg(self, arg_name, arg_value, shouldUpdate=True):
setattr(self.confManager.args, arg_name, arg_value)
if shouldUpdate:
self.worker.signals.setDataSignal.emit(["restartRequired", True])
def showError(self, error):
print(error, file=sys.stderr)
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Critical)
msgBox.setText(error)
msgBox.setWindowTitle("An error occured")
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.exec()
def setupDataCollection(self):
try:
with Path(".consent").open() as f:
accepted = json.load(f)["statistics"]
except:
accepted = True
self._demoInstance.toggleMetrics(accepted)
def start(self):
self.setupDataCollection()
self.running = True
self.worker = Worker(self._demoInstance, parent=self, conf=self.confManager, selectedPreview=self.selectedPreview)
self.worker.signals.updatePreviewSignal.connect(self.updatePreview)
self.worker.signals.updateDownloadProgressSignal.connect(self.updateDownloadProgress)
self.worker.signals.setDataSignal.connect(self.setData)
self.worker.signals.errorSignal.connect(self.showError)
self.threadpool.start(self.worker)
if not self.appInitialized:
self.appInitialized = True
exit_code = self.startGui()
self.stop(wait=False)
sys.exit(exit_code)
def stop(self, wait=True):
if hasattr(self._demoInstance, "_device"):
current_mxid = self._demoInstance._device.getMxId()
else:
current_mxid = self.confManager.args.deviceId
self.worker.signals.exitSignal.emit()
self.threadpool.waitForDone(10000)
if wait and current_mxid is not None:
start = time.time()
while time.time() - start < 30:
if current_mxid in list(map(lambda info: info.getMxId(), dai.Device.getAllAvailableDevices())):
break
else:
time.sleep(0.1)
else:
print(f"[Warning] Device not available again after 30 seconds! MXID: {current_mxid}")
def restartDemo(self):
self.stop()
self.start()
def guiOnDepthConfigUpdate(self, median=None, dct=None, sigma=None, lrc=None, lrcThreshold=None):
self._demoInstance._pm.updateDepthConfig(self._demoInstance._device, median=median, dct=dct, sigma=sigma, lrc=lrc, lrcThreshold=lrcThreshold)
if median is not None:
if median == dai.MedianFilter.MEDIAN_OFF:
self.updateArg("stereoMedianSize", 0, False)
elif median == dai.MedianFilter.KERNEL_3x3:
self.updateArg("stereoMedianSize", 3, False)
elif median == dai.MedianFilter.KERNEL_5x5:
self.updateArg("stereoMedianSize", 5, False)
elif median == dai.MedianFilter.KERNEL_7x7:
self.updateArg("stereoMedianSize", 7, False)
if dct is not None:
self.updateArg("disparityConfidenceThreshold", dct, False)
if sigma is not None:
self.updateArg("sigma", sigma, False)
if lrc is not None:
self.updateArg("stereoLrCheck", lrc, False)
if lrcThreshold is not None:
self.updateArg("lrcThreshold", lrcThreshold, False)
def guiOnCameraConfigUpdate(self, name, exposure=None, sensitivity=None, saturation=None, contrast=None, brightness=None, sharpness=None):
if exposure is not None:
newValue = list(filter(lambda item: item[0] == name, (self.confManager.args.cameraExposure or []))) + [(name, exposure)]
self._demoInstance._cameraConfig["exposure"] = newValue
self.updateArg("cameraExposure", newValue, False)
if sensitivity is not None:
newValue = list(filter(lambda item: item[0] == name, (self.confManager.args.cameraSensitivity or []))) + [(name, sensitivity)]
self._demoInstance._cameraConfig["sensitivity"] = newValue
self.updateArg("cameraSensitivity", newValue, False)
if saturation is not None:
newValue = list(filter(lambda item: item[0] == name, (self.confManager.args.cameraSaturation or []))) + [(name, saturation)]
self._demoInstance._cameraConfig["saturation"] = newValue
self.updateArg("cameraSaturation", newValue, False)
if contrast is not None:
newValue = list(filter(lambda item: item[0] == name, (self.confManager.args.cameraContrast or []))) + [(name, contrast)]
self._demoInstance._cameraConfig["contrast"] = newValue
self.updateArg("cameraContrast", newValue, False)
if brightness is not None:
newValue = list(filter(lambda item: item[0] == name, (self.confManager.args.cameraBrightness or []))) + [(name, brightness)]
self._demoInstance._cameraConfig["brightness"] = newValue
self.updateArg("cameraBrightness", newValue, False)
if sharpness is not None:
newValue = list(filter(lambda item: item[0] == name, (self.confManager.args.cameraSharpness or []))) + [(name, sharpness)]
self._demoInstance._cameraConfig["sharpness"] = newValue
self.updateArg("cameraSharpness", newValue, False)
self._demoInstance._updateCameraConfigs()
def guiOnDepthSetupUpdate(self, depthFrom=None, depthTo=None, subpixel=None, extended=None):
if depthFrom is not None:
self.updateArg("minDepth", depthFrom)
if depthTo is not None:
self.updateArg("maxDepth", depthTo)
if subpixel is not None:
self.updateArg("subpixel", subpixel)
if extended is not None:
self.updateArg("extendedDisparity", extended)
def guiOnCameraSetupUpdate(self, name, fps=None, resolution=None):
if fps is not None:
if name == "color":
self.updateArg("rgbFps", fps)
else:
self.updateArg("monoFps", fps)
if resolution is not None:
if name == "color":
self.updateArg("rgbResolution", resolution)
else:
self.updateArg("monoResolution", resolution)
def guiOnAiSetupUpdate(self, cnn=None, shave=None, source=None, fullFov=None, sbb=None, sbbFactor=None, ov=None, countLabel=None):
if cnn is not None:
self.updateArg("cnnModel", cnn)
if shave is not None:
self.updateArg("shaves", shave)
if source is not None:
self.updateArg("camera", source)
if fullFov is not None:
self.updateArg("disableFullFovNn", not fullFov)
if sbb is not None:
self.updateArg("spatialBoundingBox", sbb)
if sbbFactor is not None:
self.updateArg("sbbScaleFactor", sbbFactor)
if ov is not None:
self.updateArg("openvinoVersion", ov)
if countLabel is not None or cnn is not None:
self.updateArg("countLabel", countLabel)
def guiOnPreviewChangeSelected(self, selected):
self.worker.selectedPreview = selected
self.selectedPreview = selected
def guiOnSelectDevice(self, selected):
self.updateArg("deviceId", selected)
def guiOnReloadDevices(self):
devices = list(map(lambda info: info.getMxId(), dai.Device.getAllAvailableDevices()))
if hasattr(self._demoInstance, "_deviceInfo"):
devices.insert(0, self._demoInstance._deviceInfo.getMxId())
self.worker.signals.setDataSignal.emit(["deviceChoices", devices])
if len(devices) > 0:
self.worker.signals.setDataSignal.emit(["restartRequired", True])
def guiOnStaticticsConsent(self, value):
try:
with Path('.consent').open('w') as f:
json.dump({"statistics": value}, f)
except:
pass
self.worker.signals.setDataSignal.emit(["restartRequired", True])
def guiOnToggleColorEncoding(self, enabled, fps):
oldConfig = self.confManager.args.encode or {}
if enabled:
oldConfig["color"] = fps
elif "color" in self.confManager.args.encode:
del oldConfig["color"]
self.updateArg("encode", oldConfig)
def guiOnToggleLeftEncoding(self, enabled, fps):
oldConfig = self.confManager.args.encode or {}
if enabled:
oldConfig["left"] = fps
elif "color" in self.confManager.args.encode:
del oldConfig["left"]
self.updateArg("encode", oldConfig)
def guiOnToggleRightEncoding(self, enabled, fps):
oldConfig = self.confManager.args.encode or {}
if enabled:
oldConfig["right"] = fps
elif "color" in self.confManager.args.encode:
del oldConfig["right"]
self.updateArg("encode", oldConfig)
def guiOnSelectReportingOptions(self, temp, cpu, memory):
options = []
if temp:
options.append("temp")
if cpu:
options.append("cpu")
if memory:
options.append("memory")
self.updateArg("report", options)
def guiOnSelectReportingPath(self, value):
self.updateArg("reportFile", value)
def guiOnSelectEncodingPath(self, value):
self.updateArg("encodeOutput", value)
def guiOnToggleDepth(self, value):
self.updateArg("disableDepth", not value)
selectedPreviews = [Previews.rectifiedRight.name, Previews.rectifiedLeft.name] + ([Previews.disparity.name, Previews.disparityColor.name] if self.useDisparity else [Previews.depth.name, Previews.depthRaw.name])
depthPreviews = [Previews.rectifiedRight.name, Previews.rectifiedLeft.name, Previews.depth.name, Previews.depthRaw.name, Previews.disparity.name, Previews.disparityColor.name]
filtered = list(filter(lambda name: name not in depthPreviews, self.confManager.args.show))
if value:
updated = filtered + selectedPreviews
if self.selectedPreview not in updated:
self.selectedPreview = updated[0]
self.updateArg("show", updated)
else:
updated = filtered + [Previews.left.name, Previews.right.name]
if self.selectedPreview not in updated:
self.selectedPreview = updated[0]
self.updateArg("show", updated)
def guiOnToggleNN(self, value):
self.updateArg("disableNeuralNetwork", not value)
filtered = list(filter(lambda name: name != Previews.nnInput.name, self.confManager.args.show))
if value:
updated = filtered + [Previews.nnInput.name]
if self.selectedPreview not in updated:
self.selectedPreview = updated[0]
self.updateArg("show", filtered + [Previews.nnInput.name])
else:
if self.selectedPreview not in filtered:
self.selectedPreview = filtered[0]
self.updateArg("show", filtered)
def guiOnRunApp(self, appName):
self.stop()
self.updateArg("app", appName, shouldUpdate=False)
self.setData(["runningApp", appName])
self.start()
def guiOnTerminateApp(self, appName):
self.stop()
self.updateArg("app", None, shouldUpdate=False)
self.setData(["runningApp", ""])
self.start()
def guiOnToggleDisparity(self, value):
self.useDisparity = value
depthPreviews = [Previews.depth.name, Previews.depthRaw.name]
disparityPreviews = [Previews.disparity.name, Previews.disparityColor.name]
if value:
filtered = list(filter(lambda name: name not in depthPreviews, self.confManager.args.show))
updated = filtered + disparityPreviews
if self.selectedPreview not in updated:
self.selectedPreview = updated[0]
self.updateArg("show", updated)
else:
filtered = list(filter(lambda name: name not in disparityPreviews, self.confManager.args.show))
updated = filtered + depthPreviews
if self.selectedPreview not in updated:
self.selectedPreview = updated[0]
self.updateArg("show", updated)
GuiApp().start()
def runOpenCv():
confManager = prepareConfManager(args)
demo = Demo()
demo.run_all(confManager)
if __name__ == "__main__":
try:
if args.noSupervisor:
if args.guiType == "qt":
runQt()
else:
args.guiType = "cv"
runOpenCv()
else:
s = Supervisor()
if args.guiType != "cv":
available = s.checkQtAvailability()
if args.guiType == "qt" and not available:
raise RuntimeError("QT backend is not available, run the script with --guiType \"cv\" to use OpenCV backend")
if args.guiType == "auto" and platform.machine() == 'aarch64': # Disable Qt by default on Jetson due to Qt issues
args.guiType = "cv"
elif available:
args.guiType = "qt"
else:
args.guiType = "cv"
s.runDemo(args)
except KeyboardInterrupt:
sys.exit(0)
|
the-stack_106_15185
|
from collections import OrderedDict
# Dictionaries sorting Tokens by hierarchy. Hierarchies are based on game importance and similarity.
GROUND_TOKENS = OrderedDict(
{"X": "Ground Block"}
)
SPECIAL_GROUND_TOKENS = OrderedDict(
{
"#": "Pyramind Block",
}
)
PLATFORM_TOKENS = OrderedDict(
{
"S": "Normal Brick Block",
"%": "Jump through platform",
"|": "Background for the jump through platform",
"b": "Bullet bill neck or body",
}
)
SKY_TOKENS = OrderedDict(
{
"-": "Empty",
}
)
PIPE_TOKENS = OrderedDict(
{
"<": "Top left of empty pipe",
">": "Top right of empty pipe",
"[": "Left of empty pipe",
"]": "Right of empty pipe",
"t": "Empty Pipe",
"T": "Pipe with Piranaha Plant in it",
}
)
ENEMY_TOKENS = OrderedDict(
{
"E": "Goomba",
"g": "Goomba",
"k": "Green Koopa",
"r": "Red Koopa",
"y": "Spiky",
}
)
SPECIAL_ENEMY_TOKENS = OrderedDict(
{
"G": "Winged Goomba",
"K": "Winged Green Koopa",
"R": "Winged Red Koopa",
"Y": "Winged Spiky",
"*": "Bullet Bill",
"B": "Bullet bill head",
}
)
SPECIAL_TOKENS = OrderedDict(
{
"o": "Coin",
"Q": "Coin Question block",
"!": "Coin Question block",
"?": "Special Question block",
"@": "Special Question block",
"M": "Mario Starting Position, not having it will force the engine to start at x = 0 and the first ground floor.",
"F": "Mario finish line, not having it will force the engine to end at x = levelWidth and the first ground floor.",
"C": "Coing Brick Block",
}
)
EXTRA_SPECIAL_TOKENS = OrderedDict(
{
"D": "Used block",
"U": "Musrhoom Brick Block",
"L": "1 up Block",
"2": "Invisible coin bock",
"1": "Invisible 1 up block",
}
)
TOKEN_DOWNSAMPLING_HIERARCHY = [
SKY_TOKENS,
GROUND_TOKENS,
SPECIAL_GROUND_TOKENS,
PLATFORM_TOKENS,
PIPE_TOKENS,
ENEMY_TOKENS,
SPECIAL_ENEMY_TOKENS,
SPECIAL_TOKENS,
EXTRA_SPECIAL_TOKENS
]
TOKENS = OrderedDict(
{**GROUND_TOKENS, **SPECIAL_GROUND_TOKENS, **PLATFORM_TOKENS, **SKY_TOKENS, **PIPE_TOKENS,
**ENEMY_TOKENS, **SPECIAL_ENEMY_TOKENS, **SPECIAL_TOKENS, **EXTRA_SPECIAL_TOKENS}
)
TOKEN_GROUPS = [SKY_TOKENS, GROUND_TOKENS, SPECIAL_GROUND_TOKENS, PLATFORM_TOKENS, ENEMY_TOKENS,
SPECIAL_ENEMY_TOKENS, PIPE_TOKENS, SPECIAL_TOKENS, EXTRA_SPECIAL_TOKENS]
REPLACE_TOKENS = {"F": "-", "M": "-"} # We replace these tokens so the generator doesn't add random start or end points
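

if __name__ == "__main__":
    # Minimal illustrative sketch (not part of the original module). It assumes a level row is a
    # plain string of the tokens above and shows how REPLACE_TOKENS and TOKEN_GROUPS can be
    # combined to turn a row into per-tile group indices.
    row = "----M---o---F---XXXX"
    # Drop the start/end markers, as done via REPLACE_TOKENS before generation.
    for token, replacement in REPLACE_TOKENS.items():
        row = row.replace(token, replacement)
    # Map every tile to the index of the first group in TOKEN_GROUPS that contains it.
    group_indices = [
        next(i for i, group in enumerate(TOKEN_GROUPS) if tile in group)
        for tile in row
    ]
    print(row)
    print(group_indices)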
|
the-stack_106_15186
|
import os;
try: # mDebugOutput use is Optional
from mDebugOutput import ShowDebugOutput, fShowDebugOutput;
except ModuleNotFoundError as oException:
if oException.args[0] != "No module named 'mDebugOutput'":
raise;
ShowDebugOutput = fShowDebugOutput = lambda x: x; # NOP
from mFileSystemItem import cFileSystemItem;
import mHTTPProtocol;
from mNotProvided import *;
def gfCheckIfIdIsUsedInTreeForNode(sId, oRootNode, oNode):
oExistingNodeWithId = oRootNode.foGetNodeById(sId)
assert oExistingNodeWithId is None, \
"%s cannot have id %s because it is already used in the tree by %s" % (oNode.sName, sId, oExistingNodeWithId.sName);
goIconsFolder = cFileSystemItem(__file__).oParent.foGetChild("icons", bMustBeFolder = True);
class cTreeNode(object):
sNamespace = "cTreeNode";
oIconsFolder = goIconsFolder;
def __init__(oSelf, sName, sType = None, xData = None, sId = None, oIconFile = None, sIconURL = None, bOpened = None, bDisabled = None, bSelected = None, sToolTip = None):
oSelf.sName = sName;
oSelf.sType = sType; # Valid values: None, "text", "html", "markdown", "node-link", "url-link", "iframe", "img".
oSelf.xData = xData;
oSelf.__sId = sId;
oSelf.oIconFile = oIconFile;
oSelf.sIconURL = sIconURL;
oSelf.bOpened = bOpened;
oSelf.bDisabled = bDisabled;
oSelf.bSelected = bSelected;
oSelf.sToolTip = sToolTip;
oSelf.__oParent = None;
oSelf.__aoChildren = [];
def foCreateChild(oSelf, sName, *txAdditionalArguments, **dxAdditionalArguments):
oChild = cTreeNode(sName, *txAdditionalArguments, **dxAdditionalArguments);
oSelf.fAppendChild(oChild);
return oChild;
@property
def sId(oSelf):
if oSelf.__sId:
return oSelf.__sId;
if oSelf.__oParent:
return "%s>anonymous #%d" % (oSelf.__oParent.sId, oSelf.__oParent.__aoChildren.index(oSelf));
return "##Anonymous root##";
@sId.setter
def sId(oSelf, sId):
assert oSelf.__sId is None, \
"%s already has an id" % oSelf.sName;
gfCheckIfIdIsUsedInTreeForNode(sId, oSelf.oRoot, oSelf);
oSelf.__sId = sId;
def fLinkToNode(oSelf, oTargetTreeNode):
fAssertType("oTargetTreeNode", oTargetTreeNode, cTreeNode);
# Make sure oTargetTreeNode is part of the same tree as oSelf.
assert oSelf.oRoot is oTargetTreeNode.oRoot, \
"Cannot link to a node that is not part of the same tree!";
oSelf.sType = "node-link";
oSelf.xData = oTargetTreeNode;
def fLinkToNodeId(oSelf, sTargetId):
fAssertType("sTargetId", sTargetId, str);
oSelf.sType = "node-link";
oSelf.xData = sTargetId;
def fLinkToURL(oSelf, xURL):
fAssertType("xURL", xURL, mHTTPProtocol.cURL, str, bytes);
oSelf.sType = "url-link";
oSelf.xData = (
str(xURL.sbAbsolute, "ascii", "strict") if isinstance(xURL, mHTTPProtocol.cURL) else \
str(xURL, "ascii", "strict") if isinstance(xURL, bytes) else \
xURL
);
assert oSelf.xData, \
"Invalid URL: %s" % repr(xURL);
def foGetNodeById(oSelf, sId):
if oSelf.sId == sId: return oSelf;
for oChild in oSelf.__aoChildren:
oNode = oChild.foGetNodeById(sId);
if oNode: return oNode;
return None;
@property
def oParent(oSelf):
return oSelf.__oParent;
@property
def oRoot(oSelf):
# Ascend to the root node.
oNodeInChainToRoot = oSelf;
while oNodeInChainToRoot.oParent:
oNodeInChainToRoot = oNodeInChainToRoot.oParent;
return oNodeInChainToRoot;
@property
def aoChildren(oSelf):
return oSelf.__aoChildren[:];
@property
def aoDescendants(oSelf):
return oSelf.faoGetDescendantsWithCallback();
def faoGetDescendantsWithCallback(oSelf, fCallback = None):
aoDescendants = [];
for oChild in oSelf.__aoChildren:
fCallback and fCallback(oChild);
aoDescendants.append(oChild);
aoDescendants.extend(oChild.faoGetDescendantsWithCallback(fCallback));
return aoDescendants;
def fRemoveChild(oSelf, oChild):
assert oChild in oSelf.__aoChildren, \
"%s is not a child of %s" % (oChild.sName, oSelf.sName);
oSelf.__aoChildren.remove(oChild);
oChild.__oParent = None;
def fRemoveChildren(oSelf):
while oSelf.__aoChildren:
oSelf.__aoChildren.pop().__oParent = None;
def fRemove(oSelf):
oParent = oSelf.oParent;
assert oParent, \
"%s does not have a parent" % oSelf.sName;
oParent.fRemoveChild(oSelf);
def fAppendChild(oSelf, oChild):
assert oChild.oParent is None, \
"%s already has a parent" % oSelf.sName;
gfCheckIfIdIsUsedInTreeForNode(oChild.__sId, oSelf.oRoot, oChild);
oSelf.__aoChildren.append(oChild);
oChild.__oParent = oSelf;
def fDiscardUserState(oSelf):
oSelf.bOpened = None;
oSelf.bSelected = None;
for oChild in oSelf.__aoChildren:
oChild.fDiscardUserState();
def fdxGetJSON(oSelf):
dxJSON = {"text": oSelf.sName};
bForceDisabled = False;
asRemarks = [];
if oSelf.sType:
if oSelf.sType == "node-link":
oTargetNode = oSelf.oRoot.foGetNodeById(oSelf.xData) if isinstance(oSelf.xData, str) \
else oSelf.xData if isinstance(oSelf.xData, cTreeNode) \
else None;
if not oTargetNode or (oSelf.oRoot is not oTargetNode.oRoot):
asRemarks.append("Broken link to %s %s" %("external node" if oTargetNode else "unknown node", repr(oSelf.xData)));
bForceDisabled = True;
dxJSON["data"] = {
"sType": oSelf.sType,
};
else:
asRemarks.append("Link to " + repr(oTargetNode.sName));
dxJSON["data"] = {
"sType": oSelf.sType,
"sData": oTargetNode.sId,
};
else:
dxJSON["data"] = {
"sType": oSelf.sType,
"sData": oSelf.xData,
};
dxJSON["id"] = oSelf.sId;
if oSelf.sIconURL is not None:
dxJSON["icon"] = oSelf.sIconURL;
elif oSelf.oIconFile is not None:
dxJSON["icon"] = "icons/%s/%s" % (oSelf.sNamespace, oSelf.oIconFile.sName);
if oSelf.__aoChildren:
dxJSON["children"] = [oChild.fdxGetJSON() for oChild in oSelf.__aoChildren];
dxStateJSON = {};
if oSelf.bOpened is not None:
dxStateJSON["opened"] = oSelf.bOpened;
if bForceDisabled:
dxStateJSON["disabled"] = True;
elif oSelf.bDisabled is not None:
dxStateJSON["disabled"] = oSelf.bDisabled;
if oSelf.bSelected is not None:
dxStateJSON["selected"] = oSelf.bSelected;
if len(dxStateJSON) != 0:
dxJSON["state"] = dxStateJSON;
if oSelf.sToolTip:
asRemarks.insert(0, oSelf.sToolTip);
if asRemarks:
dxJSON.setdefault("a_attr", {})["title"] = "\n".join(asRemarks);
return dxJSON;
|
the-stack_106_15188
|
# -*- coding: utf-8 -*-
import scrapy
import urllib
import requests
class TbSpider(scrapy.Spider):
name = 'tb'
allowed_domains = ['tieba.baidu.com']
start_urls = ['http://tieba.baidu.com/mo/q----,sz@320_240-1-3---2/m?kw=%E6%9D%8E%E6%AF%85&lp=9001']
def parse(self, response):
        # Group the page by post (thread)
div_list = response.xpath("//div[contains(@class,'i')]")
for div in div_list:
item = {}
item["href"] = div.xpath("./a/@href").extract_first()
item["title"] = div.xpath("./a/text()").extract_first()
item["img_list"] = []
if item["href"] is not None:
item["href"] = urllib.parse.urljoin(response.url,item["href"])
yield scrapy.Request(
item["href"],
callback=self.parse_detail,
meta = {"item":item}
)
        # Pagination of the list page
next_url = response.xpath("//a[text()='下一页']/@href").extract_first()
if next_url is not None:
next_url = urllib.parse.urljoin(response.url,next_url)
yield scrapy.Request(
next_url,
callback=self.parse,
)
def parse_detail(self,response):
item = response.meta["item"]
# if "img_list" not in item:
# item["img_list"] = response.xpath("//img[@class='BDE_Image']/@src").extract()
# else:
item["img_list"].extend(response.xpath("//img[@class='BDE_Image']/@src").extract())
next_url = response.xpath("//a[text()='下一页']/@href").extract_first()
        if next_url is not None:  # there is a next page
next_url = urllib.parse.urljoin(response.url,next_url)
yield scrapy.Request(
next_url,
callback=self.parse_detail,
meta={"item":item}
)
else:
item["img_list"] = [requests.utils.unquote(i).split("src=")[-1] for i in item["img_list"]]
print(item)
# yield item
|
the-stack_106_15189
|
# The MIT License (MIT)
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.models.BaseLOD import BaseLOD
from lib.models.BasicDecoder import BasicDecoder
from lib.utils import PerfTimer
class MyActivation(nn.Module):
def forward(self, x):
return torch.sin(x)
class FeatureVolume(nn.Module):
def __init__(self, fdim, fsize):
super().__init__()
self.fsize = fsize
self.fdim = fdim
self.fm = nn.Parameter(torch.randn(1, fdim, fsize+1, fsize+1, fsize+1) * 0.01)
self.sparse = None
def forward(self, x):
N = x.shape[0]
if x.shape[1] == 3:
            sample_coords = x.reshape(1, N, 1, 1, 3)  # [1, N, 1, 1, 3]
sample = F.grid_sample(self.fm, sample_coords,
align_corners=True, padding_mode='border')[0,:,:,0,0].transpose(0,1)
else:
            sample_coords = x.reshape(1, N, x.shape[1], 1, 3)  # [1, N, K, 1, 3]
sample = F.grid_sample(self.fm, sample_coords,
align_corners=True, padding_mode='border')[0,:,:,:,0].permute([1,2,0])
return sample
class OctreeSDF(BaseLOD):
def __init__(self, args, init=None):
super().__init__(args)
self.fdim = self.args.feature_dim
self.fsize = self.args.feature_size
self.hidden_dim = self.args.hidden_dim
self.pos_invariant = self.args.pos_invariant
self.features = nn.ModuleList([])
for i in range(self.args.num_lods):
self.features.append(FeatureVolume(self.fdim, (2**(i+self.args.base_lod))))
self.interpolate = self.args.interpolate
self.louts = nn.ModuleList([])
self.sdf_input_dim = self.fdim
if not self.pos_invariant:
self.sdf_input_dim += self.input_dim
self.num_decoder = 1 if args.joint_decoder else self.args.num_lods
for i in range(self.num_decoder):
self.louts.append(
nn.Sequential(
nn.Linear(self.sdf_input_dim, self.hidden_dim, bias=True),
nn.ReLU(),
nn.Linear(self.hidden_dim, 1, bias=True),
)
)
def encode(self, x):
# Disable encoding
return x
def sdf(self, x, lod=None, return_lst=False):
if lod is None:
lod = self.lod
# Query
l = []
samples = []
for i in range(self.num_lods):
# Query features
sample = self.features[i](x)
samples.append(sample)
# Sum queried features
if i > 0:
samples[i] += samples[i-1]
# Concatenate xyz
ex_sample = samples[i]
if not self.pos_invariant:
ex_sample = torch.cat([x, ex_sample], dim=-1)
if self.num_decoder == 1:
prev_decoder = self.louts[0]
curr_decoder = self.louts[0]
else:
prev_decoder = self.louts[i-1]
curr_decoder = self.louts[i]
d = curr_decoder(ex_sample)
# Interpolation mode
if self.interpolate is not None and lod is not None:
if i == len(self.louts) - 1:
return d
if lod+1 == i:
_ex_sample = samples[i-1]
if not self.pos_invariant:
_ex_sample = torch.cat([x, _ex_sample], dim=-1)
_d = prev_decoder(_ex_sample)
                    return (1.0 - self.interpolate) * _d + self.interpolate * d
# Get distance
else:
d = curr_decoder(ex_sample)
# Return distance if in prediction mode
if lod is not None and lod == i:
return d
l.append(d)
if self.training:
self.loss_preds = l
if return_lst:
return l
else:
return l[-1]
|
the-stack_106_15190
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
from framat import Model
REL_TOL = 1e-4
def get_cantilever_model():
model = Model()
mat = model.add_feature('material', uid='dummy')
mat.set('E', 1)
mat.set('G', 1)
mat.set('rho', 1)
cs = model.add_feature('cross_section', uid='dummy')
cs.set('A', 1)
cs.set('Iy', 1)
cs.set('Iz', 1)
cs.set('J', 1)
beam = model.add_feature('beam')
beam.add('node', [0, 0, 0], uid='root')
beam.add('node', [1, 0, 0], uid='tip')
beam.set('nelem', 10)
beam.add('material', {'from': 'root', 'to': 'tip', 'uid': 'dummy'})
beam.add('cross_section', {'from': 'root', 'to': 'tip', 'uid': 'dummy'})
beam.add('orientation', {'from': 'root', 'to': 'tip', 'up': [0, 0, 1]})
beam.add('point_load', {'at': 'tip', 'load': [0, 0, -1, 0, 0, 0]})
model.set_feature('bc').add('fix', {'node': 'root', 'fix': ['all']})
model.set_feature('post_proc')
return model
def test_tip_force():
model = get_cantilever_model()
# Test tip deflection for different discretisations
for nelem in (5, 17, 41, 83, 107):
model.get('beam')[0].set('nelem', nelem)
r = model.run()
deform = r.get('tensors').get('comp:U')
# Expected non-zero
        assert -1/3 == pytest.approx(deform['uz'][-1], rel=REL_TOL)
assert 0.5 == pytest.approx(deform['thy'][-1], rel=REL_TOL)
# Expected zero
for p in ('ux', 'uy', 'thx', 'thz'):
assert 0 == pytest.approx(deform[p][-1], rel=REL_TOL)
|
the-stack_106_15193
|
import requests
import socket
import struct
import random
import queue
import threading
def vote(cid):
'''Vote for someone whose id is cid
Args:
cid: the contestant id
Return the HTTP request
Author: Li Zhineng <[email protected]>
Url: http://zhineng.li
'''
# IP Address from 0 ~ 4294967295
randInt = int(random.random() * (4294967295 + 1))
ip = socket.inet_ntoa(struct.pack('I',socket.htonl(randInt)))
# Number => String
cid = str(cid)
# Custom headers
headers = {
'X-Forwarded-For': ip,
'Referer': 'http://lcs.yicai.com'
}
obj = requests.get('http://lcs.yicai.com/do.php?ac=vote&inajax=1&op=vote&type=user&id=' + cid, headers = headers)
return obj.text
class _voteThread(threading.Thread):
'''Vote for someone (Just a Thread class)
Arg:
q: vote queue
'''
def __init__(self, q):
self.q = q
threading.Thread.__init__(self)
def run(self):
while True:
cid = self.q.get()
# Vote for someone
vote(cid)
# Signals to queue job is done
self.q.task_done()
def voteHelper(cid, tickets):
'''Vote helper (multithreading)
'''
# Create a new queue
voteQueue = queue.Queue()
    # Enqueue one vote task per ticket
    for _ in range(int(tickets)):
        voteQueue.put(cid)
# Spawn a pool of threads
for i in range(20):
st = _voteThread(voteQueue)
        st.daemon = True
st.start()
# Wait on the queues until everything has been processed
voteQueue.join()
if __name__ == '__main__':
pass
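    # Minimal usage sketch (the contestant id 12345 and the ticket count 10 are
    # made-up values, not taken from the original script); uncomment to run:
    # voteHelper(12345, 10)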
|
the-stack_106_15198
|
import os
import tensorflow as tf
from src.dataset.dtd_dataset import DTDDataset
from src.models.fcn.simple_fcn import SimpleFCN
from src.models.u_net.u_net_model import UNet
from src.models.resnest.resnest import ResNest
from src.models.resnet.resnet import ResNet18
from src.settings.settings import Settings, Models
from src.utils.utils import create_experiment_folders
from src.utils.lr_tensorboard_callback import LRTensorBoardCallback
def check_if_GPUs():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
print(e)
def scheduler(epoch, lr):
if epoch < 50:
return lr
else:
return lr * tf.math.exp(-0.1)
if __name__ == '__main__':
# check if GPU
check_if_GPUs()
# global settings
settings = Settings()
# create dataset
dataset = DTDDataset.get_instance(settings=settings,
log=settings.log)
# create and build the model
if settings.model is Models.SIMPLE_FCN:
# Simple FCN
lr = 1e-4
model = SimpleFCN(settings.n_classes,
settings.patch_size,
settings.patch_border,
settings.patch_channels,
dropout_rate=settings.dropout_rate)
elif settings.model is Models.U_NET:
# U-Net
lr = 1e-4
model = UNet(num_classes=settings.n_classes,
img_size=settings.patch_size,
img_border=settings.patch_border,
nr_channels=settings.patch_channels,
layer_depth=settings.layer_depth,
filters_root=settings.filters_root,
dropout_rate=settings.dropout_rate)
elif settings.model is Models.RESNEST:
# ResNeSt
lr = 1e-2
input_shape = [settings.patch_size,
settings.patch_size,
settings.patch_channels]
model = ResNest(
verbose=settings.log,
input_shape=input_shape,
n_classes=settings.n_classes,
dropout_rate=settings.dropout_rate,
# ResNeSt18: [2, 2, 2, 2], ResNeSt50: [3, 4, 6, 3]
blocks_set=[3, 4, 6, 3],
stem_width=32,
radix=2,
groups=1,
bottleneck_width=64,
deep_stem=False,
avg_down=False,
avd=True,
avd_first=False,
using_cb=False).build()
model.model_name = 'ResNeSt'
elif settings.model is Models.RESNET:
# ResNet
lr = 1e-3
model = ResNet18()
# build the model
in_shape = [1,
settings.patch_size,
settings.patch_size,
settings.patch_channels]
model.build(in_shape)
model.summary()
# create the paths for the experiment
paths = create_experiment_folders(dataset.name,
model.model_name,
post_fix='{0}-{1}'.format(str(lr),
str(settings.dropout_rate)))
# define the loss function
loss = tf.keras.losses.categorical_crossentropy
# define the optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
# define the metrics to track and visualize in tensorboard
metrics = ['categorical_crossentropy',
'categorical_accuracy']
# compile the model
model.compile(loss=loss,
optimizer=optimizer,
metrics=metrics)
# define the callbacks
callbacks = [tf.keras.callbacks.ModelCheckpoint(paths['save'],
save_best_only=True),
tf.keras.callbacks.CSVLogger(os.path.join(paths['log'],
'training.log')),
tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=0),
LRTensorBoardCallback(),
tf.keras.callbacks.TensorBoard(log_dir=paths['tensorboard'],
update_freq=1,
histogram_freq=10,
profile_batch=0,
write_graph=True)]
# train the model
model.fit(dataset.train_ds,
validation_data=dataset.val_ds,
validation_steps=dataset.val_steps,
steps_per_epoch=dataset.train_steps,
epochs=settings.epochs,
callbacks=callbacks)
# evaluate the model on the test set
model.evaluate(dataset.test_ds,
callbacks=callbacks)
|
the-stack_106_15199
|
"""
Implementation of Hidden Interaction Tensor Factorization (HITF) model based on
pytorch.
"""
import time
import logging
import numpy as np
import torch
import torch.nn.functional as f
from functools import partial
eps = 1e-10
def P_plus(X, proj_eps):
return torch.clamp(X, min=proj_eps)
class HITF:
def __init__(self, R, proj_eps=1e-5, use_cuda=False, logger=None):
self.U_init = None
self.U = None
self.lmbda = None
self.R = R
self.proj_eps = proj_eps
self.use_cuda = use_cuda
self.logger = logger or logging.getLogger(__name__)
def _eval_nll(self, U, M, Dprime, verbose=False):
Mhat = U[0] @ torch.diag(U[1].sum(dim=0)) @ U[2].t()
Dhat = U[0] @ torch.diag(U[2].sum(dim=0)) @ U[1].t()
nll_M = Mhat - M * torch.log(torch.clamp(Mhat, min=1e-15))
nll_D = Dhat.clone()
nll_D[nll_D < 35] = torch.log(torch.clamp(torch.exp(nll_D[nll_D < 35]) - 1, min=1e-15))
nll_D = Dhat - Dprime * nll_D
if verbose:
return nll_M.sum(), nll_D.sum(), nll_M.sum() + nll_D.sum()
return nll_M.sum() + nll_D.sum()
def calculate_Phi(self, U, M):
Mhat = U[0] @ torch.diag(U[1].sum(dim=0)) @ U[2].t()
return M.t() / torch.clamp(Mhat.t(), min=eps)
def calculate_Psi(self, U, Dprime):
Dhat = U[0] @ torch.diag(U[2].sum(dim=0)) @ U[1].t()
return (Dprime / torch.clamp(1 - torch.exp(-Dhat), min=eps)).t()
def normalize_factors(self):
for n in range(len(self.U)):
self.lmbda *= self.U[n].norm(1, dim=0)
self.U[n] = f.normalize(self.U[n], 1, dim=0)
def redistribute_factors(self, n):
self.normalize_factors()
self.U[n] *= self.lmbda
self.lmbda = torch.ones(self.R).cuda() if self.use_cuda else torch.ones(self.R)
def _projected_line_search_armijo(self, X, grad, Sk, eval_func, proj_func,
desc_step, suff_desc, max_steps=100):
        f_X = eval_func(X)
        for t in range(max_steps):
            X_new = proj_func(X + (desc_step ** t) * Sk)
            # Armijo sufficient-decrease condition on the projected step
            if eval_func(X_new) - f_X <= suff_desc * ((X_new - X) * grad).sum():
                return X_new, t + 1
        return X, -1  # No updating if maximum steps reached.
def _solve_subproblem(self, n, Xinit, M, Dprime, max_iter, grad_func, desc_step, suff_desc, outer):
X = Xinit.clone()
# U = []
eval_f = lambda X: self._eval_nll([self.U[k] if k != n else X for k in range(len(self.U))], M=M, Dprime=Dprime)
proj_func = partial(P_plus, proj_eps=self.proj_eps)
nll_init = eval_f(X)
nlls = []
steps = []
tic = time.time()
for iter_ in range(max_iter):
grad = grad_func([self.U[k] if k != n else X for k in range(len(self.U))])
X, t = self._projected_line_search_armijo(X, grad, -grad, eval_f, proj_func, desc_step, suff_desc)
nll = eval_f(X)
self.logger.debug(f' Outer: {outer}, U{n+1}, iter:{iter_+1}, t={t}, nll: {nll:f}')
nlls.append(nll)
steps.append(t)
if iter_ > 0 and abs((nlls[-2] - nlls[-1]) / nlls[-2]) < 1e-5:
break
toc = time.time()
iter_info = {
'inner_iter_time': toc - tic,
'nll_init': nll_init,
'inner_nlls': nlls,
'step_size': steps
}
self.logger.info(f' Outer: {outer}, U{n+1} updated with {len(nlls)} iterations, final nll: {nlls[-1]:f}.')
return X, iter_info
def decompose(self, M, Dprime, max_outer_iters=200,
max_inner_iters=100, desc_step=0.5, suff_desc=1e-4,
dump_file=None, random_state=None):
if isinstance(M, np.ndarray):
M = torch.from_numpy(M.astype(np.float32))
if isinstance(Dprime, np.ndarray):
Dprime = torch.from_numpy(Dprime.astype(np.float32))
# set random state if specified
if random_state is None:
st0 = np.random.get_state()
else:
            self.logger.warning('random_state was given but is currently ignored by decompose().')
# for debug
# seed = 75
# np.random.seed(seed)
# dims = (M.shape[0], Dprime.shape[1], M.shape[1])
# self.U = [torch.from_numpy(np.random.rand(dim, self.R).astype(np.float32)) for dim in dims]
# self.lmbda = torch.ones(self.R)
# self.normalize_factors()
# self.lmbda = torch.ones(self.R) # to avoid too large initial value
# initialize
dims = (M.shape[0], Dprime.shape[1], M.shape[1])
U_init = [torch.rand(dim, self.R) for dim in dims]
self.U = [k.clone() for k in U_init]
self.lmbda = torch.ones(self.R)
self.normalize_factors()
self.lmbda = torch.ones(self.R) # to avoid too large initial value
if self.use_cuda:
# self.U_init = [k.cuda() for k in self.U_init]
self.U = [k.cuda() for k in self.U]
self.lmbda = self.lmbda.cuda()
M = M.cuda()
Dprime = Dprime.cuda()
Phi = lambda U: self.calculate_Phi(U, M)
Psi = lambda U: self.calculate_Psi(U, Dprime)
# define gradients
if self.use_cuda:
ones = lambda shape: torch.cuda.FloatTensor(shape).fill_(1)
eyes = torch.eye(self.R).cuda()
else:
ones = lambda shape: torch.ones(shape)
eyes = torch.eye(self.R)
grad_funcs = [
lambda U: 2 * ones(U[0].shape) - Phi(U).t() @ U[2] - Psi(U).t() @ U[1], # gradient w.r.t. U1
lambda U: 2 * ones(U[1].shape) - ones(U[1].shape) @ ((U[0].t() @ Phi(U).t() @ U[2]) * eyes) - Psi(U) @ U[0], # gradient w.r.t. U2
lambda U: 2 * ones(U[2].shape) - Phi(U) @ U[0] - ones(U[2].shape) @ ((U[0].t() @ Psi(U).t() @ U[1]) * eyes) ## gradient w.r.t. U3
]
self.iters_info = []
nll = self._eval_nll(self.U, M, Dprime, verbose=True)
tic = time.time()
for iter_ in range(max_outer_iters):
self.logger.info(f'Start the {iter_+1}-th iteration.')
iter_tic = time.time()
iter_infos = [None] * 3
for n in range(3):
self.redistribute_factors(n)
self.U[n], iter_infos[n] = self._solve_subproblem(n, self.U[n], M, Dprime, max_inner_iters, grad_funcs[n], desc_step, suff_desc, iter_)
time_elapsed = time.time() - iter_tic
nll_old = nll
nll = self._eval_nll(self.U, M, Dprime, verbose=True)
nll_delta = abs((nll_old[-1] - nll[-1]) / nll_old[-1])
fit_M = torch.norm(M - self.U[0] @ torch.diag(self.U[1].sum(dim=0)) @ self.U[2].t()) ** 2
Dhat = self.U[0] @ torch.diag(self.U[2].sum(dim=0)) @ self.U[1].t()
Dhat[Dhat > 0] = 1
fit_D = torch.norm(Dprime - Dhat) ** 2
iter_info = {
'inner_iter_infos': iter_infos,
'nll': nll,
'iter_time': time_elapsed
}
self.iters_info.append(iter_info)
self.logger.info(f'Iter {iter_}: {time_elapsed:.1f}s, negtative-ll {nll[-1]:.3f}, nll delta {nll_delta}, M fit {fit_M:.3f}, D fit {fit_D}')
self.redistribute_factors(0)
if dump_file:
np.savez(dump_file, iter=iter_,
U1=self.U[0].cpu().numpy(),
U2=self.U[1].cpu().numpy(),
U3=self.U[2].cpu().numpy())
if iter_ > 0 and abs(nll_delta) <= 1e-4:
break
self.logger.info(f'Decomposition is done, time: {time.time()-tic:.1f}s')
def project(self, M, Dprime, max_outer_iters=200,
max_inner_iters=100, desc_step=0.5, suff_desc=1e-4,
dump_file=None, random_state=None):
if isinstance(M, np.ndarray):
M = torch.from_numpy(M.astype(np.float32))
if isinstance(Dprime, np.ndarray):
Dprime = torch.from_numpy(Dprime.astype(np.float32))
dims = (M.shape[0], Dprime.shape[1], M.shape[1])
proj = torch.rand(dims[0], self.R)
if self.use_cuda:
proj = proj.cuda()
M = M.cuda()
Dprime = Dprime.cuda()
self.logger.info('Projecting testing data with dims ({}, {}, {}).'.format(*dims))
Phi = lambda U: self.calculate_Phi(U, M)
Psi = lambda U: self.calculate_Psi(U, Dprime)
ones = lambda shape: torch.ones(shape).cuda() if self.use_cuda else torch.ones(shape)
grad_func = lambda U: 2 * ones(U[0].shape) - Phi(U).t() @ U[2] - Psi(U).t() @ U[1] # gradient w.r.t. U1
for iter_ in range(max_outer_iters):
proj_old = proj.clone()
proj, iter_info = self._solve_subproblem(0, proj_old, M, Dprime, max_inner_iters, grad_func, desc_step, suff_desc, iter_)
Xchange = torch.norm(proj - proj_old)**2
if Xchange < 1e-4:
break
self.logger.info('Projection done with {} iterations.'.format(iter_+1))
return proj
|
the-stack_106_15200
|
import os
import sys
import cv2
import torch
import logging
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from typing import List, Tuple, Union
import torchvision.transforms as T
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../../third_party', 'DGNet'))
from reIDmodel import ft_net, ft_netAB, ft_net_dense, PCB, PCB_test
from mot.structures import Detection
from .encode import Encoder, ENCODER_REGISTRY
@ENCODER_REGISTRY.register()
class DGNetEncoder(Encoder):
def __init__(self, model_path: str, name: str, input_size: Tuple[int] = (128, 256), **kwargs):
        super(DGNetEncoder, self).__init__()
self.name = name
self.model = ft_netAB(751, norm=False, stride=1, pool='max')
state_dict = torch.load(model_path)
self.model.load_state_dict(state_dict['a'], strict=False)
self.model.classifier1.classifier = nn.Sequential()
self.model.classifier2.classifier = nn.Sequential()
self.model = self.model.eval().cuda()
self.size = input_size
self.transform = T.Compose([
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
"""
# settings
ignored_params = list(map(id, self.model.classifiers.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, self.model.parameters())
self.optimizer_ft = optim.SGD([
{'params': base_params, 'lr': 0.0001},
{'params': self.model.classifiers.parameters(), 'lr': 0.0001},
], weight_decay=5e-4, momentum=0.9, nesterov=True)
self.scheduler = lr_scheduler.StepLR(self.optimizer_ft, step_size=40, gamma=0.1)
self.criterion = TripletLoss()
"""
def learn_online(self, feature_matrix, labels):
if feature_matrix:
self.model.train(True)
inputs = torch.Tensor(feature_matrix)
labels = torch.Tensor(labels)
inputs = Variable(inputs.cuda(), requires_grad=True)
labels = Variable(labels.cuda())
self.optimizer_ft.zero_grad()
loss, prec = self.criterion(inputs, labels)
loss.requires_grad_()
loss.backward()
self.scheduler.step()
print('Loss: {:.4f} Acc: {:.4f}'.format(loss, prec))
else:
print('Nothing to learn')
# Extract feature
def fliplr(self, img):
inv_idx = torch.arange(img.size(3) - 1, -1, -1).long() # N x C x H x W
img_flip = img.index_select(3, inv_idx)
return img_flip
def _preprocess(self, im_crops):
def _resize(im, size):
return cv2.resize(im.astype(np.float32) / 255., size)
im_batch = torch.cat([self.transform(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()
return im_batch
def normlize(self, f):
# f = f.squeeze()
fnorm = torch.norm(f, p=2, dim=1, keepdim=True)
f = f.div(fnorm.expand_as(f))
return f
def encode(self, detections: List[Detection], full_img: np.ndarray) -> List[object]:
features = torch.FloatTensor()
all_crops = []
for detection in detections:
box = detection.box
crop = full_img[int(box[1]):int(box[3]), int(box[0]):int(box[2])]
if crop.shape[0] * crop.shape[1] > 0:
all_crops.append(crop)
else:
all_crops.append(np.ones((10, 10, 3)).astype(np.float32) * 255)
if len(detections) != 0:
im_batch = self._preprocess(all_crops)
n, c, h, w = im_batch.shape
ff = torch.FloatTensor(n, 1024).zero_()
for i in range(2):
if (i == 1):
im_batch = self.fliplr(im_batch)
input_img = Variable(im_batch.cuda())
f, x = self.model(input_img)
x[0] = self.normlize(x[0])
x[1] = self.normlize(x[1])
f = torch.cat((x[0], x[1]), dim=1) # use 512-dim feature
f = f.data.cpu()
ff = ff + f
ff[:, 0:512] = self.normlize(ff[:, 0:512])
ff[:, 512:1024] = self.normlize(ff[:, 512:1024])
ff[:, 512:1024] = ff[:, 512:1024] * 0.7
return torch.cat((features, ff), 0).numpy()
else:
return []
|
the-stack_106_15201
|
# Copyright (c) 2019-2020 by Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Isolate Python 3.5 version-specific semantic actions here.
"""
from xdis import co_flags_is_async, iscode
from uncompyle6.semantics.consts import (
INDENT_PER_LEVEL,
PRECEDENCE,
TABLE_DIRECT,
)
from uncompyle6.semantics.helper import flatten_list, gen_function_parens_adjust
#######################
# Python 3.5+ Changes #
#######################
def customize_for_version35(self, version):
TABLE_DIRECT.update(
{
# nested await expressions like:
# return await (await bar())
# need parenthesis.
"await_expr": ("await %p", (0, PRECEDENCE["await_expr"]-1)),
"await_stmt": ("%|%c\n", 0),
"async_for_stmt": ("%|async for %c in %c:\n%+%|%c%-\n\n", 9, 1, 25),
"async_forelse_stmt": (
"%|async for %c in %c:\n%+%c%-%|else:\n%+%c%-\n\n",
9,
1,
25,
(27, "else_suite"),
),
"async_with_stmt": ("%|async with %c:\n%+%c%-", (0, "expr"), 3),
"async_with_as_stmt": (
"%|async with %c as %c:\n%+%c%-",
(0, "expr"),
(2, "store"),
3,
),
"unmap_dict": ("{**%C}", (0, -1, ", **")),
# "unmapexpr": ( "{**%c}", 0), # done by n_unmapexpr
}
)
def async_call(node):
self.f.write("async ")
node.kind == "call"
p = self.prec
self.prec = 80
self.template_engine(("%c(%P)", 0, (1, -4, ", ", 100)), node)
self.prec = p
node.kind == "async_call"
self.prune()
self.n_async_call = async_call
def n_build_list_unpack(node):
"""
prettyprint a list or tuple
"""
p = self.prec
self.prec = 100
lastnode = node.pop()
lastnodetype = lastnode.kind
# If this build list is inside a CALL_FUNCTION_VAR,
# then the first * has already been printed.
# Until I have a better way to check for CALL_FUNCTION_VAR,
# will assume that if the text ends in *.
last_was_star = self.f.getvalue().endswith("*")
if lastnodetype.startswith("BUILD_LIST"):
self.write("[")
endchar = "]"
flat_elems = flatten_list(node)
self.indent_more(INDENT_PER_LEVEL)
sep = ""
for elem in flat_elems:
if elem in ("ROT_THREE", "EXTENDED_ARG"):
continue
assert elem == "expr"
line_number = self.line_number
use_star = True
value = self.traverse(elem)
if value.startswith("("):
assert value.endswith(")")
use_star = False
value = value[1:-1].rstrip(
" "
) # Remove starting "(" and trailing ")" and additional spaces
if value == "":
pass
else:
if value.endswith(","): # if args has only one item
value = value[:-1]
if line_number != self.line_number:
sep += "\n" + self.indent + INDENT_PER_LEVEL[:-1]
else:
if sep != "":
sep += " "
if not last_was_star and use_star:
sep += "*"
pass
else:
last_was_star = False
self.write(sep, value)
sep = ","
self.write(endchar)
self.indent_less(INDENT_PER_LEVEL)
self.prec = p
self.prune()
return
self.n_build_list_unpack = n_build_list_unpack
def n_call(node):
p = self.prec
self.prec = 100
mapping = self._get_mapping(node)
table = mapping[0]
key = node
for i in mapping[1:]:
key = key[i]
pass
if key.kind.startswith("CALL_FUNCTION_VAR_KW"):
# Python 3.5 changes the stack position of
# *args: kwargs come after *args whereas
# in earlier Pythons, *args is at the end
# which simplifies things from our
# perspective. Python 3.6+ replaces
# CALL_FUNCTION_VAR_KW with
# CALL_FUNCTION_EX We will just swap the
# order to make it look like earlier
# Python 3.
entry = table[key.kind]
kwarg_pos = entry[2][1]
args_pos = kwarg_pos - 1
# Put last node[args_pos] after subsequent kwargs
while node[kwarg_pos] == "kwarg" and kwarg_pos < len(node):
# swap node[args_pos] with node[kwargs_pos]
node[kwarg_pos], node[args_pos] = node[args_pos], node[kwarg_pos]
args_pos = kwarg_pos
kwarg_pos += 1
elif key.kind.startswith("CALL_FUNCTION_VAR"):
# CALL_FUNCTION_VAR's top element of the stack contains
# the variable argument list, then comes
# annotation args, then keyword args.
# In the most least-top-most stack entry, but position 1
# in node order, the positional args.
argc = node[-1].attr
nargs = argc & 0xFF
kwargs = (argc >> 8) & 0xFF
# FIXME: handle annotation args
if nargs > 0:
template = ("%c(%P, ", 0, (1, nargs + 1, ", ", 100))
else:
template = ("%c(", 0)
self.template_engine(template, node)
args_node = node[-2]
if args_node in ("pos_arg", "expr"):
args_node = args_node[0]
if args_node == "build_list_unpack":
template = ("*%P)", (0, len(args_node) - 1, ", *", 100))
self.template_engine(template, args_node)
else:
if len(node) - nargs > 3:
template = ("*%c, %P)", nargs + 1, (nargs + kwargs + 1, -1, ", ", 100))
else:
template = ("*%c)", nargs + 1)
self.template_engine(template, node)
self.prec = p
self.prune()
else:
gen_function_parens_adjust(key, node)
self.prec = 100
self.default(node)
self.n_call = n_call
def is_async_fn(node):
code_node = node[0][0]
for n in node[0]:
if hasattr(n, "attr") and iscode(n.attr):
code_node = n
break
pass
pass
is_code = hasattr(code_node, "attr") and iscode(code_node.attr)
return is_code and co_flags_is_async(code_node.attr.co_flags)
def n_function_def(node):
if is_async_fn(node):
self.template_engine(("\n\n%|async def %c\n", -2), node)
else:
self.default(node)
self.prune()
self.n_function_def = n_function_def
def n_mkfuncdeco0(node):
if is_async_fn(node):
self.template_engine(("%|async def %c\n", 0), node)
else:
self.default(node)
self.prune()
self.n_mkfuncdeco0 = n_mkfuncdeco0
def unmapexpr(node):
last_n = node[0][-1]
for n in node[0]:
self.preorder(n)
if n != last_n:
self.f.write(", **")
pass
pass
self.prune()
pass
self.n_unmapexpr = unmapexpr
# FIXME: start here
def n_list_unpack(node):
"""
prettyprint an unpacked list or tuple
"""
p = self.prec
self.prec = 100
lastnode = node.pop()
lastnodetype = lastnode.kind
# If this build list is inside a CALL_FUNCTION_VAR,
# then the first * has already been printed.
# Until I have a better way to check for CALL_FUNCTION_VAR,
# will assume that if the text ends in *.
last_was_star = self.f.getvalue().endswith("*")
if lastnodetype.startswith("BUILD_LIST"):
self.write("[")
endchar = "]"
elif lastnodetype.startswith("BUILD_TUPLE"):
# Tuples can appear places that can NOT
# have parenthesis around them, like array
# subscripts. We check for that by seeing
# if a tuple item is some sort of slice.
no_parens = False
for n in node:
if n == "expr" and n[0].kind.startswith("build_slice"):
no_parens = True
break
pass
if no_parens:
endchar = ""
else:
self.write("(")
endchar = ")"
pass
elif lastnodetype.startswith("BUILD_SET"):
self.write("{")
endchar = "}"
elif lastnodetype.startswith("BUILD_MAP_UNPACK"):
self.write("{*")
endchar = "}"
elif lastnodetype.startswith("ROT_TWO"):
self.write("(")
endchar = ")"
else:
raise TypeError(
"Internal Error: n_build_list expects list, tuple, set, or unpack"
)
flat_elems = flatten_list(node)
self.indent_more(INDENT_PER_LEVEL)
sep = ""
for elem in flat_elems:
if elem in ("ROT_THREE", "EXTENDED_ARG"):
continue
assert elem == "expr"
line_number = self.line_number
value = self.traverse(elem)
if elem[0] == "tuple":
assert value[0] == "("
assert value[-1] == ")"
value = value[1:-1]
if value[-1] == ",":
# singleton tuple
value = value[:-1]
else:
value = "*" + value
if line_number != self.line_number:
sep += "\n" + self.indent + INDENT_PER_LEVEL[:-1]
else:
if sep != "":
sep += " "
if not last_was_star:
pass
else:
last_was_star = False
self.write(sep, value)
sep = ","
if lastnode.attr == 1 and lastnodetype.startswith("BUILD_TUPLE"):
self.write(",")
self.write(endchar)
self.indent_less(INDENT_PER_LEVEL)
self.prec = p
self.prune()
return
self.n_tuple_unpack = n_list_unpack
|
the-stack_106_15202
|
#!/usr/bin/env python3
"""
Created on 2022-01-29 15:27
@author: johannes
"""
RAIN_MAPPER = {
'rainh': ['date', 'hour'],
'raind': ['date'],
'rainw': ['week'],
'rainm': ['month'],
'raint': ['year']
}
def get_rainframe(df, parameter):
"""Return a grouped dataframe based on the timing attribute."""
if not df.empty:
return df.groupby(
[df['timestamp'].dt.__getattribute__(t_spec)
for t_spec in RAIN_MAPPER.get(parameter)]
).max().reset_index(drop=True)
else:
return df
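

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: it assumes pandas
    # is installed and that callers pass a DataFrame with a datetime64
    # 'timestamp' column, which is what get_rainframe expects. The readings
    # below are made up purely for illustration.
    import pandas as pd

    demo = pd.DataFrame({
        'timestamp': pd.date_range('2022-01-01', periods=8, freq='15min'),
        'rainh': [0, 1, 2, 3, 1, 2, 3, 4],
    })
    # Two rows: the maximum 15-minute reading within each clock hour.
    print(get_rainframe(demo, 'rainh'))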
|
the-stack_106_15203
|
import sys
sys.path.insert(0, "../../")
from pc_processor.dataset.semantic_kitti import SemanticKitti
import numpy as np
import os
import shutil
def createFovDataset(src_root, dst_root, seq):
dataset = SemanticKitti(
root=src_root,
sequences=[seq],
config_path="../../pc_processor/dataset/semantic_kitti/semantic-kitti.yaml"
)
if not os.path.isdir(dst_root):
os.makedirs(dst_root)
data_len = len(dataset)
for i in range(data_len):
print("processing {}|{} ...".format(data_len,i))
pointcloud, sem_label, inst_label = dataset.loadDataByIndex(i)
image = dataset.loadImage(i)
image = np.array(image)
seq_id, frame_id = dataset.parsePathInfoByIndex(i)
mapped_pointcloud, keep_mask = dataset.mapLidar2Camera(
seq_id, pointcloud[:, :3], image.shape[1], image.shape[0])
keep_pointcloud = pointcloud[keep_mask]
keep_sem_label = sem_label[keep_mask].astype(np.int32)
keep_inst_label = inst_label[keep_mask].astype(np.int32)
keep_label = (keep_inst_label << 16) + keep_sem_label
# check path
pointcloud_path = os.path.join(dst_root, seq_id, "velodyne")
if not os.path.isdir(pointcloud_path):
os.makedirs(pointcloud_path)
label_path = os.path.join(dst_root, seq_id, "labels")
if not os.path.isdir(label_path):
os.makedirs(label_path)
pointcloud_file = os.path.join(pointcloud_path, "{}.bin".format(frame_id))
label_file = os.path.join(label_path, "{}.label".format(frame_id))
keep_pointcloud.tofile(pointcloud_file)
keep_label.tofile(label_file)
print("copy image_2 folder ...")
# copy image and calib files
src_img_folder = os.path.join(src_root, "{:02d}".format(seq), "image_2")
dst_img_folder = os.path.join(dst_root, "{:02d}".format(seq), "image_2")
shutil.copytree(src_img_folder, dst_img_folder)
target_files = ["calib.txt", "poses.txt", "times.txt"]
print("copy calib files ...")
for f_name in target_files:
src_file_path = os.path.join(src_root, "{:02d}".format(seq), f_name)
dst_file_path = os.path.join(dst_root, "{:02d}".format(seq), f_name)
shutil.copyfile(src_file_path, dst_file_path)
"""
extract fov data from semantic-kitti and construct data set semantic-kitti-fov
"""
if __name__ == "__main__":
for seq in range(0, 11):
createFovDataset(
src_root="/path/to/semantic-kitti/sequences", # path to the original semantic-kitti dataset
dst_root="/path/to/semantic-kitti-fov/sequences", # path to the generated semantic-kitti-fov dataset
seq=seq
)
|
the-stack_106_15204
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import gettext
import iso8601
import mock
from oslo_versionedobjects import base as object_base
from oslo_versionedobjects import exception as object_exception
from oslo_versionedobjects import fixture as object_fixture
import six
from watcher.common import context
from watcher.objects import base
from watcher.objects import fields
from watcher.tests import base as test_base
gettext.install('watcher')
@base.WatcherObjectRegistry.register
class MyObj(base.WatcherPersistentObject, base.WatcherObject,
base.WatcherObjectDictCompat):
VERSION = '1.5'
fields = {'foo': fields.IntegerField(),
'bar': fields.StringField(),
'missing': fields.StringField()}
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@object_base.remotable_classmethod
def query(cls, context):
obj = cls(context)
obj.foo = 1
obj.bar = 'bar'
obj.obj_reset_changes()
return obj
@object_base.remotable
def marco(self, context=None):
return 'polo'
@object_base.remotable
def update_test(self, context=None):
if context and context.user == 'alternate':
self.bar = 'alternate-context'
else:
self.bar = 'updated'
@object_base.remotable
def save(self, context=None):
self.obj_reset_changes()
@object_base.remotable
def refresh(self, context=None):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@object_base.remotable
def modify_save_modify(self, context=None):
self.bar = 'meow'
self.save()
self.foo = 42
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@object_base.remotable_classmethod
def get(cls, *args, **kwargs):
pass
@base.WatcherObjectRegistry.register_if(False)
class WatcherTestSubclassedObject(MyObj):
fields = {'new_field': fields.StringField()}
class _LocalTest(test_base.TestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
base.WatcherObject.indirection_api = None
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.WatcherObject.indirection_api
base.WatcherObject.indirection_api = None
yield
base.WatcherObject.indirection_api = _api
class _TestObject(object):
def test_hydration_type_error(self):
primitive = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
def test_hydration_bad_ns(self):
primitive = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'foo',
'watcher_object.version': '1.5',
'watcher_object.data': {'foo': 1}}
self.assertRaises(object_exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_dehydration(self):
expected = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.data': {'foo': 1}}
obj = MyObj(self.context)
obj.foo = 1
obj.obj_reset_changes()
self.assertEqual(expected, obj.obj_to_primitive())
def test_get_updates(self):
obj = MyObj(self.context)
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_object_property(self):
obj = MyObj(self.context, foo=1)
self.assertEqual(1, obj.foo)
def test_object_property_type_error(self):
obj = MyObj(self.context)
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj(self.context)
self.assertEqual('loaded!', obj.bar)
def test_load_in_base(self):
@base.WatcherObjectRegistry.register_if(False)
class Foo(base.WatcherPersistentObject, base.WatcherObject,
base.WatcherObjectDictCompat):
fields = {'foobar': fields.IntegerField()}
obj = Foo(self.context)
self.assertRaisesRegex(
NotImplementedError, "Cannot load 'foobar' in the base class",
getattr, obj, 'foobar')
def test_loaded_in_primitive(self):
obj = MyObj(self.context)
obj.foo = 1
obj.obj_reset_changes()
self.assertEqual('loaded!', obj.bar)
expected = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.changes': ['bar'],
'watcher_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(expected, obj.obj_to_primitive())
def test_changes_in_primitive(self):
obj = MyObj(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
primitive = obj.obj_to_primitive()
self.assertIn('watcher_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(set(['foo']), obj2.obj_what_changed())
obj2.obj_reset_changes()
self.assertEqual(set(), obj2.obj_what_changed())
def test_unknown_objtype(self):
self.assertRaises(object_exception.UnsupportedObjectError,
base.WatcherObject.obj_class_from_name, 'foo', '1.0')
def test_with_alternate_context(self):
ctxt1 = context.RequestContext('foo', 'foo')
ctxt2 = context.RequestContext(user='alternate')
obj = MyObj.query(ctxt1)
obj.update_test(ctxt2)
self.assertEqual('alternate-context', obj.bar)
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(object_exception.OrphanedObjectError,
obj.update_test)
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
obj.update_test(self.context)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
self.assertEqual(123, obj.foo)
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
obj.save()
self.assertEqual(set([]), obj.obj_what_changed())
self.assertEqual(123, obj.foo)
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(set(['foo']), obj.obj_what_changed())
obj.refresh()
self.assertEqual(set([]), obj.obj_what_changed())
self.assertEqual(321, obj.foo)
self.assertEqual('refreshed', obj.bar)
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(set(['bar']), obj.obj_what_changed())
obj.modify_save_modify(self.context)
self.assertEqual(set(['foo']), obj.obj_what_changed())
self.assertEqual(42, obj.foo)
self.assertEqual('meow', obj.bar)
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual('bar', obj.bar)
result = obj.marco()
self.assertEqual('polo', result)
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(1, obj.foo)
obj.update_test()
self.assertEqual('updated', obj.bar)
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5, 0, 0, tzinfo=iso8601.iso8601.Utc())
datatime = fields.DateTimeField()
obj = MyObj(self.context)
obj.created_at = dt
obj.updated_at = dt
expected = {'watcher_object.name': 'MyObj',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.5',
'watcher_object.changes':
['created_at', 'updated_at'],
'watcher_object.data':
{'created_at': datatime.stringify(dt),
'updated_at': datatime.stringify(dt),
}
}
actual = obj.obj_to_primitive()
# watcher_object.changes is built from a set and order is undefined
self.assertEqual(sorted(expected['watcher_object.changes']),
sorted(actual['watcher_object.changes']))
del expected[
'watcher_object.changes'], actual['watcher_object.changes']
self.assertEqual(expected, actual)
def test_contains(self):
obj = MyObj(self.context)
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(self.context, foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_get(self):
obj = MyObj(self.context, foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = (
list(base.WatcherObject.fields) +
list(base.WatcherPersistentObject.fields))
myobj_fields = ['foo', 'bar', 'missing'] + base_fields
myobj3_fields = ['new_field']
self.assertTrue(issubclass(WatcherTestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(WatcherTestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(WatcherTestSubclassedObject.fields.keys()))
def test_get_changes(self):
obj = MyObj(self.context)
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
@base.WatcherObjectRegistry.register_if(False)
class TestObj(base.WatcherPersistentObject, base.WatcherObject,
base.WatcherObjectDictCompat):
fields = {'foo': fields.IntegerField()}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj(self.context)
self.assertEqual(set(['created_at', 'updated_at', 'deleted_at',
'foo', 'bar']),
set(obj.obj_fields))
def test_refresh_object(self):
@base.WatcherObjectRegistry.register_if(False)
class TestObj(base.WatcherPersistentObject, base.WatcherObject,
base.WatcherObjectDictCompat):
fields = {'foo': fields.IntegerField(),
'bar': fields.StringField()}
obj = TestObj(self.context)
current_obj = TestObj(self.context)
obj.foo = 10
obj.bar = 'obj.bar'
current_obj.foo = 2
current_obj.bar = 'current.bar'
obj.obj_refresh(current_obj)
self.assertEqual(obj.foo, 2)
self.assertEqual(obj.bar, 'current.bar')
def test_obj_constructor(self):
obj = MyObj(self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_assign_value_without_DictCompat(self):
class TestObj(base.WatcherObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.StringField()}
obj = TestObj(self.context)
obj.foo = 10
err_message = ''
try:
obj['bar'] = 'value'
except TypeError as e:
err_message = six.text_type(e)
finally:
self.assertIn("'TestObj' object does not support item assignment",
err_message)
class TestObject(_LocalTest, _TestObject):
pass
# The hashes help developers check whether a change to an object needs a
# version bump. Each value is the md5 hash of the object's fields and remotable methods.
# The fingerprint values should only be changed if there is a version bump.
expected_object_fingerprints = {
'Goal': '1.0-93881622db05e7b67a65ca885b4a022e',
'Strategy': '1.1-73f164491bdd4c034f48083a51bdeb7b',
'AuditTemplate': '1.1-b291973ffc5efa2c61b24fe34fdccc0b',
'Audit': '1.2-910522db78b7b1cb59df614754656db4',
'ActionPlan': '2.0-394f1abbf5d73d7b6675a118fe1a0284',
'Action': '2.0-1dd4959a7e7ac30c62ef170fe08dd935',
'EfficacyIndicator': '1.0-655b71234a82bc7478aff964639c4bb0',
'ScoringEngine': '1.0-4abbe833544000728e17bd9e83f97576',
'Service': '1.0-4b35b99ada9677a882c9de2b30212f35',
'MyObj': '1.5-23c516d1e842f365f694e688d34e47c3',
}
def get_watcher_objects():
"""Get Watcher versioned objects
This returns a dict of versioned objects which are
    in the Watcher project namespace only, i.e. it excludes
    objects from os-vif and other 3rd-party modules.
:return: a dict mapping class names to lists of versioned objects
"""
all_classes = base.WatcherObjectRegistry.obj_classes()
watcher_classes = {}
for name in all_classes:
objclasses = all_classes[name]
if (objclasses[0].OBJ_PROJECT_NAMESPACE !=
base.WatcherObject.OBJ_PROJECT_NAMESPACE):
continue
watcher_classes[name] = objclasses
return watcher_classes
class TestObjectVersions(test_base.TestCase):
def test_object_version_check(self):
classes = base.WatcherObjectRegistry.obj_classes()
checker = object_fixture.ObjectVersionChecker(obj_classes=classes)
# Compute the difference between actual fingerprints and
# expect fingerprints. expect = actual = {} if there is no change.
expect, actual = checker.test_hashes(expected_object_fingerprints)
self.assertEqual(expect, actual,
"Some objects fields or remotable methods have been "
"modified. Please make sure the version of those "
"objects have been bumped and then update "
"expected_object_fingerprints with the new hashes. ")
class TestObjectSerializer(test_base.TestCase):
def test_object_serialization(self):
ser = base.WatcherObjectSerializer()
obj = MyObj(self.context)
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('watcher_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.WatcherObjectSerializer()
obj = MyObj(self.context)
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertFalse(isinstance(item, base.WatcherObject))
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
@mock.patch('watcher.objects.base.WatcherObject.indirection_api')
def _test_deserialize_entity_newer(self, obj_version, backported_to,
mock_indirection_api,
my_version='1.6'):
ser = base.WatcherObjectSerializer()
mock_indirection_api.object_backport_versions.return_value \
= 'backported'
@base.WatcherObjectRegistry.register
class MyTestObj(MyObj):
VERSION = my_version
obj = MyTestObj(self.context)
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertFalse(
mock_indirection_api.object_backport_versions.called)
else:
self.assertEqual('backported', result)
versions = object_base.obj_tree_get_versions('MyTestObj')
mock_indirection_api.object_backport_versions.assert_called_with(
self.context, primitive, versions)
def test_deserialize_entity_newer_version_backports(self):
"Test object with unsupported (newer) version"
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_same_revision_does_not_backport(self):
"Test object with supported revision"
self._test_deserialize_entity_newer('1.6', None)
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
"Test object with supported revision"
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
"Test object with supported (newer) revision"
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
"Test object with unsupported (newer) version and revision"
self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1')
class TestRegistry(test_base.TestCase):
@mock.patch('watcher.objects.base.objects')
def test_hook_chooses_newer_properly(self, mock_objects):
reg = base.WatcherObjectRegistry()
reg.registration_hook(MyObj, 0)
class MyNewerObj(object):
VERSION = '1.123'
@classmethod
def obj_name(cls):
return 'MyObj'
self.assertEqual(MyObj, mock_objects.MyObj)
reg.registration_hook(MyNewerObj, 0)
self.assertEqual(MyNewerObj, mock_objects.MyObj)
@mock.patch('watcher.objects.base.objects')
def test_hook_keeps_newer_properly(self, mock_objects):
reg = base.WatcherObjectRegistry()
reg.registration_hook(MyObj, 0)
class MyOlderObj(object):
VERSION = '1.1'
@classmethod
def obj_name(cls):
return 'MyObj'
self.assertEqual(MyObj, mock_objects.MyObj)
reg.registration_hook(MyOlderObj, 0)
self.assertEqual(MyObj, mock_objects.MyObj)
|
the-stack_106_15205
|
import logging
from typing import NamedTuple, Optional, Dict, Tuple
import transformers
from transformers import AutoModel
logger = logging.getLogger(__name__)
class TransformerSpec(NamedTuple):
model_name: str
override_weights_file: Optional[str] = None
override_weights_strip_prefix: Optional[str] = None
_model_cache: Dict[TransformerSpec, transformers.PreTrainedModel] = {}
def get(
model_name: str,
make_copy: bool,
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None,
**kwargs,
) -> transformers.PreTrainedModel:
"""
Returns a transformer model from the cache.
# Parameters
model_name : `str`
The name of the transformer, for example `"bert-base-cased"`
make_copy : `bool`
If this is `True`, return a copy of the model instead of the cached model itself. If you want to modify the
parameters of the model, set this to `True`. If you want only part of the model, set this to `False`, but
make sure to `copy.deepcopy()` the bits you are keeping.
override_weights_file : `str`, optional
If set, this specifies a file from which to load alternate weights that override the
weights from huggingface. The file is expected to contain a PyTorch `state_dict`, created
with `torch.save()`.
override_weights_strip_prefix : `str`, optional
If set, strip the given prefix from the state dict when loading it.
"""
global _model_cache
spec = TransformerSpec(model_name, override_weights_file, override_weights_strip_prefix)
transformer = _model_cache.get(spec, None)
if transformer is None:
if override_weights_file is not None:
from allennlp.common.file_utils import cached_path
import torch
override_weights_file = cached_path(override_weights_file)
override_weights = torch.load(override_weights_file)
if override_weights_strip_prefix is not None:
def strip_prefix(s):
if s.startswith(override_weights_strip_prefix):
return s[len(override_weights_strip_prefix) :]
else:
return s
valid_keys = {
k
for k in override_weights.keys()
if k.startswith(override_weights_strip_prefix)
}
if len(valid_keys) > 0:
logger.info(
"Loading %d tensors from %s", len(valid_keys), override_weights_file
)
else:
raise ValueError(
f"Specified prefix of '{override_weights_strip_prefix}' means no tensors "
f"will be loaded from {override_weights_file}."
)
override_weights = {strip_prefix(k): override_weights[k] for k in valid_keys}
transformer = AutoModel.from_pretrained(
model_name,
state_dict=override_weights,
**kwargs,
)
else:
transformer = AutoModel.from_pretrained(
model_name,
**kwargs,
)
_model_cache[spec] = transformer
if make_copy:
import copy
return copy.deepcopy(transformer)
else:
return transformer
_tokenizer_cache: Dict[Tuple[str, frozenset], transformers.PreTrainedTokenizer] = {}
def get_tokenizer(model_name: str, **kwargs) -> transformers.PreTrainedTokenizer:
cache_key = (model_name, frozenset(kwargs.items()))
global _tokenizer_cache
tokenizer = _tokenizer_cache.get(cache_key, None)
if tokenizer is None:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name,
**kwargs,
)
_tokenizer_cache[cache_key] = tokenizer
return tokenizer
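

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module. Running it
    # downloads "bert-base-cased" from the HuggingFace hub, so treat it only
    # as an illustration of the caching behaviour: a repeated get() with
    # make_copy=False returns the very same cached module object.
    shared = get("bert-base-cased", make_copy=False)
    again = get("bert-base-cased", make_copy=False)
    assert shared is again
    copied = get("bert-base-cased", make_copy=True)
    assert copied is not shared
    tokenizer = get_tokenizer("bert-base-cased")
    print(type(shared).__name__, type(copied).__name__, type(tokenizer).__name__)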
|
the-stack_106_15209
|
import typing as t
import torch
import torch.nn as nn
from ucsgnet.common import RNN_LATENT_SIZE, TrainingStage
from ucsgnet.ucsgnet.csg_layers import RelationLayer, Scaler
from ucsgnet.ucsgnet.extractors import FeatureExtractor
from ucsgnet.ucsgnet.shape_evaluators import CompundEvaluator, PlanesEvaluator
class CSGNet(nn.Module):
def __init__(
self,
extractor: FeatureExtractor,
decoder: nn.Module,
evaluator: t.Union[PlanesEvaluator, CompundEvaluator],
shapes_per_type: int,
out_shapes_per_layer: int,
binarizing_threshold: float,
num_csg_layers: int,
):
super().__init__()
self.encoder_ = extractor
self.decoder_ = decoder
self.evaluator_ = evaluator
self.shapes_per_type = shapes_per_type
self.out_shapes_per_layer = out_shapes_per_layer
self.binarizing_threshold = binarizing_threshold
self.use_planes = isinstance(self.evaluator_, PlanesEvaluator)
self.evaluators_count = 1 if self.use_planes else len(self.evaluator_)
self.num_output_shapes_from_evaluator = (
self.shapes_per_type * self.evaluators_count
)
layers = []
in_shapes = self.num_output_shapes_from_evaluator
out_shapes = self.out_shapes_per_layer
self.scaler_ = Scaler()
num_layers = num_csg_layers
for i in range(num_layers):
if i == num_layers - 1:
out_shapes = 1
layers.append(
RelationLayer(
in_shapes,
out_shapes,
self.binarizing_threshold,
extractor.out_features,
)
)
in_shapes = out_shapes * 4 + self.num_output_shapes_from_evaluator
self.csg_layers_: nn.ModuleList[RelationLayer] = nn.ModuleList(layers)
self._base_mode = TrainingStage.INITIAL_TRAINING
self.gru_encoder = nn.GRUCell(
input_size=extractor.out_features,
hidden_size=RNN_LATENT_SIZE,
bias=True,
)
self._gru_hidden_state = nn.Parameter(
torch.Tensor(1, RNN_LATENT_SIZE), requires_grad=True
)
self._retained_latent_code: t.Optional[torch.Tensor] = None
self._retained_shape_params: t.Optional[torch.Tensor] = None
nn.init.constant_(self._gru_hidden_state, 0.01)
def turn_fine_tuning_mode(self):
self.switch_mode(TrainingStage.FINE_TUNING)
def turn_initial_training_mode(self):
self.switch_mode(TrainingStage.INITIAL_TRAINING)
def switch_mode(self, new_mode: TrainingStage):
self._base_mode = new_mode
self.scaler_.switch_mode(new_mode)
for layer in self.csg_layers_: # type: RelationLayer
layer.switch_mode(new_mode)
def clear_retained_codes_and_params(self):
self._retained_shape_params = None
self._retained_latent_code = None
def forward(
self,
images: torch.Tensor,
points: torch.Tensor,
*,
return_distances_to_base_shapes: bool = False,
return_intermediate_output_csg: bool = False,
return_scaled_distances_to_shapes: bool = False,
retain_latent_code: bool = False,
retain_shape_params: bool = False
) -> t.Union[torch.Tensor, t.Tuple[torch.Tensor, ...]]:
batch_size = images.shape[0]
if retain_latent_code and self._retained_latent_code is not None:
code = self._retained_latent_code
else:
code = self.encoder_(images)
if retain_latent_code:
self._retained_latent_code = code
if retain_shape_params and self._retained_shape_params is not None:
shape_params = self._retained_shape_params
else:
shape_params = self.decoder_(code)
if retain_shape_params:
self._retained_shape_params = shape_params
if self.use_planes:
base_shapes = self.evaluator_(
shape_params, points
) # -> batch, num_points, num_shapes
else:
points = points.unsqueeze(
dim=1
            )  # broadcasting across different numbers of shapes
base_shapes = self.evaluator_(
shape_params, points
) # -> batch, num_shapes, num_points
base_shapes = base_shapes.permute(
(0, 2, 1)
) # -> batch, num_points, num_shapes
scaled_shapes = 1 - self.scaler_(base_shapes)
last_distances = scaled_shapes
partial_distances = [last_distances]
code = self.gru_encoder(
code,
self._gru_hidden_state.expand(
[batch_size, self._gru_hidden_state.shape[1]]
),
)
for index, csg_layer in enumerate(
self.csg_layers_
): # type: (int, RelationLayer)
if index > 0:
last_distances = torch.cat(
(last_distances, scaled_shapes), dim=-1
)
last_distances = csg_layer(last_distances, code)
partial_distances.append(last_distances)
code = self.gru_encoder(
csg_layer.emit_parameters(batch_size), code
)
last_distances = last_distances[..., 0] # taking union
distances = last_distances.clamp(0, 1)
outputs = [distances]
if return_distances_to_base_shapes:
outputs.append(base_shapes)
if return_intermediate_output_csg:
outputs.append(partial_distances)
if return_scaled_distances_to_shapes:
outputs.append(scaled_shapes)
return tuple(outputs) if len(outputs) > 1 else outputs[0]
def get_latent_codes_for_each_layer(
self, images: torch.Tensor, points: torch.Tensor
    ) -> t.Tuple[t.Dict[str, torch.Tensor], t.Dict[str, torch.Tensor]]:
batch_size = images.shape[0]
code = self.encoder_(images)
shape_params = self.decoder_(code)
if self.use_planes:
base_shapes = self.evaluator_(
shape_params, points
) # -> batch, num_points, num_shapes
else:
points = points.unsqueeze(
dim=1
            )  # broadcasting across different numbers of shapes
base_shapes = self.evaluator_(
shape_params, points
) # -> batch, num_shapes, num_points
base_shapes = base_shapes.permute(
(0, 2, 1)
) # -> batch, num_points, num_shapes
scaled_shapes = 1 - self.scaler_(base_shapes)
last_distances = scaled_shapes
partial_distances = [last_distances]
code = self.gru_encoder(
code,
self._gru_hidden_state.expand(
[batch_size, self._gru_hidden_state.shape[1]]
),
)
codes = {"base": code}
emits = {}
csg_layer: RelationLayer
for index, csg_layer in enumerate(self.csg_layers_):
if index > 0:
last_distances = torch.cat(
(last_distances, scaled_shapes), dim=-1
)
last_distances = csg_layer(last_distances, code)
partial_distances.append(last_distances)
emitted_parameters = csg_layer.emit_parameters(batch_size)
code = self.gru_encoder(emitted_parameters, code)
codes["layer_{}".format(index)] = code
emits["emits_{}".format(index)] = emitted_parameters
return codes, emits
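# Shape summary, inferred from forward(): `images` -> encoder_ -> latent code;
# `points` -> evaluator_ -> per-shape distances of shape (batch, num_points, num_shapes);
# successive RelationLayers combine these, and the final clamped union `distances`
# has shape (batch, num_points).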
|
the-stack_106_15211
|
import argparse
from datetime import datetime
import pandas as pd
import FundamentalAnalysis as fa # Financial Modeling Prep
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.dataframe_helpers import clean_df_index
from gamestonk_terminal.helper_funcs import (
long_number_format,
check_positive,
parse_known_args_and_warn,
get_flair,
)
from gamestonk_terminal.menu import session
from prompt_toolkit.completion import NestedCompleter
def print_menu(s_ticker, s_start, s_interval):
""" Print help """
s_intraday = (f"Intraday {s_interval}", "Daily")[s_interval == "1440min"]
if s_start:
print(f"\n{s_intraday} Stock: {s_ticker} (from {s_start.strftime('%Y-%m-%d')})")
else:
print(f"\n{s_intraday} Stock: {s_ticker}")
print("\nFinancial Modeling Prep API")
print(" help show this financial modeling prep menu again")
print(" q quit this menu, and shows back to main menu")
print(" quit quit to abandon program")
print("")
print(" profile profile of the company")
print(" quote quote of the company")
print(" enterprise enterprise value of the company over time")
print(" dcf discounted cash flow of the company over time")
print(" income income statements of the company")
print(" balance balance sheet of the company")
print(" cash cash flow statement of the company")
print(" metrics key metrics of the company")
print(" ratios financial ratios of the company")
print(" growth financial statement growth of the company")
print("")
def menu(s_ticker, s_start, s_interval):
# Add list of arguments that the fundamental analysis parser accepts
fmp_parser = argparse.ArgumentParser(prog="fmp", add_help=False)
choices = [
"help",
"q",
"quit",
"profile",
"quote",
"enterprise",
"dcf",
"fmp_income",
"fmp_balance",
"fmp_cash",
"metrics",
"ratios",
"growth",
]
fmp_parser.add_argument("cmd", choices=choices)
completer = NestedCompleter.from_nested_dict({c: None for c in choices})
print_menu(s_ticker, s_start, s_interval)
# Loop forever and ever
while True:
# Get input command from user
if session:
as_input = session.prompt(
f"{get_flair()} (fa)>(fmp)> ",
completer=completer,
)
else:
            as_input = input(f"{get_flair()} (fa)>(fmp)> ")
        # Parse the Financial Modeling Prep command against the list of possible commands
try:
(ns_known_args, l_args) = fmp_parser.parse_known_args(as_input.split())
except SystemExit:
print("The command selected doesn't exist\n")
continue
if ns_known_args.cmd == "help":
print_menu(s_ticker, s_start, s_interval)
elif ns_known_args.cmd == "q":
# Just leave the menu
return False
elif ns_known_args.cmd == "quit":
# Abandon the program
return True
# Details:
elif ns_known_args.cmd == "profile":
profile(l_args, s_ticker)
elif ns_known_args.cmd == "quote":
quote(l_args, s_ticker)
elif ns_known_args.cmd == "enterprise":
enterprise(l_args, s_ticker)
elif ns_known_args.cmd == "dcf":
discounted_cash_flow(l_args, s_ticker)
# Financial statement:
elif ns_known_args.cmd == "income":
income_statement(l_args, s_ticker)
elif ns_known_args.cmd == "balance":
balance_sheet(l_args, s_ticker)
elif ns_known_args.cmd == "cash":
cash_flow(l_args, s_ticker)
# Ratios:
elif ns_known_args.cmd == "metrics":
key_metrics(l_args, s_ticker)
elif ns_known_args.cmd == "ratios":
financial_ratios(l_args, s_ticker)
elif ns_known_args.cmd == "growth":
financial_statement_growth(l_args, s_ticker)
else:
print("Command not recognized!")
def profile(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="profile",
description="""
            Prints information about, among other things, the industry, sector, exchange and company
description. The following fields are expected: Address, Beta, Ceo, Changes, Cik, City
Company name, Country, Currency, Cusip, Dcf, Dcf diff, Default image, Description,
Exchange, Exchange short name, Full time employees, Image, Industry, Ipo date, Isin,
Last div, Mkt cap, Phone, Price, Range, Sector, State, Symbol, Vol avg, Website, Zip.
[Source: Financial Modeling Prep]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
df_fa = fa.profile(s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
clean_df_index(df_fa)
print(df_fa.drop(index=["Description", "Image"]).to_string(header=False))
print(f"\nImage: {df_fa.loc['Image'][0]}")
print(f"\nDescription: {df_fa.loc['Description'][0]}")
print("")
except Exception as e:
print(e)
print("")
return
def quote(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="quote",
description="""
Prints actual information about the company which is, among other things, the day high,
market cap, open and close price and price-to-equity ratio. The following fields are
expected: Avg volume, Change, Changes percentage, Day high, Day low, Earnings
announcement, Eps, Exchange, Market cap, Name, Open, Pe, Previous close, Price, Price
avg200, Price avg50, Shares outstanding, Symbol, Timestamp, Volume, Year high, and Year
low. [Source: Financial Modeling Prep]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
df_fa = fa.quote(s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
clean_df_index(df_fa)
df_fa.loc["Market cap"][0] = long_number_format(df_fa.loc["Market cap"][0])
df_fa.loc["Shares outstanding"][0] = long_number_format(
df_fa.loc["Shares outstanding"][0]
)
df_fa.loc["Volume"][0] = long_number_format(df_fa.loc["Volume"][0])
# Check if there is a valid earnings announcement
if df_fa.loc["Earnings announcement"][0]:
earning_announcement = datetime.strptime(
df_fa.loc["Earnings announcement"][0][0:19], "%Y-%m-%dT%H:%M:%S"
)
df_fa.loc["Earnings announcement"][
0
] = f"{earning_announcement.date()} {earning_announcement.time()}"
print(df_fa.to_string(header=False))
print("")
except Exception as e:
print(e)
print("")
return
def enterprise(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="enterprise",
description="""
Prints stock price, number of shares, market capitalization and
enterprise value over time. The following fields are expected: Add total debt,
Enterprise value, Market capitalization, Minus cash and cash equivalents, Number
of shares, Stock price, and Symbol. [Source: Financial Modeling Prep]
""",
)
parser.add_argument(
"-n",
"--num",
action="store",
dest="n_num",
type=check_positive,
default=1,
help="Number of latest years/quarters.",
)
parser.add_argument(
"-q",
"--quarter",
action="store_true",
default=False,
dest="b_quarter",
help="Quarter fundamental data flag.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
if ns_parser.n_num == 1:
pd.set_option("display.max_colwidth", None)
else:
pd.options.display.max_colwidth = 40
if ns_parser.b_quarter:
df_fa = fa.enterprise(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP, period="quarter"
)
else:
df_fa = fa.enterprise(s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
df_fa = clean_metrics_df(df_fa, num=ns_parser.n_num, mask=False)
print(df_fa)
print("")
except Exception as e:
print(e)
print("")
return
def discounted_cash_flow(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="dcf",
description="""
Prints the discounted cash flow of a company over time including the DCF of today. The
following fields are expected: DCF, Stock price, and Date. [Source: Financial Modeling
Prep]
""",
)
parser.add_argument(
"-n",
"--num",
action="store",
dest="n_num",
type=check_positive,
default=1,
help="Number of latest years/quarters.",
)
parser.add_argument(
"-q",
"--quarter",
action="store_true",
default=False,
dest="b_quarter",
help="Quarter fundamental data flag.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
if ns_parser.n_num == 1:
pd.set_option("display.max_colwidth", None)
else:
pd.options.display.max_colwidth = 40
if ns_parser.b_quarter:
df_fa = fa.discounted_cash_flow(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP, period="quarter"
)
else:
df_fa = fa.discounted_cash_flow(s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
df_fa = clean_metrics_df(df_fa, num=ns_parser.n_num, mask=False)
print(df_fa)
print("")
except Exception as e:
print(e)
print("")
return
def income_statement(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="inc",
description="""
Prints a complete income statement over time. This can be either quarterly or annually.
The following fields are expected: Accepted date, Cost and expenses, Cost of
revenue, Depreciation and amortization, Ebitda, Ebitdaratio, Eps, Epsdiluted, Filling
date, Final link, General and administrative expenses, Gross profit, Gross profit
ratio, Income before tax, Income before tax ratio, Income tax expense, Interest
expense, Link, Net income, Net income ratio, Operating expenses, Operating income,
Operating income ratio, Other expenses, Period, Research and development expenses,
Revenue, Selling and marketing expenses, Total other income expenses net, Weighted
average shs out, Weighted average shs out dil [Source: Financial Modeling Prep]
""",
)
parser.add_argument(
"-n",
"--num",
action="store",
dest="n_num",
type=check_positive,
default=1,
help="Number of latest years/quarters.",
)
parser.add_argument(
"-q",
"--quarter",
action="store_true",
default=False,
dest="b_quarter",
help="Quarter fundamental data flag.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
if ns_parser.n_num == 1:
pd.set_option("display.max_colwidth", None)
else:
pd.options.display.max_colwidth = 40
if ns_parser.b_quarter:
df_fa = fa.income_statement(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP, period="quarter"
)
else:
df_fa = fa.income_statement(s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
df_fa = clean_metrics_df(df_fa, num=ns_parser.n_num)
print(df_fa.drop(index=["Final link", "Link"]).to_string())
pd.set_option("display.max_colwidth", None)
print("")
print(df_fa.loc["Final link"].to_frame().to_string())
print("")
print(df_fa.loc["Link"].to_frame().to_string())
print("")
except Exception as e:
print(e)
print("")
return
def balance_sheet(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="bal",
description="""
Prints a complete balance sheet statement over time. This can be
either quarterly or annually. The following fields are expected: Accepted date,
Account payables, Accumulated other comprehensive income loss, Cash and cash
equivalents, Cash and short term investments, Common stock, Deferred revenue,
Deferred revenue non current, Deferred tax liabilities non current, Filling date,
Final link, Goodwill, Goodwill and intangible assets, Intangible assets, Inventory,
Link, Long term debt, Long term investments, Net debt, Net receivables, Other assets,
Other current assets, Other current liabilities, Other liabilities, Other non current
assets, Other non current liabilities, Othertotal stockholders equity, Period, Property
plant equipment net, Retained earnings, Short term debt, Short term investments, Tax
assets, Tax payables, Total assets, Total current assets, Total current liabilities,
Total debt, Total investments, Total liabilities, Total liabilities and stockholders
equity, Total non current assets, Total non current liabilities, and Total stockholders
equity. [Source: Financial Modeling Prep]
""",
)
parser.add_argument(
"-n",
"--num",
action="store",
dest="n_num",
type=check_positive,
default=1,
help="Number of latest years/quarters.",
)
parser.add_argument(
"-q",
"--quarter",
action="store_true",
default=False,
dest="b_quarter",
help="Quarter fundamental data flag.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
if ns_parser.n_num == 1:
pd.set_option("display.max_colwidth", None)
else:
pd.options.display.max_colwidth = 40
if ns_parser.b_quarter:
df_fa = fa.balance_sheet_statement(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP, period="quarter"
)
else:
df_fa = fa.balance_sheet_statement(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP
)
df_fa = clean_metrics_df(df_fa, num=ns_parser.n_num)
print(df_fa.drop(index=["Final link", "Link"]).to_string())
pd.set_option("display.max_colwidth", None)
print("")
print(df_fa.loc["Final link"].to_frame().to_string())
print("")
print(df_fa.loc["Link"].to_frame().to_string())
print("")
except Exception as e:
print(e)
print("")
return
def cash_flow(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="cash",
description="""
Prints a complete cash flow statement over time. This can be either
quarterly or annually. The following fields are expected: Accepted date, Accounts
payables, Accounts receivables, Acquisitions net, Capital expenditure, Cash at
beginning of period, Cash at end of period, Change in working capital, Common stock
issued, Common stock repurchased, Debt repayment, Deferred income tax, Depreciation and
amortization, Dividends paid, Effect of forex changes on cash, Filling date, Final
link, Free cash flow, Inventory, Investments in property plant and equipment, Link, Net
cash provided by operating activities, Net cash used for investing activities, Net cash
used provided by financing activities, Net change in cash, Net income, Operating cash
flow, Other financing activities, Other investing activities, Other non cash items,
Other working capital, Period, Purchases of investments, Sales maturities of
investments, Stock based compensation. [Source: Financial Modeling Prep]
""",
)
parser.add_argument(
"-n",
"--num",
action="store",
dest="n_num",
type=check_positive,
default=1,
help="Number of latest years/quarters.",
)
parser.add_argument(
"-q",
"--quarter",
action="store_true",
default=False,
dest="b_quarter",
help="Quarter fundamental data flag.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
if ns_parser.n_num == 1:
pd.set_option("display.max_colwidth", None)
else:
pd.options.display.max_colwidth = 40
if ns_parser.b_quarter:
df_fa = fa.cash_flow_statement(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP, period="quarter"
)
else:
df_fa = fa.cash_flow_statement(s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
df_fa = clean_metrics_df(df_fa, num=ns_parser.n_num)
print(df_fa.drop(index=["Final link", "Link"]).to_string())
pd.set_option("display.max_colwidth", None)
print("")
print(df_fa.loc["Final link"].to_frame().to_string())
print("")
print(df_fa.loc["Link"].to_frame().to_string())
print("")
except Exception as e:
print(e)
print("")
return
def key_metrics(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="metrics",
description="""
Prints a list of the key metrics of a company over time. This can be either
quarterly or annually. This includes, among other things, Return on Equity (ROE),
Working Capital, Current Ratio and Debt to Assets. The following fields are expected:
Average inventory, Average payables, Average receivables, Book value per share, Capex
per share, Capex to depreciation, Capex to operating cash flow, Capex to revenue, Cash
per share, Current ratio, Days of inventory on hand, Days payables outstanding, Days
sales outstanding, Debt to assets, Debt to equity, Dividend yield, Earnings yield,
Enterprise value, Enterprise value over EBITDA, Ev to free cash flow, Ev to operating
cash flow, Ev to sales, Free cash flow per share, Free cash flow yield, Graham net net,
Graham number, Income quality, Intangibles to total assets, Interest debt per share,
Inventory turnover, Market cap, Net current asset value, Net debt to EBITDA, Net income
per share, Operating cash flow per share, Payables turnover, Payout ratio, Pb ratio, Pe
ratio, Pfcf ratio, Pocfratio, Price to sales ratio, Ptb ratio, Receivables turnover,
Research and ddevelopement to revenue, Return on tangible assets, Revenue per share,
Roe, Roic, Sales general and administrative to revenue, Shareholders equity per
share, Stock based compensation to revenue, Tangible book value per share, and Working
capital. [Source: Financial Modeling Prep]
""",
)
parser.add_argument(
"-n",
"--num",
action="store",
dest="n_num",
type=check_positive,
default=1,
help="Number of latest years/quarters.",
)
parser.add_argument(
"-q",
"--quarter",
action="store_true",
default=False,
dest="b_quarter",
help="Quarter fundamental data flag.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
if ns_parser.n_num == 1:
pd.set_option("display.max_colwidth", None)
else:
pd.options.display.max_colwidth = 50
if ns_parser.b_quarter:
df_fa = fa.key_metrics(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP, period="quarter"
)
else:
df_fa = fa.key_metrics(s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
df_fa = clean_metrics_df(df_fa, num=ns_parser.n_num)
print(df_fa)
print("")
except Exception as e:
print(e)
print("")
return
def financial_ratios(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="ratios",
description="""
Prints in-depth ratios of a company over time. This can be either quarterly or
annually. This contains, among other things, Price-to-Book Ratio, Payout Ratio and
Operating Cycle. The following fields are expected: Asset turnover, Capital expenditure
coverage ratio, Cash conversion cycle, Cash flow coverage ratios, Cash flow to debt
ratio, Cash per share, Cash ratio, Company equity multiplier, Current ratio, Days of
inventory outstanding, Days of payables outstanding, Days of sales outstanding, Debt
equity ratio, Debt ratio, Dividend paid and capex coverage ratio, Dividend payout ratio,
Dividend yield, Ebit per revenue, Ebt per ebit, Effective tax rate, Enterprise value
multiple, Fixed asset turnover, Free cash flow operating cash flow ratio, Free cash
flow per share, Gross profit margin, Inventory turnover, Long term debt to
capitalization, Net income per EBT, Net profit margin, Operating cash flow per share,
Operating cash flow sales ratio, Operating cycle, Operating profit margin, Payables
turnover, Payout ratio, Pretax profit margin, Price book value ratio, Price cash flow
ratio, Price earnings ratio, Price earnings to growth ratio, Price fair value,
Price sales ratio, Price to book ratio, Price to free cash flows ratio, Price to
operating cash flows ratio, Price to sales ratio, Quick ratio, Receivables turnover,
Return on assets, Return on capital employed, Return on equity, Short term coverage
ratios, and Total debt to capitalization. [Source: Financial Modeling Prep]
""",
)
parser.add_argument(
"-n",
"--num",
action="store",
dest="n_num",
type=check_positive,
default=1,
help="Number of latest years/quarters.",
)
parser.add_argument(
"-q",
"--quarter",
action="store_true",
default=False,
dest="b_quarter",
help="Quarter fundamental data flag.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
if ns_parser.n_num == 1:
pd.set_option("display.max_colwidth", None)
else:
pd.options.display.max_colwidth = 40
if ns_parser.b_quarter:
df_fa = fa.financial_ratios(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP, period="quarter"
)
else:
df_fa = fa.financial_ratios(s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
df_fa = clean_metrics_df(df_fa, num=ns_parser.n_num)
print(df_fa)
print("")
except Exception as e:
print(e)
print("")
return
def financial_statement_growth(l_args, s_ticker):
parser = argparse.ArgumentParser(
add_help=False,
prog="growth",
description=""" Prints the growth of several financial statement items and ratios over
        time. This can be either annually or quarterly. These are, among other things, Revenue
Growth (3, 5 and 10 years), inventory growth and operating cash flow growth (3, 5 and 10
years). The following fields are expected: Asset growth, Book valueper share growth, Debt
growth, Dividendsper share growth, Ebitgrowth, Epsdiluted growth, Epsgrowth, Five y
dividendper share growth per share, Five y net income growth per share, Five y operating c
f growth per share, Five y revenue growth per share, Five y shareholders equity growth per
share, Free cash flow growth, Gross profit growth, Inventory growth, Net income growth,
Operating cash flow growth, Operating income growth, Rdexpense growth, Receivables growth,
Revenue growth, Sgaexpenses growth, Ten y dividendper share growth per share, Ten y net
income growth per share, Ten y operating c f growth per share, Ten y revenue growth per
share, Ten y shareholders equity growth per share, Three y dividendper share growth per
share, Three y net income growth per share, Three y operating c f growth per share, Three y
revenue growth per share, Three y shareholders equity growth per share, Weighted average
shares diluted growth, and Weighted average shares growth [Source: Financial Modeling Prep]
""",
)
parser.add_argument(
"-n",
"--num",
action="store",
dest="n_num",
type=check_positive,
default=1,
help="Number of latest years/quarters.",
)
parser.add_argument(
"-q",
"--quarter",
action="store_true",
default=False,
dest="b_quarter",
help="Quarter fundamental data flag.",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
if ns_parser.n_num == 1:
pd.set_option("display.max_colwidth", None)
else:
pd.options.display.max_colwidth = 50
if ns_parser.b_quarter:
df_fa = fa.financial_statement_growth(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP, period="quarter"
)
else:
df_fa = fa.financial_statement_growth(
s_ticker, cfg.API_KEY_FINANCIALMODELINGPREP
)
df_fa = clean_metrics_df(df_fa, num=ns_parser.n_num)
print(df_fa)
print("")
except Exception as e:
print(e)
print("")
return
def clean_metrics_df(df_fa: pd.DataFrame, num: int, mask: bool = True) -> pd.DataFrame:
df_fa = df_fa.iloc[:, 0:num]
if mask:
df_fa = df_fa.mask(df_fa.astype(object).eq(num * ["None"])).dropna()
df_fa = df_fa.mask(df_fa.astype(object).eq(num * ["0"])).dropna()
df_fa = df_fa.applymap(lambda x: long_number_format(x))
clean_df_index(df_fa)
df_fa.columns.name = "Fiscal Date Ending"
df_fa = df_fa.rename(
index={
"Enterprise value over e b i t d a": "Enterprise value over EBITDA",
"Net debt to e b i t d a": "Net debt to EBITDA",
"D c f": "DCF",
"Net income per e b t": "Net income per EBT",
}
)
return df_fa
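# Illustrative summary of clean_metrics_df (not additional API): keep the `num` most
# recent columns, optionally drop rows that are entirely "None"/"0", humanize large
# numbers with long_number_format, tidy the index via clean_df_index, and restore a
# few spelled-out abbreviations (EBITDA, DCF, EBT) in the row labels.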
|
the-stack_106_15212
|
import datajoint as dj
import pathlib
import pynwb
from pynwb import NWBHDF5IO
import warnings
warnings.filterwarnings('ignore')
exported_nwb_dir = dj.config['stores']['nwb_store']['stage']
nwb_session_dir = pathlib.Path(exported_nwb_dir, 'session')
nwb_mp_dir = pathlib.Path(exported_nwb_dir, 'membrane_potential')
nwb_session_dir.mkdir(parents=True, exist_ok=True)
nwb_mp_dir.mkdir(parents=True, exist_ok=True)
class NWBFile(dj.AttributeAdapter):
""" Adapter for: pynwb.file.NWBFile"""
attribute_type = 'filepath@nwb_store'
def put(self, nwb):
save_file_name = ''.join([nwb.identifier, '.nwb'])
save_fp = nwb_session_dir / save_file_name
print(f'Write NWBFile: {save_file_name}')
_write_nwb(save_fp, nwb)
return save_fp.as_posix()
def get(self, path):
io = NWBHDF5IO(str(pathlib.Path(path)), mode='r')
nwb = io.read()
nwb.io = io
return nwb
class Device(dj.AttributeAdapter):
""" Adapter for: pynwb.device.Device"""
attribute_type = 'longblob'
def put(self, nwb_device):
return {'name': nwb_device.name, 'object_id': nwb_device.object_id}
def get(self, device_dict):
nwb_device = pynwb.device.Device(name=device_dict['name'])
nwb_device.__dict__['_Container__object_id'] = device_dict['object_id'] # preserve object_id
return nwb_device
class IntracellularElectrode(dj.AttributeAdapter):
""" Adapter for: pynwb.icephys.IntracellularElectrode"""
attribute_type = 'longblob'
def put(self, electrode):
return dict(name=electrode.name, device=Device().put(electrode.device),
description=electrode.description, filtering=electrode.filtering,
location=electrode.location, object_id=electrode.object_id)
def get(self, ic_electrode_dict):
electrode = pynwb.icephys.IntracellularElectrode(
name=ic_electrode_dict['name'],
device=Device().get(ic_electrode_dict['device']),
description=ic_electrode_dict['description'],
filtering=ic_electrode_dict['filtering'],
location=ic_electrode_dict['location'])
electrode.__dict__['_Container__object_id'] = ic_electrode_dict['object_id'] # preserve object_id
return electrode
class PatchClampSeries(dj.AttributeAdapter):
""" Adapter for: pynwb.icephys.PatchClampSeries"""
attribute_type = 'filepath@nwb_store'
def put(self, patch_clamp):
nwb = patch_clamp.parent
nwb.add_device(patch_clamp.electrode.device)
nwb.add_ic_electrode(patch_clamp.electrode)
nwb.add_acquisition(patch_clamp)
save_file_name = ''.join([nwb.identifier + '_{}'.format(patch_clamp.name), '.nwb'])
save_fp = nwb_mp_dir / save_file_name
print(f'Write PatchClampSeries: {save_file_name}')
_write_nwb(save_fp, nwb, manager=nwb.io.manager)
return save_fp.as_posix()
def get(self, path):
io = NWBHDF5IO(str(pathlib.Path(path)), mode='r')
nwb = io.read()
patch_clamp = [obj for obj in nwb.objects.values()
if obj.neurodata_type == 'PatchClampSeries'][0]
patch_clamp.io = io
return patch_clamp
# ============= HELPER FUNCTIONS ===============
def _write_nwb(save_fp, nwb2write, manager=None):
try:
with NWBHDF5IO(save_fp.as_posix(), mode='w', manager=manager) as io:
io.write(nwb2write)
except Exception as e:
if save_fp.exists():
save_fp.unlink()
raise e
# ==== instantiate dj.AttributeAdapter objects ====
nwb_file = NWBFile()
device = Device()
patch_clamp_series = PatchClampSeries()
ic_electrode = IntracellularElectrode()
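# Hedged usage sketch (names below are illustrative, not from this module): once the
# adapter instances are exposed in a schema context, tables reference them by name,
# e.g.
#   schema = dj.schema('nwb_ephys', context={**locals()})
#   @schema
#   class Cell(dj.Imported):
#       definition = """
#       cell_id: int
#       ---
#       nwb_membrane_potential: <patch_clamp_series>
#       """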
|
the-stack_106_15213
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientApiTest(TestCase):
"""Test public facing ingredients"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test login is required to access list"""
        res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
"""Test ingredients can be retrieved by authorized users"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
email = "[email protected]",
password = "test123"
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredients_list(self):
"""Test retrieving a list of ingredients"""
Ingredient.objects.create(
name='salt',
            user=self.user
)
Ingredient.objects.create(
name='carrot',
            user=self.user
)
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
"""Test ingredients for authenticated users are returned"""
user2 = get_user_model().objects.create_user(
            email='[email protected]',
            password='testpass'
)
Ingredient.objects.create(user=user2, name="pepper")
ingredient = Ingredient.objects.create(user=self.user, name="salt")
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
"""Test only authenticated user can create ingredient"""
payload = {'name': 'salt'}
res = self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
).exists()
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertTrue(exists)
def test_create_invalid_ingredient_fails(self):
"""Test only valid ingredients can be created"""
        payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
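# These tests are run with Django's test runner, e.g. `python manage.py test recipe`
# (the app label is an assumption; adjust it to wherever this file lives in the project).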
|
the-stack_106_15214
|
import threading
from collections import OrderedDict
from random import Random
from typing import Dict, Iterator, List, Optional, Union, Tuple
import numpy as np
from torch.utils.data import DataLoader, Dataset, Sampler
from rdkit import Chem
from .scaler import StandardScaler
from chemprop.features import get_features_generator
from chemprop.features import BatchMolGraph, MolGraph
from chemprop.features import is_explicit_h, is_reaction
from chemprop.rdkit import make_mol
# Cache of graph featurizations
CACHE_GRAPH = True
SMILES_TO_GRAPH: Dict[str, MolGraph] = {}
def cache_graph() -> bool:
r"""Returns whether :class:`~chemprop.features.MolGraph`\ s will be cached."""
return CACHE_GRAPH
def set_cache_graph(cache_graph: bool) -> None:
r"""Sets whether :class:`~chemprop.features.MolGraph`\ s will be cached."""
global CACHE_GRAPH
CACHE_GRAPH = cache_graph
def empty_cache():
r"""Empties the cache of :class:`~chemprop.features.MolGraph` and RDKit molecules."""
SMILES_TO_GRAPH.clear()
SMILES_TO_MOL.clear()
# Cache of RDKit molecules
CACHE_MOL = True
SMILES_TO_MOL: Dict[str, Union[Chem.Mol, Tuple[Chem.Mol, Chem.Mol]]] = {}
def cache_mol() -> bool:
r"""Returns whether RDKit molecules will be cached."""
return CACHE_MOL
def set_cache_mol(cache_mol: bool) -> None:
r"""Sets whether RDKit molecules will be cached."""
global CACHE_MOL
CACHE_MOL = cache_mol
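# Illustrative toggle (a sketch using only the setters defined above): for very large
# datasets both caches are commonly disabled to bound memory, e.g.
#   set_cache_graph(False)
#   set_cache_mol(False)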
class MoleculeDatapoint:
"""A :class:`MoleculeDatapoint` contains a single molecule and its associated features and targets."""
def __init__(self,
smiles: List[str],
targets: List[Optional[float]] = None,
row: OrderedDict = None,
data_weight: float = 1,
features: np.ndarray = None,
features_generator: List[str] = None,
atom_features: np.ndarray = None,
atom_descriptors: np.ndarray = None,
bond_features: np.ndarray = None,
overwrite_default_atom_features: bool = False,
overwrite_default_bond_features: bool = False):
"""
:param smiles: A list of the SMILES strings for the molecules.
:param targets: A list of targets for the molecule (contains None for unknown target values).
:param row: The raw CSV row containing the information for this molecule.
:param data_weight: Weighting of the datapoint for the loss function.
:param features: A numpy array containing additional features (e.g., Morgan fingerprint).
:param features_generator: A list of features generators to use.
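        :param atom_features: A numpy array containing additional atom features to featurize the molecule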
:param atom_descriptors: A numpy array containing additional atom descriptors to featurize the molecule
:param bond_features: A numpy array containing additional bond features to featurize the molecule
:param overwrite_default_atom_features: Boolean to overwrite default atom features by atom_features
:param overwrite_default_bond_features: Boolean to overwrite default bond features by bond_features
"""
if features is not None and features_generator is not None:
raise ValueError('Cannot provide both loaded features and a features generator.')
self.smiles = smiles
self.targets = targets
self.row = row
self.data_weight = data_weight
self.features = features
self.features_generator = features_generator
self.atom_descriptors = atom_descriptors
self.atom_features = atom_features
self.bond_features = bond_features
self.overwrite_default_atom_features = overwrite_default_atom_features
self.overwrite_default_bond_features = overwrite_default_bond_features
self.is_reaction = is_reaction()
self.is_explicit_h = is_explicit_h()
# Generate additional features if given a generator
if self.features_generator is not None:
self.features = []
for fg in self.features_generator:
features_generator = get_features_generator(fg)
for m in self.mol:
if not self.is_reaction:
if m is not None and m.GetNumHeavyAtoms() > 0:
self.features.extend(features_generator(m))
# for H2
elif m is not None and m.GetNumHeavyAtoms() == 0:
# not all features are equally long, so use methane as dummy molecule to determine length
self.features.extend(np.zeros(len(features_generator(Chem.MolFromSmiles('C')))))
else:
if m[0] is not None and m[1] is not None and m[0].GetNumHeavyAtoms() > 0:
self.features.extend(features_generator(m[0]))
elif m[0] is not None and m[1] is not None and m[0].GetNumHeavyAtoms() == 0:
self.features.extend(np.zeros(len(features_generator(Chem.MolFromSmiles('C')))))
self.features = np.array(self.features)
# Fix nans in features
replace_token = 0
if self.features is not None:
self.features = np.where(np.isnan(self.features), replace_token, self.features)
# Fix nans in atom_descriptors
if self.atom_descriptors is not None:
self.atom_descriptors = np.where(np.isnan(self.atom_descriptors), replace_token, self.atom_descriptors)
# Fix nans in atom_features
if self.atom_features is not None:
self.atom_features = np.where(np.isnan(self.atom_features), replace_token, self.atom_features)
# Fix nans in bond_descriptors
if self.bond_features is not None:
self.bond_features = np.where(np.isnan(self.bond_features), replace_token, self.bond_features)
# Save a copy of the raw features and targets to enable different scaling later on
self.raw_features, self.raw_targets = self.features, self.targets
self.raw_atom_descriptors, self.raw_atom_features, self.raw_bond_features = \
self.atom_descriptors, self.atom_features, self.bond_features
@property
def mol(self) -> Union[List[Chem.Mol], List[Tuple[Chem.Mol, Chem.Mol]]]:
"""Gets the corresponding list of RDKit molecules for the corresponding SMILES list."""
mol = make_mols(self.smiles, self.is_reaction, self.is_explicit_h)
if cache_mol():
for s, m in zip(self.smiles, mol):
SMILES_TO_MOL[s] = m
return mol
@property
def number_of_molecules(self) -> int:
"""
Gets the number of molecules in the :class:`MoleculeDatapoint`.
:return: The number of molecules.
"""
return len(self.smiles)
def set_features(self, features: np.ndarray) -> None:
"""
Sets the features of the molecule.
:param features: A 1D numpy array of features for the molecule.
"""
self.features = features
def set_atom_descriptors(self, atom_descriptors: np.ndarray) -> None:
"""
Sets the atom descriptors of the molecule.
:param atom_descriptors: A 1D numpy array of features for the molecule.
"""
self.atom_descriptors = atom_descriptors
def set_atom_features(self, atom_features: np.ndarray) -> None:
"""
Sets the atom features of the molecule.
:param atom_features: A 1D numpy array of features for the molecule.
"""
self.atom_features = atom_features
def set_bond_features(self, bond_features: np.ndarray) -> None:
"""
Sets the bond features of the molecule.
:param bond_features: A 1D numpy array of features for the molecule.
"""
self.bond_features = bond_features
def extend_features(self, features: np.ndarray) -> None:
"""
Extends the features of the molecule.
:param features: A 1D numpy array of extra features for the molecule.
"""
self.features = np.append(self.features, features) if self.features is not None else features
def num_tasks(self) -> int:
"""
Returns the number of prediction tasks.
:return: The number of tasks.
"""
return len(self.targets)
def set_targets(self, targets: List[Optional[float]]):
"""
Sets the targets of a molecule.
:param targets: A list of floats containing the targets.
"""
self.targets = targets
def reset_features_and_targets(self) -> None:
"""Resets the features (atom, bond, and molecule) and targets to their raw values."""
self.features, self.targets = self.raw_features, self.raw_targets
self.atom_descriptors, self.atom_features, self.bond_features = \
self.raw_atom_descriptors, self.raw_atom_features, self.raw_bond_features
class MoleculeDataset(Dataset):
r"""A :class:`MoleculeDataset` contains a list of :class:`MoleculeDatapoint`\ s with access to their attributes."""
def __init__(self, data: List[MoleculeDatapoint]):
r"""
:param data: A list of :class:`MoleculeDatapoint`\ s.
"""
self._data = data
self._scaler = None
self._batch_graph = None
self._random = Random()
def smiles(self, flatten: bool = False) -> Union[List[str], List[List[str]]]:
"""
Returns a list containing the SMILES list associated with each :class:`MoleculeDatapoint`.
:param flatten: Whether to flatten the returned SMILES to a list instead of a list of lists.
:return: A list of SMILES or a list of lists of SMILES, depending on :code:`flatten`.
"""
if flatten:
return [smiles for d in self._data for smiles in d.smiles]
return [d.smiles for d in self._data]
def mols(self, flatten: bool = False) -> Union[List[Chem.Mol], List[List[Chem.Mol]], List[Tuple[Chem.Mol, Chem.Mol]], List[List[Tuple[Chem.Mol, Chem.Mol]]]]:
"""
Returns a list of the RDKit molecules associated with each :class:`MoleculeDatapoint`.
:param flatten: Whether to flatten the returned RDKit molecules to a list instead of a list of lists.
:return: A list of SMILES or a list of lists of RDKit molecules, depending on :code:`flatten`.
"""
if flatten:
return [mol for d in self._data for mol in d.mol]
return [d.mol for d in self._data]
@property
def number_of_molecules(self) -> int:
"""
Gets the number of molecules in each :class:`MoleculeDatapoint`.
:return: The number of molecules.
"""
return self._data[0].number_of_molecules if len(self._data) > 0 else None
def batch_graph(self) -> List[BatchMolGraph]:
r"""
Constructs a :class:`~chemprop.features.BatchMolGraph` with the graph featurization of all the molecules.
.. note::
           The :class:`~chemprop.features.BatchMolGraph` is cached after the first time it is computed
and is simply accessed upon subsequent calls to :meth:`batch_graph`. This means that if the underlying
set of :class:`MoleculeDatapoint`\ s changes, then the returned :class:`~chemprop.features.BatchMolGraph`
will be incorrect for the underlying data.
:return: A list of :class:`~chemprop.features.BatchMolGraph` containing the graph featurization of all the
molecules in each :class:`MoleculeDatapoint`.
"""
if self._batch_graph is None:
self._batch_graph = []
mol_graphs = []
for d in self._data:
mol_graphs_list = []
for s, m in zip(d.smiles, d.mol):
if s in SMILES_TO_GRAPH:
mol_graph = SMILES_TO_GRAPH[s]
else:
if len(d.smiles) > 1 and (d.atom_features is not None or d.bond_features is not None):
raise NotImplementedError('Atom descriptors are currently only supported with one molecule '
'per input (i.e., number_of_molecules = 1).')
mol_graph = MolGraph(m, d.atom_features, d.bond_features,
overwrite_default_atom_features=d.overwrite_default_atom_features,
overwrite_default_bond_features=d.overwrite_default_bond_features)
if cache_graph():
SMILES_TO_GRAPH[s] = mol_graph
mol_graphs_list.append(mol_graph)
mol_graphs.append(mol_graphs_list)
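            # Transpose from per-datapoint lists to per-position batches: the i-th
            # molecule of every datapoint is grouped into a single BatchMolGraph.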
self._batch_graph = [BatchMolGraph([g[i] for g in mol_graphs]) for i in range(len(mol_graphs[0]))]
return self._batch_graph
def features(self) -> List[np.ndarray]:
"""
Returns the features associated with each molecule (if they exist).
:return: A list of 1D numpy arrays containing the features for each molecule or None if there are no features.
"""
if len(self._data) == 0 or self._data[0].features is None:
return None
return [d.features for d in self._data]
def atom_features(self) -> List[np.ndarray]:
"""
        Returns the atom features associated with each molecule (if they exist).
        :return: A list of 2D numpy arrays containing the atom features
for each molecule or None if there are no features.
"""
if len(self._data) == 0 or self._data[0].atom_features is None:
return None
return [d.atom_features for d in self._data]
def atom_descriptors(self) -> List[np.ndarray]:
"""
        Returns the atom descriptors associated with each molecule (if they exist).
:return: A list of 2D numpy arrays containing the atom descriptors
for each molecule or None if there are no features.
"""
if len(self._data) == 0 or self._data[0].atom_descriptors is None:
return None
return [d.atom_descriptors for d in self._data]
def bond_features(self) -> List[np.ndarray]:
"""
        Returns the bond features associated with each molecule (if they exist).
:return: A list of 2D numpy arrays containing the bond features
for each molecule or None if there are no features.
"""
if len(self._data) == 0 or self._data[0].bond_features is None:
return None
return [d.bond_features for d in self._data]
def data_weights(self) -> List[float]:
"""
Returns the loss weighting associated with each molecule
"""
return [d.data_weight for d in self._data]
def targets(self) -> List[List[Optional[float]]]:
"""
Returns the targets associated with each molecule.
:return: A list of lists of floats (or None) containing the targets.
"""
return [d.targets for d in self._data]
def num_tasks(self) -> int:
"""
Returns the number of prediction tasks.
:return: The number of tasks.
"""
return self._data[0].num_tasks() if len(self._data) > 0 else None
def features_size(self) -> int:
"""
Returns the size of the additional features vector associated with the molecules.
:return: The size of the additional features vector.
"""
return len(self._data[0].features) if len(self._data) > 0 and self._data[0].features is not None else None
def atom_descriptors_size(self) -> int:
"""
Returns the size of custom additional atom descriptors vector associated with the molecules.
:return: The size of the additional atom descriptor vector.
"""
return len(self._data[0].atom_descriptors[0]) \
if len(self._data) > 0 and self._data[0].atom_descriptors is not None else None
def atom_features_size(self) -> int:
"""
Returns the size of custom additional atom features vector associated with the molecules.
:return: The size of the additional atom feature vector.
"""
return len(self._data[0].atom_features[0]) \
if len(self._data) > 0 and self._data[0].atom_features is not None else None
def bond_features_size(self) -> int:
"""
Returns the size of custom additional bond features vector associated with the molecules.
:return: The size of the additional bond feature vector.
"""
return len(self._data[0].bond_features[0]) \
if len(self._data) > 0 and self._data[0].bond_features is not None else None
def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0,
scale_atom_descriptors: bool = False, scale_bond_features: bool = False) -> StandardScaler:
"""
Normalizes the features of the dataset using a :class:`~chemprop.data.StandardScaler`.
The :class:`~chemprop.data.StandardScaler` subtracts the mean and divides by the standard deviation
for each feature independently.
If a :class:`~chemprop.data.StandardScaler` is provided, it is used to perform the normalization.
Otherwise, a :class:`~chemprop.data.StandardScaler` is first fit to the features in this dataset
and is then used to perform the normalization.
:param scaler: A fitted :class:`~chemprop.data.StandardScaler`. If it is provided it is used,
otherwise a new :class:`~chemprop.data.StandardScaler` is first fitted to this
data and is then used.
:param replace_nan_token: A token to use to replace NaN entries in the features.
:param scale_atom_descriptors: If the features that need to be scaled are atom features rather than molecule.
:param scale_bond_features: If the features that need to be scaled are bond descriptors rather than molecule.
:return: A fitted :class:`~chemprop.data.StandardScaler`. If a :class:`~chemprop.data.StandardScaler`
is provided as a parameter, this is the same :class:`~chemprop.data.StandardScaler`. Otherwise,
this is a new :class:`~chemprop.data.StandardScaler` that has been fit on this dataset.
"""
if len(self._data) == 0 or \
(self._data[0].features is None and not scale_bond_features and not scale_atom_descriptors):
return None
if scaler is not None:
self._scaler = scaler
elif self._scaler is None:
if scale_atom_descriptors and not self._data[0].atom_descriptors is None:
features = np.vstack([d.raw_atom_descriptors for d in self._data])
elif scale_atom_descriptors and not self._data[0].atom_features is None:
features = np.vstack([d.raw_atom_features for d in self._data])
elif scale_bond_features:
features = np.vstack([d.raw_bond_features for d in self._data])
else:
features = np.vstack([d.raw_features for d in self._data])
self._scaler = StandardScaler(replace_nan_token=replace_nan_token)
self._scaler.fit(features)
if scale_atom_descriptors and not self._data[0].atom_descriptors is None:
for d in self._data:
d.set_atom_descriptors(self._scaler.transform(d.raw_atom_descriptors))
elif scale_atom_descriptors and not self._data[0].atom_features is None:
for d in self._data:
d.set_atom_features(self._scaler.transform(d.raw_atom_features))
elif scale_bond_features:
for d in self._data:
d.set_bond_features(self._scaler.transform(d.raw_bond_features))
else:
for d in self._data:
d.set_features(self._scaler.transform(d.raw_features.reshape(1, -1))[0])
return self._scaler
def normalize_targets(self) -> StandardScaler:
"""
Normalizes the targets of the dataset using a :class:`~chemprop.data.StandardScaler`.
The :class:`~chemprop.data.StandardScaler` subtracts the mean and divides by the standard deviation
for each task independently.
This should only be used for regression datasets.
:return: A :class:`~chemprop.data.StandardScaler` fitted to the targets.
"""
targets = [d.raw_targets for d in self._data]
scaler = StandardScaler().fit(targets)
scaled_targets = scaler.transform(targets).tolist()
self.set_targets(scaled_targets)
return scaler
def set_targets(self, targets: List[List[Optional[float]]]) -> None:
"""
Sets the targets for each molecule in the dataset. Assumes the targets are aligned with the datapoints.
:param targets: A list of lists of floats (or None) containing targets for each molecule. This must be the
same length as the underlying dataset.
"""
assert len(self._data) == len(targets)
for i in range(len(self._data)):
self._data[i].set_targets(targets[i])
def reset_features_and_targets(self) -> None:
"""Resets the features (atom, bond, and molecule) and targets to their raw values."""
for d in self._data:
d.reset_features_and_targets()
def __len__(self) -> int:
"""
Returns the length of the dataset (i.e., the number of molecules).
:return: The length of the dataset.
"""
return len(self._data)
def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:
r"""
Gets one or more :class:`MoleculeDatapoint`\ s via an index or slice.
:param item: An index (int) or a slice object.
:return: A :class:`MoleculeDatapoint` if an int is provided or a list of :class:`MoleculeDatapoint`\ s
if a slice is provided.
"""
return self._data[item]
class MoleculeSampler(Sampler):
"""A :class:`MoleculeSampler` samples data from a :class:`MoleculeDataset` for a :class:`MoleculeDataLoader`."""
def __init__(self,
dataset: MoleculeDataset,
class_balance: bool = False,
shuffle: bool = False,
seed: int = 0):
"""
:param class_balance: Whether to perform class balancing (i.e., use an equal number of positive
and negative molecules). Set shuffle to True in order to get a random
subset of the larger class.
:param shuffle: Whether to shuffle the data.
:param seed: Random seed. Only needed if :code:`shuffle` is True.
"""
super(Sampler, self).__init__()
self.dataset = dataset
self.class_balance = class_balance
self.shuffle = shuffle
self._random = Random(seed)
if self.class_balance:
indices = np.arange(len(dataset))
has_active = np.array([any(target == 1 for target in datapoint.targets) for datapoint in dataset])
self.positive_indices = indices[has_active].tolist()
self.negative_indices = indices[~has_active].tolist()
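            # Positives and negatives are interleaved in pairs in __iter__, so at most
            # 2 * min(#positives, #negatives) indices are drawn per epoch.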
self.length = 2 * min(len(self.positive_indices), len(self.negative_indices))
else:
self.positive_indices = self.negative_indices = None
self.length = len(self.dataset)
def __iter__(self) -> Iterator[int]:
"""Creates an iterator over indices to sample."""
if self.class_balance:
if self.shuffle:
self._random.shuffle(self.positive_indices)
self._random.shuffle(self.negative_indices)
indices = [index for pair in zip(self.positive_indices, self.negative_indices) for index in pair]
else:
indices = list(range(len(self.dataset)))
if self.shuffle:
self._random.shuffle(indices)
return iter(indices)
def __len__(self) -> int:
"""Returns the number of indices that will be sampled."""
return self.length
def construct_molecule_batch(data: List[MoleculeDatapoint]) -> MoleculeDataset:
r"""
Constructs a :class:`MoleculeDataset` from a list of :class:`MoleculeDatapoint`\ s.
Additionally, precomputes the :class:`~chemprop.features.BatchMolGraph` for the constructed
:class:`MoleculeDataset`.
:param data: A list of :class:`MoleculeDatapoint`\ s.
:return: A :class:`MoleculeDataset` containing all the :class:`MoleculeDatapoint`\ s.
"""
data = MoleculeDataset(data)
data.batch_graph() # Forces computation and caching of the BatchMolGraph for the molecules
return data
class MoleculeDataLoader(DataLoader):
"""A :class:`MoleculeDataLoader` is a PyTorch :class:`DataLoader` for loading a :class:`MoleculeDataset`."""
def __init__(self,
dataset: MoleculeDataset,
batch_size: int = 50,
num_workers: int = 8,
class_balance: bool = False,
shuffle: bool = False,
seed: int = 0):
"""
:param dataset: The :class:`MoleculeDataset` containing the molecules to load.
:param batch_size: Batch size.
:param num_workers: Number of workers used to build batches.
:param class_balance: Whether to perform class balancing (i.e., use an equal number of positive
and negative molecules). Class balance is only available for single task
classification datasets. Set shuffle to True in order to get a random
subset of the larger class.
:param shuffle: Whether to shuffle the data.
:param seed: Random seed. Only needed if shuffle is True.
"""
self._dataset = dataset
self._batch_size = batch_size
self._num_workers = num_workers
self._class_balance = class_balance
self._shuffle = shuffle
self._seed = seed
self._context = None
self._timeout = 0
is_main_thread = threading.current_thread() is threading.main_thread()
if not is_main_thread and self._num_workers > 0:
            self._context = 'forkserver'  # in order to prevent the DataLoader from hanging
            self._timeout = 3600  # safety timeout so a stuck worker does not hang forever
self._sampler = MoleculeSampler(
dataset=self._dataset,
class_balance=self._class_balance,
shuffle=self._shuffle,
seed=self._seed
)
super(MoleculeDataLoader, self).__init__(
dataset=self._dataset,
batch_size=self._batch_size,
sampler=self._sampler,
num_workers=self._num_workers,
collate_fn=construct_molecule_batch,
multiprocessing_context=self._context,
timeout=self._timeout
)
@property
def targets(self) -> List[List[Optional[float]]]:
"""
Returns the targets associated with each molecule.
:return: A list of lists of floats (or None) containing the targets.
"""
if self._class_balance or self._shuffle:
raise ValueError('Cannot safely extract targets when class balance or shuffle are enabled.')
return [self._dataset[index].targets for index in self._sampler]
@property
def iter_size(self) -> int:
"""Returns the number of data points included in each full iteration through the :class:`MoleculeDataLoader`."""
return len(self._sampler)
def __iter__(self) -> Iterator[MoleculeDataset]:
r"""Creates an iterator which returns :class:`MoleculeDataset`\ s"""
return super(MoleculeDataLoader, self).__iter__()
def make_mols(smiles: List[str], reaction: bool, keep_h: bool):
"""
Builds a list of RDKit molecules (or a list of tuples of molecules if reaction is True) for a list of smiles.
:param smiles: List of SMILES strings.
:param reaction: Boolean whether the SMILES strings are to be treated as a reaction.
:param keep_h: Boolean whether to keep hydrogens in the input smiles. This does not add hydrogens, it only keeps them if they are specified.
:return: List of RDKit molecules or list of tuple of molecules.
"""
if reaction:
mol = [SMILES_TO_MOL[s] if s in SMILES_TO_MOL else (make_mol(s.split(">")[0], keep_h), make_mol(s.split(">")[-1], keep_h)) for s in smiles]
else:
mol = [SMILES_TO_MOL[s] if s in SMILES_TO_MOL else make_mol(s, keep_h) for s in smiles]
return mol
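# A minimal, illustrative sketch (assumes RDKit parses the SMILES below and that
# chemprop's default featurization settings are in effect); the guard keeps importing
# this module side-effect free:
if __name__ == "__main__":
    _demo_data = [MoleculeDatapoint(smiles=['CCO'], targets=[1.0]),
                  MoleculeDatapoint(smiles=['c1ccccc1'], targets=[0.0])]
    _demo_loader = MoleculeDataLoader(MoleculeDataset(_demo_data), batch_size=2, num_workers=0)
    for _batch in _demo_loader:
        print(_batch.number_of_molecules, _batch.num_tasks())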
|
the-stack_106_15215
|
import logging
import time
from gym.common.defs.tools import PROBER_IPERF
from gym.agent.probers.prober import Prober
logger = logging.getLogger()
class ProberIperf(Prober):
PARAMETERS = {
'port':'-p',
'duration':'-t',
'protocol':'-u',
'server':'-s',
'client':'-c',
}
METRICS = [
'bandwidth',
]
def __init__(self):
Prober.__init__(self, id=PROBER_IPERF, name="iperf",
parameters=ProberIperf.PARAMETERS,
metrics=ProberIperf.METRICS)
self._command = 'iperf'
def options(self, opts):
options = self.serialize(opts)
opts = []
stop = False
timeout = 0
if '-c' in options:
time.sleep(0.5)
        for k, v in options.items():
if k == '-s':
stop = True
if k == '-t':
timeout = float(v)
if k == '-u' or k == '-s':
opts.extend([k])
else:
                opts.extend([k, v])
        opts.extend(['-f', 'm'])
return opts, stop, timeout
def parser(self, out):
_eval = []
lines = [line for line in out.split('\n') if line.strip()]
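        # Heuristic over iperf's plain-text summary: the number of lines distinguishes
        # the report formats before the bandwidth column is pulled from the last line.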
if len(lines) == 7:
bandwidth = lines[-1].split(' ')[-2]
units = lines[-1].split(' ')[-1]
m = {
"name": "throughput",
"series": False,
"type": "float",
"unit": units,
"value": float(bandwidth),
}
_eval = [m]
elif len(lines) == 11 or len(lines) == 8:
bandwidth = lines[-1].split(' ')[-13]
units = lines[-1].split(' ')[-12]
m = {
"name": "throughput",
"series": False,
"type": "float",
"unit": units,
"value": float(bandwidth),
}
_eval = [m]
return _eval
if __name__ == '__main__':
app = ProberIperf()
print(app.main())
|
the-stack_106_15217
|
import requests
from bs4 import BeautifulSoup
import sys
import json
from typing import *
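# These helpers emulate clicks on an ASP.NET WebForms page: each "click" is a POST that
# replays the page's __VIEWSTATE together with __EVENTTARGET/__EVENTARGUMENT.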
def get_html(s: requests.Session, state_id: str, action_str: str, btn_id: str) -> BeautifulSoup:
print(f"Making req: {action_str}: {btn_id}")
url = "https://splus.cumulus.vub.ac.be/SWS/v3/evenjr/NL/STUDENTSET/studentset.aspx"
data = {
"__VIEWSTATE": state_id,
"__EVENTTARGET": action_str, # Clicked event
"__EVENTARGUMENT": btn_id, # clicked btn
}
req = s.post(url, data=data)
html = BeautifulSoup(req.text, features="html5lib")
return html, html.find(id='__VIEWSTATE').get("value")
def get_sub_ids(section_html: BeautifulSoup) -> List[str]:
td = section_html.find_all("td", class_="tCell")
td.extend(section_html.find_all("td", class_="tCellSelected"))
return list(map(lambda elem: elem['id'], td))
def get_sets(html: BeautifulSoup) -> List[BeautifulSoup]:
return html.find_all("td", class_="td-set")
def get_ical(s: requests.Session, initial_html: BeautifulSoup):
tree = {}
# get all classes
dlObject = initial_html.find(id="dlObject")
dlObject_options = dlObject.find_all("option")
opt_vals = list(map(lambda i: {"id": i.get("value"), "value": i.contents[0]}, dlObject_options))
print(f"({len(opt_vals)})", end="")
# Select class
for opt in opt_vals:
ical_html = ""
try:
url = "https://splus.cumulus.vub.ac.be/SWS/v3/evenjr/NL/STUDENTSET/Default.aspx"
evt_validation = initial_html.find(id="__EVENTVALIDATION").get("value")
evt_view_state = initial_html.find(id="__VIEWSTATE").get("value")
evt_radio = initial_html.find(id="RadioType_2").get("value")
data = {
"__EVENTVALIDATION": evt_validation,
"__EVENTTARGET": "",
"__EVENTARGUMENT": "",
"__LASTFOCUS": "",
"__VIEWSTATE": evt_view_state,
"tLinkType": "setbytag",
"tWildcard": "",
"dlObject": opt["id"], # Gekozen vak
"lbWeeks": [
"1;2;3;4;5;6;7;8;9;10;11;12;13;14",
"22;23;24;25;26;27;28;29;32;33;34;35;36"
],
"lbDays": "1;2;3;4;5;6",
"dlPeriod": "2;3;4;5;6;7;8;9;10;11;12;13;14;15;16;17;18;19;20;21;22;23;24;25;26;27;28;29;30;31;32;33",
"RadioType": evt_radio,
"bGetTimetable": "Bekijk+het+lesrooster",
}
opt_id = opt["id"]
#print(f"Making request to Default with {opt_id}")
print(".", end="")
select_req = s.post(url, data=data) # Send request
#select_html = BeautifulSoup(select_req.text)
# ical
url = "https://splus.cumulus.vub.ac.be/SWS/v3/evenjr/NL/STUDENTSET/showtimetable.aspx"
ical_req = s.get(url) # Go to ical screen
ical_html = BeautifulSoup(ical_req.text, features="html5lib")
url_input = ical_html.find(id="ical_url")
ical_url = url_input.get("value")
tree[opt["id"]] = {
"url": ical_url,
**opt
}
except AttributeError as err:
opt_id = opt["id"]
print(f"ICal could not be found for {opt}, error: {err}", file=sys.stderr)
print(ical_html, file=sys.stderr)
print() # esthetics
# Next calendar
print("Going back")
url = "https://splus.cumulus.vub.ac.be/SWS/v3/evenjr/NL/STUDENTSET/studentset.aspx?"
back_req = s.get(url)
back_html = BeautifulSoup(back_req.text, features="html5lib")
view_state = back_html.find(id="__VIEWSTATE").get("value")
return tree, view_state
def get_opleid(s: requests.Session, state_id: str, initial_html: BeautifulSoup):
tree = {}
sets = get_sets(initial_html)
opleidingen = get_sub_ids(sets[2])
for opleiding in opleidingen:
tree[opleiding] = {}
html, state_id = get_html(s, state_id, "tTagClicked", opleiding)
ical, state_id = get_ical(s, html)
tree[opleiding] = ical
return tree, state_id
def get_fac(s: requests.Session, state_id: str, inital_html: BeautifulSoup):
tree = {}
sets = get_sets(inital_html)
fac = get_sub_ids(sets[1])
for f in fac:
html, state_id = get_html(s, state_id, "tDepartmentClicked", f)
opl, state_id = get_opleid(s, state_id, html)
tree[f] = opl
return tree, state_id
def get_type(s: requests.Session, state_id: str, initial_html: BeautifulSoup):
tree = {}
sets = get_sets(initial_html)
types = get_sub_ids(sets[0])
for t in types:
html, state_id = get_html(s, state_id, "tTypeClicked", t)
fac, state_id = get_fac(s, state_id, html)
tree[t] = fac
return tree, state_id
# Building the tree
with requests.Session() as s:
s.headers["User-Agent"] = "Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0"
s.headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
url = "https://splus.cumulus.vub.ac.be/SWS/v3/evenjr/NL/STUDENTSET/studentset.aspx"
req = s.get(url) # Inital req
html = BeautifulSoup(req.text, features="html5lib")
view_state = html.find(id='__VIEWSTATE').get("value")
print("[*] Set Content Type to form validation")
s.headers["Content-Type"] = "application/x-www-form-urlencoded"
    tree, view_state = get_type(s, view_state, html)
    data_tree = json.dumps(tree)
with open("data_tree_dummy.json", "w") as f:
f.write(data_tree)
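# Shape of the written JSON (a sketch; the keys are the element ids scraped above):
#   {"<type id>": {"<faculty id>": {"<programme id>": {"<set id>": {"url": "<ical url>", "id": "...", "value": "..."}}}}}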
|
the-stack_106_15218
|
import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
save_counts = opt.save_counts
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://'))
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != 'cpu' # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
stride = int(model.stride.max()) # model stride
imgsz = check_img_size(imgsz, s=stride) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
        modelc.to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz, stride=stride)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz, stride=stride)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
print(names)
# Run inference
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
t0 = time.time()
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
counts_path = str(save_dir / 'counts.txt') # counts.txt
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
s_det = ''
# Print results
for c in det[:, -1].unique():
if s_det != '':
s_det += ', '
n = (det[:, -1] == c).sum() # detections per class
s_det += f"{n} {names[int(c)]}{'s' * (n > 1)}" # add to string
s += s_det + ' '
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or view_img: # Add bbox to image
label = f'{names[int(cls)]} {conf:.2f}'
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
if save_counts:
with open(counts_path, 'a') as f:
f.write(f'{p.name} - {s_det} \n')
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
# Stream results
if view_img:
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = 'mp4v' # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
print(f'Done. ({time.time() - t0:.3f}s)')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-counts', action='store_true', help='save object counts to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
print(opt)
with torch.no_grad():
if opt.update: # update all models (to fix SourceChangeWarning)
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
detect()
strip_optimizer(opt.weights)
else:
detect()
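# Example invocations (illustrative; 'detect.py' stands for this script and the paths are placeholders):
#   python detect.py --source data/images --weights yolov5s.pt --conf-thres 0.4 --save-txt --save-counts
#   python detect.py --source 0 --view-img   # run on the default webcam stream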
|
the-stack_106_15220
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from lib.powlib import get_class_name
from conf.config import myapp
from lib.application import log_handler
import logging
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
#fileConfig(myapp["logfile"])
logging.getLogger('alembic').addHandler(log_handler)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
#
# this must load all models to give alembic insight
# to the declarative schema
#
import os
exclude_list=["modelobject", "basemodel", "elastic", "tinydb", "tinymodel", "sqlmodel", "mongomodel"]
#
# the list of modules (excluding _ones and basemodel; add any more you don't want
# to be loaded or inspected to exclude_list above)
#
mods=[]
module_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'models/sql'))
for mod in os.listdir( module_path ):
mod = mod.split(".")[0]
if not mod.startswith("_") and not mod in exclude_list:
tmp_mod=mod.split("_")[-1]
# exclude all ending with _observer (which are not models but observers by convention ;)
if tmp_mod != "observer":
mods.append(mod)
from sqlalchemy import engine_from_config, pool, MetaData
print(mods)
class_list = []
# load all the models from their modules (mods)
import importlib
for m in mods:
mod = importlib.import_module('models.sql.' + m)
#model_class_name = m.capitalize()
model_class_name = get_class_name(m)
klass = getattr(mod, model_class_name)
class_list.append(getattr(klass, "metadata"))
# alembic support multiple model files
# see: http://liuhongjiang.github.io/hexotech/2015/10/14/alembic-support-multiple-model-files/
def combine_metadata(*args):
m = MetaData()
for metadata in args:
for t in metadata.tables.values():
t.to_metadata(m)
return m
# hand over the model class list to combine_metadata
# using lists and *args see: http://stackoverflow.com/questions/28986787/how-to-build-arguments-for-a-python-function-in-a-variable
# ans see: tests/test_args.py
target_metadata = combine_metadata(*class_list)
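# Sketch of what combine_metadata produces (the model names are hypothetical):
#   combine_metadata(User.metadata, Order.metadata) returns a single MetaData object
#   holding the tables of both models, which Alembic can then diff for autogenerate.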
#from dblib import Base
#target_metadata = Base.metadata
# orig: target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
print("SQLAlchemy URL: " + str(url))
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
the-stack_106_15221
|
"""
This file offers the methods to automatically retrieve the graph Cellulomonas sp. Root930.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CellulomonasSpRoot930(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Cellulomonas sp. Root930 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Cellulomonas sp. Root930 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="CellulomonasSpRoot930",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
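# Illustrative usage (a sketch; assumes the ensmallen/grape package layout imported above):
#
#   graph = CellulomonasSpRoot930(directed=False, version="links.v11.5")
#   print(graph)  # an ensmallen Graph instance built from the STRING edge list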
|
the-stack_106_15224
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
from os.path import exists, join
from os import makedirs
import pickle
image_selection = np.array([0, 0], dtype='float32')
OUTPUT_SIZE = [6000, 9000]
def remove_fisheye(img):
K = np.array([[1001.7792, 0., 962.3616],
[0., 1012.7232, 561.2760],
[0., 0., 1.]])
# zero distortion coefficients work well for this image
D = np.array([-0.31331, 0.12965, 0.00073, -0.00022])
# use Knew to scale the output
Knew = K.copy()
Knew[(0, 1), (0, 1)] = 0.4 * Knew[(0, 1), (0, 1)]
# return cv2.fisheye.undistortImage(img, K, D=D, Knew=Knew)
return img
# noinspection PyArgumentList
def compute_transformation(video_file, debug=False):
""" Computes the proper transformation by letting the user select appropriate points in the image """
# noinspection PyCompatibility
def get_coords(select_image):
# noinspection PyCompatibility
def getxy(event, row, col, flags, param):
global image_selection
if event == cv2.EVENT_LBUTTONDOWN:
image_selection = np.vstack([image_selection, np.hstack([row, col])])
cv2.circle(select_image, (row, col), 3, (0, 0, 255), -1)
cv2.imshow('image', select_image)
print("(row, col) = ", (row, col))
# Read the image
# Set mouse CallBack event
cv2.namedWindow('image')
cv2.setMouseCallback('image', getxy)
# show the image
print("Click to select a point OR press ANY KEY to continue...")
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# obtain the matrix of the selected points
selected_points = image_selection[1:, :]
return np.float32(selected_points)
cap = cv2.VideoCapture(video_file)
ret, distorted_img = cap.read()
# remove fisheye distortion
img = remove_fisheye(distorted_img)
plt.figure()
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
# compute corner pairs
original_corners = get_coords(img)
print(np.array(original_corners))
new_corners = np.float32([[0., 0.], [210., 0.], [210., 297.], [0., 297.]]) * 4. + 4200.
# perform homography
transformation = cv2.getPerspectiveTransform(original_corners, new_corners)
dst = cv2.warpPerspective(img, transformation, tuple(OUTPUT_SIZE))
if debug:
plt.figure()
plt.imshow(cv2.cvtColor(distorted_img, cv2.COLOR_BGR2RGB))
plt.figure()
plt.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))
plt.show()
cap.release()
cv2.destroyAllWindows()
return transformation
def pre_process_image(img, transformation, largest_contour):
""" processes image for further evalution """
# remove fisheye distortion
frame = remove_fisheye(img)
# apply perspective warping
dst = cv2.warpPerspective(frame, transformation, tuple(OUTPUT_SIZE))
# get region of interest
cropped = dst[1050:-950, 3500:6200]
dst = cropped.copy()
# find largest contour
if largest_contour is None:
hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)
lower_range = np.array([0, 0, 0], dtype=np.uint8)
upper_range = np.array([255, 255, 70], dtype=np.uint8)
mask = cv2.inRange(hsv, lower_range, upper_range)
# close blobs
kernel = np.ones((40, 40), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
_, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
largest_contour = max(contours, key=cv2.contourArea)
mask = np.zeros(dst.shape, np.uint8)
cv2.drawContours(mask, [largest_contour], 0, 255, -1)
mask = mask[:, :, 0]
res = cv2.bitwise_and(dst, dst, mask=mask)
return res, cropped, largest_contour
def detect_car(fgbg, frame):
    # subtract the background
fgmask = fgbg.apply(frame)
# close masks
kernel = np.ones((15, 15), np.uint8)
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
kernel = np.ones((50, 50), np.uint8)
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_DILATE, kernel)
# find largest contour and compute centroid
cx, cy = (0, 0)
_, contours, _ = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
largest_contour = max(contours, key=cv2.contourArea)
# compute centroid of contour
moment = cv2.moments(largest_contour)
cx = int(moment['m10'] / moment['m00'])
cy = int(moment['m01'] / moment['m00'])
return (cx, cy), fgmask
def detect_center_coords(img):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_range = np.array([0, 0, 200], dtype=np.uint8)
upper_range = np.array([255, 255, 255], dtype=np.uint8)
mask = cv2.inRange(hsv, lower_range, upper_range)
_, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cx, cy = (0, 0)
if contours:
largest_contour = max(contours, key=cv2.contourArea)
# compute centroid of contour
moment = cv2.moments(largest_contour)
cx = int(moment['m10'] / moment['m00'])
cy = int(moment['m01'] / moment['m00'])
return (cx, cy), mask
# noinspection PyArgumentList
def convert_video(video, debug=False):
""" Normalises each frame in the given video files """
makedirs('output', exist_ok=True)
# compute transformation matrix
transformation_file = join('output', 'transformation_' + str(video) + '.pickle')
if exists(transformation_file):
with open(transformation_file, 'rb') as pickle_file:
transformation = pickle.load(pickle_file)
else:
transformation = compute_transformation(video_file=video, debug=debug)
with open(transformation_file, 'wb') as pickle_file:
pickle.dump(transformation, pickle_file)
# output definition
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(join('output', 'result.avi'), fourcc, 1.0, (700, 1000))
fgbg = cv2.createBackgroundSubtractorMOG2()
cap = cv2.VideoCapture(video)
largest_contour = None
center_coords = None
frame_counter = 0
while cap.isOpened():
ret, frame = cap.read()
if ret:
# perform pre-processing
pre_processed_image, cropped_image, largest_contour = pre_process_image(frame,
transformation,
largest_contour)
# detect car
car_coords, car_mask = detect_car(fgbg, pre_processed_image)
# detect coords of center
if center_coords is None:
center_coords, center_mask = detect_center_coords(pre_processed_image)
# show results when ready
if frame_counter > 3:
cv2.line(cropped_image, center_coords, car_coords, (255, 0, 0), 10)
cv2.circle(cropped_image, car_coords, 30, (0, 0, 255), -1)
cv2.circle(cropped_image, center_coords, 30, (0, 255, 255), -1)
to_show = np.concatenate((cropped_image,
pre_processed_image,
cv2.bitwise_and(pre_processed_image, pre_processed_image, mask=center_mask),
cv2.bitwise_and(pre_processed_image, pre_processed_image, mask=car_mask)),
axis=1)
cv2.imshow('converted video', cv2.resize(to_show, (0, 0), fx=0.15, fy=0.15))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
frame_counter += 1
out.write(cv2.resize(to_show, (0, 0), fx=0.1, fy=0.1))
else:
break
# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
convert_video(video=0, debug=True)
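    # Note (a sketch): video=0 opens the default camera; pass a file path instead to process a
    # recorded clip, e.g. convert_video(video='lap_recording.avi', debug=False)  # hypothetical file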
|
the-stack_106_15226
|
#
# Compares the full and lumped thermal models for a single layer Li-ion cell
#
import pybamm
import numpy as np
# load model
pybamm.set_logging_level("INFO")
options = {"thermal": "x-full"}
full_thermal_model = pybamm.lithium_ion.SPMe(options)
options = {"thermal": "x-lumped"}
lumped_thermal_model = pybamm.lithium_ion.SPMe(options)
models = [full_thermal_model, lumped_thermal_model]
# load parameter values and process models and geometry
param = models[0].default_parameter_values
# for x-full, cooling is only implemented on the surfaces
# so set other forms of cooling to zero for comparison.
param.update(
{
"Negative current collector"
+ " surface heat transfer coefficient [W.m-2.K-1]": 5,
"Positive current collector"
+ " surface heat transfer coefficient [W.m-2.K-1]": 5,
"Negative tab heat transfer coefficient [W.m-2.K-1]": 0,
"Positive tab heat transfer coefficient [W.m-2.K-1]": 0,
"Edge heat transfer coefficient [W.m-2.K-1]": 0,
}
)
for model in models:
param.process_model(model)
# set mesh
var_pts = {"x_n": 10, "x_s": 10, "x_p": 10, "r_n": 10, "r_p": 10}
# discretise models
for model in models:
# create geometry
geometry = model.default_geometry
param.process_geometry(geometry)
mesh = pybamm.Mesh(geometry, models[-1].default_submesh_types, var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
# solve model
solutions = [None] * len(models)
t_eval = np.linspace(0, 3600, 100)
for i, model in enumerate(models):
solver = pybamm.ScipySolver(atol=1e-8, rtol=1e-8)
solution = solver.solve(model, t_eval)
solutions[i] = solution
# plot
output_variables = [
"Terminal voltage [V]",
"X-averaged cell temperature [K]",
"Cell temperature [K]",
]
labels = ["Full thermal model", "Lumped thermal model"]
plot = pybamm.QuickPlot(solutions, output_variables, labels)
plot.dynamic_plot()
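# To compare the two models numerically rather than graphically (a sketch using the
# processed-variable interface of the solutions computed above):
#   T_full = solutions[0]["X-averaged cell temperature [K]"].entries
#   T_lumped = solutions[1]["X-averaged cell temperature [K]"].entries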
|
the-stack_106_15227
|
import asyncio
import aiohttp
from aiohttp import WSMsgType
class NStream:
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36"
def __init__(self, *, next_cookie, on_message):
self.next_cookie = next_cookie
self.ready_event = asyncio.Event()
self.client_session = aiohttp.ClientSession()
self.sock = None
self.on_message = on_message
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self.sock:
await self.sock.close()
self.sock = None
await self.client_session.close()
async def connect(self):
self.sock = await self.client_session.ws_connect(
"wss://www.nordnet.fi/ws/2/public",
origin="https://www.nordnet.fi",
protocols=["NEXT"],
headers={
"Cookie": ("NEXT=%s" % self.next_cookie),
"User-Agent": self.USER_AGENT,
},
autoclose=False,
)
return asyncio.create_task(self.pump())
async def subscribe(self, thing_id):
thing_id = int(thing_id)
await self.ready_event.wait()
await self.sock.send_json(
{"cmd": "subscribe", "args": {"t": "price", "id": thing_id}}
)
await self.sock.send_json(
{"cmd": "subscribe", "args": {"t": "depth", "id": thing_id}}
)
await self.sock.send_json(
{"cmd": "subscribe", "args": {"t": "trade", "id": thing_id}}
)
async def pump(self):
while self.sock:
msg = await self.sock.receive()
if msg.type == WSMsgType.CLOSED:
break
if msg.type == WSMsgType.TEXT:
data = msg.json()
data_type = data.get("type")
data_data = data.get("data", {})
if data_type == "ack":
if data_data.get("cmd") == "login":
self.ready_event.set()
await self.on_message(type=data_type, **data_data)
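# Illustrative usage (a sketch; the cookie value and instrument id are placeholders):
#
#   async def main():
#       async def on_message(type, **data):
#           print(type, data)
#
#       async with NStream(next_cookie="<NEXT cookie>", on_message=on_message) as stream:
#           pump_task = await stream.connect()
#           await stream.subscribe(16099872)  # hypothetical instrument id
#           await pump_task
#
#   asyncio.run(main())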
|
the-stack_106_15228
|
'''
Codes are from
https://github.com/dmlc/dgl/tree/master/examples/pytorch
'''
import torch
import torch.nn as nn
from dgl.nn.pytorch import GraphConv, GATConv
def positive_safe_sigmoid(x):
return torch.sigmoid(x) + 1e-8
class GCN(nn.Module):
def __init__(self,
g,
in_feats,
n_hidden,
n_classes,
n_layers,
activation):
super(GCN, self).__init__()
self.g = g
self.layers = nn.ModuleList()
# input layer
self.layers.append(GraphConv(in_feats, n_hidden, activation=activation))
# hidden layers
for i in range(n_layers - 1):
self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation))
# output layer
self.layers.append(GraphConv(n_hidden, n_classes, activation=positive_safe_sigmoid))
def forward(self, features):
h = features
for i, layer in enumerate(self.layers):
h = layer(self.g, h)
return h
class GAT(nn.Module):
def __init__(self,
g,
activation,
in_dim=1,
num_classes=1,
num_layers=1,
num_hidden=8,
num_heads=8,
num_out_heads=1,
feat_drop=0,
attn_drop=0,
negative_slope=0.2,
residual=False):
heads = ([num_heads] * num_layers) + [num_out_heads]
super(GAT, self).__init__()
self.g = g
self.num_layers = num_layers
self.gat_layers = nn.ModuleList()
self.activation = activation
# input projection (no residual)
self.gat_layers.append(GATConv(
in_dim, num_hidden, heads[0],
feat_drop, attn_drop, negative_slope, False, self.activation))
# hidden layers
for l in range(1, num_layers):
# due to multi-head, the in_dim = num_hidden * num_heads
self.gat_layers.append(GATConv(
num_hidden * heads[l-1], num_hidden, heads[l],
feat_drop, attn_drop, negative_slope, residual, self.activation))
# output projection
self.gat_layers.append(GATConv(
num_hidden * heads[-2], num_classes, heads[-1],
feat_drop, attn_drop, negative_slope, residual, positive_safe_sigmoid))
def forward(self, inputs):
h = inputs
for l in range(self.num_layers):
h = self.gat_layers[l](self.g, h).flatten(1)
# output projection
logits = self.gat_layers[-1](self.g, h).mean(1)
return logits
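# Illustrative construction (a sketch; the random graph and feature sizes are placeholders):
#
#   import dgl, torch
#   g = dgl.add_self_loop(dgl.rand_graph(100, 500))
#   feats = torch.randn(100, 16)
#   model = GCN(g, in_feats=16, n_hidden=32, n_classes=1, n_layers=2, activation=torch.relu)
#   scores = model(feats)  # shape (100, 1), values in (0, 1] due to positive_safe_sigmoid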
|
the-stack_106_15231
|
#!/usr/bin/python
# encoding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import collections.abc
from captcha.utils import *
class strLabelConverter(object):
"""Convert between str and label.
NOTE:
Insert `blank` to the alphabet for CTC.
Args:
alphabet (str): set of the possible characters.
ignore_case (bool, default=True): whether or not to ignore all of the case.
"""
def __init__(self, alphabet, ignore_case=True):
self._ignore_case = ignore_case
if self._ignore_case:
alphabet = alphabet.lower()
self.alphabet = alphabet + '-' # for `-1` index
self.dict = {}
for i, char in enumerate(alphabet):
# NOTE: 0 is reserved for 'blank' required by wrap_ctc
self.dict[char] = i + 1
def encode(self, text):
"""Support batch or single str.
Args:
text (str or list of str): texts to convert.
Returns:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
"""
if isinstance(text, str):
text = [
self.dict[char.lower() if self._ignore_case else char]
for char in text
]
length = [len(text)]
        elif isinstance(text, collections.abc.Iterable):
length = [len(s) for s in text]
text = ''.join(text)
text, _ = self.encode(text)
return (torch.IntTensor(text), torch.IntTensor(length))
def decode(self, t, length, raw=False):
"""Decode encoded texts back into strs.
Args:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
Raises:
AssertionError: when the texts and its length does not match.
Returns:
text (str or list of str): texts to convert.
"""
if length.numel() == 1:
length = length[0]
assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
if raw:
return ''.join([self.alphabet[i - 1] for i in t])
else:
char_list = []
for i in range(length):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
char_list.append(self.alphabet[t[i] - 1])
return ''.join(char_list)
else:
# batch mode
assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(t.numel(), length.sum())
texts = []
index = 0
for i in range(length.numel()):
l = length[i]
texts.append(
self.decode(
t[index:index + l], torch.IntTensor([l]), raw=raw))
index += l
return texts
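    # Illustrative round trip (a sketch; the alphabet is an arbitrary example):
    #
    #   converter = strLabelConverter('0123456789abcdefghijklmnopqrstuvwxyz')
    #   text, length = converter.encode(['hello', 'ctc'])
    #   converter.decode(text, length)  # -> ['hello', 'ctc']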
class averager(object):
"""Compute average for `torch.Variable` and `torch.Tensor`. """
def __init__(self):
self.reset()
def add(self, v):
if isinstance(v, Variable):
count = v.data.numel()
v = v.data.sum()
elif isinstance(v, torch.Tensor):
count = v.numel()
v = v.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
def oneHot(v, v_length, nc):
batchSize = v_length.size(0)
maxLength = v_length.max()
v_onehot = torch.FloatTensor(batchSize, maxLength, nc).fill_(0)
acc = 0
for i in range(batchSize):
length = v_length[i]
label = v[acc:acc + length].view(-1, 1).long()
v_onehot[i, :length].scatter_(1, label, 1.0)
acc += length
return v_onehot
def loadData(v, data):
v.data.resize_(data.size()).copy_(data)
def prettyPrint(v):
print('Size {0}, Type: {1}'.format(str(v.size()), v.data.type()))
print('| Max: %f | Min: %f | Mean: %f' % (v.max().data[0], v.min().data[0],
v.mean().data[0]))
def assureRatio(img):
"""Ensure imgH <= imgW."""
b, c, h, w = img.size()
if h > w:
main = nn.UpsamplingBilinear2d(size=(h, h), scale_factor=None)
img = main(img)
return img
|
the-stack_106_15232
|
import pytest
from mixer.backend.django import mixer
pytestmark = pytest.mark.django_db
class TestPost:
def test_model(self):
obj = mixer.blend('comments.Post')
assert obj.pk == 1, 'Should create a Post instance'
def test_get_excerpt(self):
obj = mixer.blend('comments.Post', body='Hello World!')
result = obj.get_excerpt(5)
expected = 'Hello'
assert result == expected, ('Should return the given number of characters')
|
the-stack_106_15234
|
'''
MEDIUM 3. Longest Substring Without Repeating Characters
Given a string, find the length of the longest substring without repeating characters.
Example 1:
Input: "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
'''
def longestSubString(ogString):
    # Map each character seen so far to the last index at which it appeared
    mapa = {}
    indexBajo = 0
    maxLen = 0
    # The right edge of the window advances one character at a time
    for indexAlto in range(len(ogString)):
        # If the char has never been seen, the window simply grows; update the max length
        if ogString[indexAlto] not in mapa:
            print('The value was not in the map yet')
            maxLen = max(maxLen, indexAlto-indexBajo+1)
        else:
            print("The last index of char", ogString[indexAlto], "is", mapa[ogString[indexAlto]])
            # If the last occurrence of the char lies inside the current window, move the left edge past it
            if mapa[ogString[indexAlto]] >= indexBajo:
                print("The char", ogString[indexAlto], "was inside the window :(")
                indexBajo = mapa[ogString[indexAlto]] + 1
            # Otherwise the char was seen in the past but is outside the current window
            else:
                print("The char", ogString[indexAlto], "was not inside the window :D")
                maxLen = max(maxLen, indexAlto-indexBajo+1)
        mapa[ogString[indexAlto]] = indexAlto
        print(mapa)
    return maxLen
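# Worked example (sketch): for 'abcabcbb' the window grows to 'abc' (length 3); when the second
# 'a' arrives, its previous index lies inside the window, so indexBajo jumps past it and the
# window slides along without ever holding a repeat, leaving the answer at 3.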
a = longestSubString('acbdbacd')
print(a)
|
the-stack_106_15242
|
import sys
import time
import json
import arena
import re
import math
HOST = "arenaxr.org"
SCENE = "roomtest5"
sign_links = ['Link 1','https://3dwarehouse.sketchup.com/model/f2991e0c12644c9ff87d99a411d2b1c5/University-of-Southern-California-USC-Tower?hl=en','Link 2','https://www.f1.com','Link 3','https://www.eet.com']
waypoints = []
waypoints.append((0.0, 0.2, 0.0))
waypoints.append((-15.8, 0.2, -3.0))
waypoints.append((-1.3, 0.2, 6.4))
waypoints.append((-0.5, 0.4, 33.9))
waypoints.append((-11.5, 0.2, 38.7))
waypoints.append((-18.1, 0.2, 48.7))
waypoints.append((-46.4, 0.3, 44.1))
def drawpath(waypoints, name_seed, persist ):
print( waypoints)
lastPt = None
if name_seed == -1:
pathStr = "path" + str(random.randint(0, 1000000))
else:
pathStr = "path" + str(name_seed)
stepCnt=0
for pt in waypoints:
if lastPt is None:
lastPt = pt
continue
dist = math.sqrt( ((pt[0]-lastPt[0])*(pt[0]-lastPt[0]))+((pt[1]-lastPt[1])*(pt[1]-lastPt[1])) +((pt[2]-lastPt[2])*(pt[2]-lastPt[2])) )
totalSteps = int(dist / 0.3)
stepX = (pt[0]-lastPt[0])/(totalSteps)
stepY = (pt[1]-lastPt[1])/(totalSteps)
stepZ = (pt[2]-lastPt[2])/(totalSteps)
for i in range(0,totalSteps):
x=lastPt[0]+i*stepX
y=lastPt[1]+i*stepY
z=lastPt[2]+i*stepZ
pathobjstr = pathStr + str(stepCnt)
stepCnt+=1
arena.Object(objType=arena.Shape.circle,
objName=pathobjstr,
location=(x,y,z),
color=(0,255,255),scale=(0.1,0.1,1),
rotation=(-0.7,0.0,0.0,0.7),
data='{"material": {"transparent":true,"opacity": 0.3}}',
persist=persist);
lastPt = pt
def signs():
arena.Object(
objName="signpost1",
url="store/users/wiselab/models/signpost/scene.gltf",
objType=arena.Shape.gltf_model,
scale=(0.1, 0.1, 0.1),
rotation=(0, 0.3, 0, 1),
location=(-18.2, 0.2, -2.3),
clickable=True,
persist=True
)
arena.init(HOST, "realm", SCENE)
print("starting main loop")
drawpath(waypoints, 0, True)
signs()
arena.handle_events()
|
the-stack_106_15246
|
# -*- coding: utf-8 -*-
'''
Many aspects of the salt payload need to be managed, from the return of
encrypted keys to general payload dynamics and packaging, these happen
in here
'''
# Import python libs
from __future__ import absolute_import
# import sys # Use if sys is commented out below
import logging
import gc
import datetime
# Import salt libs
import salt.log
import salt.crypt
import salt.transport.frame
from salt.exceptions import SaltReqTimeoutError
from salt.utils import immutabletypes
# Import third party libs
import salt.ext.six as six
try:
import zmq
except ImportError:
# No need for zeromq in local mode
pass
log = logging.getLogger(__name__)
HAS_MSGPACK = False
try:
# Attempt to import msgpack
import msgpack
# There is a serialization issue on ARM and potentially other platforms
# for some msgpack bindings, check for it
if msgpack.loads(msgpack.dumps([1, 2, 3]), use_list=True) is None:
raise ImportError
HAS_MSGPACK = True
except ImportError:
# Fall back to msgpack_pure
try:
import msgpack_pure as msgpack # pylint: disable=import-error
HAS_MSGPACK = True
except ImportError:
# TODO: Come up with a sane way to get a configured logfile
# and write to the logfile when this error is hit also
LOG_FORMAT = '[%(levelname)-8s] %(message)s'
salt.log.setup_console_logger(log_format=LOG_FORMAT)
log.fatal('Unable to import msgpack or msgpack_pure python modules')
# Don't exit if msgpack is not available, this is to make local mode
# work without msgpack
#sys.exit(salt.defaults.exitcodes.EX_GENERIC)
if HAS_MSGPACK and not hasattr(msgpack, 'exceptions'):
class PackValueError(Exception):
'''
older versions of msgpack do not have PackValueError
'''
class exceptions(object):
'''
older versions of msgpack do not have an exceptions module
'''
PackValueError = PackValueError()
msgpack.exceptions = exceptions()
def package(payload):
'''
This method for now just wraps msgpack.dumps, but it is here so that
we can make the serialization a custom option in the future with ease.
'''
return msgpack.dumps(payload)
def unpackage(package_):
'''
Unpackages a payload
'''
return msgpack.loads(package_, use_list=True)
def format_payload(enc, **kwargs):
'''
Pass in the required arguments for a payload, the enc type and the cmd,
then a list of keyword args to generate the body of the load dict.
'''
payload = {'enc': enc}
load = {}
for key in kwargs:
load[key] = kwargs[key]
payload['load'] = load
return package(payload)
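# Illustrative sketch (the keyword names are hypothetical, not a fixed salt schema):
#   format_payload('clear', cmd='ping', id='minion1')
#   # -> msgpack bytes of {'enc': 'clear', 'load': {'cmd': 'ping', 'id': 'minion1'}}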
class Serial(object):
'''
Create a serialization object, this object manages all message
serialization in Salt
'''
def __init__(self, opts):
if isinstance(opts, dict):
self.serial = opts.get('serial', 'msgpack')
elif isinstance(opts, str):
self.serial = opts
else:
self.serial = 'msgpack'
def loads(self, msg, encoding=None, raw=False):
'''
Run the correct loads serialization format
:param encoding: Useful for Python 3 support. If the msgpack data
was encoded using "use_bin_type=True", this will
differentiate between the 'bytes' type and the
'str' type by decoding contents with 'str' type
to what the encoding was set as. Recommended
encoding is 'utf-8' when using Python 3.
If the msgpack data was not encoded using
"use_bin_type=True", it will try to decode
all 'bytes' and 'str' data (the distinction has
been lost in this case) to what the encoding is
set as. In this case, it will fail if any of
the contents cannot be converted.
'''
try:
gc.disable() # performance optimization for msgpack
if msgpack.version >= (0, 4, 0):
# msgpack only supports 'encoding' starting in 0.4.0.
# Due to this, if we don't need it, don't pass it at all so
# that under Python 2 we can still work with older versions
# of msgpack.
ret = msgpack.loads(msg, use_list=True, encoding=encoding)
else:
ret = msgpack.loads(msg, use_list=True)
if six.PY3 and encoding is None and not raw:
ret = salt.transport.frame.decode_embedded_strs(ret)
except Exception as exc:
            log.critical('Could not deserialize msgpack message. '
                         'This often happens when trying to read a file not opened in binary mode. '
                         'To see the message payload, enable debug logging and retry. Exception: {0}'.format(exc))
log.debug('Msgpack deserialization failure on message: {0}'.format(msg))
gc.collect()
raise
finally:
gc.enable()
return ret
def load(self, fn_):
'''
Run the correct serialization to load a file
'''
data = fn_.read()
fn_.close()
if data:
if six.PY3:
return self.loads(data, encoding='utf-8')
else:
return self.loads(data)
def dumps(self, msg, use_bin_type=False):
'''
Run the correct dumps serialization format
:param use_bin_type: Useful for Python 3 support. Tells msgpack to
differentiate between 'str' and 'bytes' types
by encoding them differently.
Since this changes the wire protocol, this
option should not be used outside of IPC.
'''
try:
if msgpack.version >= (0, 4, 0):
# msgpack only supports 'use_bin_type' starting in 0.4.0.
# Due to this, if we don't need it, don't pass it at all so
# that under Python 2 we can still work with older versions
# of msgpack.
return msgpack.dumps(msg, use_bin_type=use_bin_type)
else:
return msgpack.dumps(msg)
except (OverflowError, msgpack.exceptions.PackValueError):
# msgpack can't handle the very long Python longs for jids
# Convert any very long longs to strings
# We borrow the technique used by TypeError below
def verylong_encoder(obj):
if isinstance(obj, dict):
for key, value in six.iteritems(obj.copy()):
obj[key] = verylong_encoder(value)
return dict(obj)
elif isinstance(obj, (list, tuple)):
obj = list(obj)
for idx, entry in enumerate(obj):
obj[idx] = verylong_encoder(entry)
return obj
# This is a spurious lint failure as we are gating this check
# behind a check for six.PY2.
                if six.PY2 and isinstance(obj, long) and obj > pow(2, 64):  # pylint: disable=incompatible-py3-code
                    return str(obj)
                elif six.PY3 and isinstance(obj, int) and obj > pow(2, 64):
return str(obj)
else:
return obj
if msgpack.version >= (0, 4, 0):
return msgpack.dumps(verylong_encoder(msg), use_bin_type=use_bin_type)
else:
return msgpack.dumps(verylong_encoder(msg))
except TypeError as e:
# msgpack doesn't support datetime.datetime datatype
# So here we have converted datetime.datetime to custom datatype
# This is msgpack Extended types numbered 78
def default(obj):
return msgpack.ExtType(78, obj)
def dt_encode(obj):
datetime_str = obj.strftime("%Y%m%dT%H:%M:%S.%f")
if msgpack.version >= (0, 4, 0):
return msgpack.packb(datetime_str, default=default, use_bin_type=use_bin_type)
else:
return msgpack.packb(datetime_str, default=default)
def datetime_encoder(obj):
if isinstance(obj, dict):
for key, value in six.iteritems(obj.copy()):
obj[key] = datetime_encoder(value)
return dict(obj)
elif isinstance(obj, (list, tuple)):
obj = list(obj)
for idx, entry in enumerate(obj):
obj[idx] = datetime_encoder(entry)
return obj
if isinstance(obj, datetime.datetime):
return dt_encode(obj)
else:
return obj
def immutable_encoder(obj):
log.debug('IMMUTABLE OBJ: {0}'.format(obj))
if isinstance(obj, immutabletypes.ImmutableDict):
return dict(obj)
if isinstance(obj, immutabletypes.ImmutableList):
return list(obj)
if isinstance(obj, immutabletypes.ImmutableSet):
return set(obj)
if "datetime.datetime" in str(e):
if msgpack.version >= (0, 4, 0):
return msgpack.dumps(datetime_encoder(msg), use_bin_type=use_bin_type)
else:
return msgpack.dumps(datetime_encoder(msg))
elif "Immutable" in str(e):
if msgpack.version >= (0, 4, 0):
return msgpack.dumps(msg, default=immutable_encoder, use_bin_type=use_bin_type)
else:
return msgpack.dumps(msg, default=immutable_encoder)
if msgpack.version >= (0, 2, 0):
# Should support OrderedDict serialization, so, let's
# raise the exception
raise
# msgpack is < 0.2.0, let's make its life easier
# Since OrderedDict is identified as a dictionary, we can't
# make use of msgpack custom types, we will need to convert by
# hand.
# This means iterating through all elements of a dictionary or
# list/tuple
def odict_encoder(obj):
if isinstance(obj, dict):
for key, value in six.iteritems(obj.copy()):
obj[key] = odict_encoder(value)
return dict(obj)
elif isinstance(obj, (list, tuple)):
obj = list(obj)
for idx, entry in enumerate(obj):
obj[idx] = odict_encoder(entry)
return obj
return obj
if msgpack.version >= (0, 4, 0):
return msgpack.dumps(odict_encoder(msg), use_bin_type=use_bin_type)
else:
return msgpack.dumps(odict_encoder(msg))
except (SystemError, TypeError) as exc: # pylint: disable=W0705
            log.critical('Unable to serialize message! Consider upgrading msgpack. '
                         'Message which failed was {0} '
                         'with exception {1}'.format(msg, exc))
def dump(self, msg, fn_):
'''
Serialize the correct data into the named file object
'''
if six.PY2:
fn_.write(self.dumps(msg))
else:
# When using Python 3, write files in such a way
# that the 'bytes' and 'str' types are distinguishable
# by using "use_bin_type=True".
fn_.write(self.dumps(msg, use_bin_type=True))
fn_.close()
class SREQ(object):
'''
Create a generic interface to wrap salt zeromq req calls.
'''
def __init__(self, master, id_='', serial='msgpack', linger=0, opts=None):
self.master = master
self.id_ = id_
self.serial = Serial(serial)
self.linger = linger
self.context = zmq.Context()
self.poller = zmq.Poller()
self.opts = opts
@property
def socket(self):
'''
Lazily create the socket.
'''
if not hasattr(self, '_socket'):
# create a new one
self._socket = self.context.socket(zmq.REQ)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
self._set_tcp_keepalive()
if self.master.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self._socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self._socket.setsockopt(zmq.IPV4ONLY, 0)
self._socket.linger = self.linger
if self.id_:
self._socket.setsockopt(zmq.IDENTITY, self.id_)
self._socket.connect(self.master)
return self._socket
def _set_tcp_keepalive(self):
if hasattr(zmq, 'TCP_KEEPALIVE') and self.opts:
if 'tcp_keepalive' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
def clear_socket(self):
'''
delete socket if you have it
'''
if hasattr(self, '_socket'):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
log.trace('Unregistering socket: {0}'.format(socket))
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
log.trace('Unregistering socket: {0}'.format(socket))
self.poller.unregister(socket[0])
del self._socket
def send(self, enc, load, tries=1, timeout=60):
'''
Takes two arguments, the encryption type and the base payload
'''
payload = {'enc': enc}
payload['load'] = load
pkg = self.serial.dumps(payload)
self.socket.send(pkg)
self.poller.register(self.socket, zmq.POLLIN)
tried = 0
while True:
polled = self.poller.poll(timeout * 1000)
tried += 1
if polled:
break
if tries > 1:
log.info('SaltReqTimeoutError: after {0} seconds. (Try {1} of {2})'.format(
timeout, tried, tries))
if tried >= tries:
self.clear_socket()
raise SaltReqTimeoutError(
'SaltReqTimeoutError: after {0} seconds, ran {1} tries'.format(timeout * tried, tried)
)
return self.serial.loads(self.socket.recv())
def send_auto(self, payload, tries=1, timeout=60):
'''
Detect the encryption type based on the payload
'''
enc = payload.get('enc', 'clear')
load = payload.get('load', {})
return self.send(enc, load, tries, timeout)
def destroy(self):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
if socket.closed is False:
socket.setsockopt(zmq.LINGER, 1)
socket.close()
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
if socket[0].closed is False:
socket[0].setsockopt(zmq.LINGER, 1)
socket[0].close()
self.poller.unregister(socket[0])
if self.socket.closed is False:
self.socket.setsockopt(zmq.LINGER, 1)
self.socket.close()
if self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
|
the-stack_106_15247
|
#Uses python3
import sys
import queue
def Dijkstra(adj, s, cost, t):
dist = list()
prev = list()
inf = 0
for c in cost:
inf += sum(c)
inf += 1
for u in range(0, len(adj)):
dist.append(inf)
prev.append(None)
dist[s] = 0
H = queue.PriorityQueue()
for i, d in enumerate(dist):
H.put((d, i))
processed = set()
    while not H.empty():
        u = H.get()[1]
        if u in processed:
            continue
        processed.add(u)
        for i, v in enumerate(adj[u]):
            if dist[v] > dist[u] + cost[u][i]:
                dist[v] = dist[u] + cost[u][i]
                prev[v] = u
                H.put((dist[v], v))
if dist[t]< inf:
return dist[t]
else:
return -1
def distance(adj, cost, s, t):
return Dijkstra(adj, s, cost, t)
if __name__ == '__main__':
input = sys.stdin.read()
# with open('test', 'r') as f:
# input = f.read()
data = list(map(int, input.split()))
n, m = data[0:2]
data = data[2:]
edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))
data = data[3 * m:]
adj = [[] for _ in range(n)]
cost = [[] for _ in range(n)]
for ((a, b), w) in edges:
adj[a - 1].append(b - 1)
cost[a - 1].append(w)
s, t = data[0] - 1, data[1] - 1
print(distance(adj, cost, s, t))
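# Example stdin (a sketch): 4 vertices, 4 directed edges, query the distance from 1 to 3.
#   4 4
#   1 2 1
#   4 1 2
#   2 3 2
#   1 3 5
#   1 3
# Expected output: 3 (via 1 -> 2 -> 3)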
|
the-stack_106_15250
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
# Fetch test results from Google Storage
import os
import json
import requests
import gzip
from argparse import ArgumentParser
from datetime import datetime
from collections import defaultdict
from typing import List
from multiprocessing.pool import ThreadPool
STORAGE_API_AT = 'https://www.googleapis.com/storage/v1/b/fpga-tool-perf/o'
TESTRES_PREFIX = 'artifacts/prod/foss-fpga-tools/fpga-tool-perf'
TEST_RESULT_DELIMITER = 'results-generic-all.json.gz'
DOWNLOAD_BASE_URL = 'https://storage.googleapis.com/fpga-tool-perf'
# Iterate over all result pages of the GCS JSON API.
def iter_result_pages(url: str):
req_url = url
while True:
resp = requests.get(
url=req_url, headers={'Content-Type': 'application/json'}
)
data = resp.json()
yield data
next_page_token = data.get('nextPageToken')
if not next_page_token:
break
req_url = f'{url}&pageToken={next_page_token}'
def get_compound_result_file_path(test_run: int, builds: str):
url = f'{STORAGE_API_AT}?delimiter={TEST_RESULT_DELIMITER}' \
f'&prefix={TESTRES_PREFIX}/{builds}/{test_run}/'
for data in iter_result_pages(url):
prefixes = data.get('prefixes')
if prefixes:
return prefixes[0]
def download_meta(path: str, binary: bool = False):
print(f'Downloading `{path}`')
req_url = f'{DOWNLOAD_BASE_URL}/{path}'
resp = requests.get(
url=req_url, headers={"Content-Type": "application/json"}
)
if not binary:
return resp.text
return resp.content
def are_results_compound(prefixes: List[str]):
return len(prefixes) == 1
def datetime_from_str(s: str):
return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
def merge_results(metas, filter=None):
earliest_dt = datetime.now()
projects = defaultdict(lambda: {'results': defaultdict(lambda: [])})
res_types = [
'board', 'toolchain', 'runtime', 'resources', 'maximum_memory_use',
'max_freq', 'device', 'status', 'versions'
]
for meta in metas:
try:
dt = datetime_from_str(meta['date'])
if dt < earliest_dt:
earliest_dt = dt
results = projects[meta['project']]['results']
if filter is not None:
if not filter(meta):
continue
# This is a rather incomplete list, but it should do the job
for res_type in res_types:
results[res_type].append(meta[res_type])
except KeyError as e:
print(f'Skipping a meta file because of {e}')
for project in projects.values():
project['date'] = \
f'{earliest_dt.year}-{earliest_dt.month}-{earliest_dt.day}T' \
f'{earliest_dt.hour}:{earliest_dt.minute}:{earliest_dt.second}'
return projects
def get_legacy_metas(gcs_paths: list, test_no: int):
for path in gcs_paths:
try:
meta_json = download_meta(path)
except requests.exceptions.ConnectionError as e:
print(f'ERROR: failed to download {path}: {e}')
continue
meta: dict
try:
meta = json.loads(meta_json)
except json.decoder.JSONDecodeError:
# Yes this has actually happened once for some reason
print('ERROR: CAN\'T DECODE THE JSON FROM GCS')
with open(f'faulty_json-{test_no}.json', 'w') as f:
f.write(meta_json)
continue
yield meta
def download_and_merge_legacy(gcs_paths: list, test_no: int):
def accept_generic_all_build_only(meta):
return meta['build_type'] == 'generic-all' and meta['build'] == '000'
metas = get_legacy_metas(gcs_paths, test_no)
merged = merge_results(metas, filter=accept_generic_all_build_only)
return merged
def download_and_split_compound(gcs_compound_path: str):
meta_json_gz = download_meta(gcs_compound_path, binary=True)
meta_json = gzip.decompress(meta_json_gz).decode()
meta = json.loads(meta_json)
projects = defaultdict(lambda: {'results': defaultdict(lambda: [])})
meta_results = meta['results']
meta_projects = meta_results['project']
for idx, project in enumerate(meta_projects):
project_res = projects[project]['results']
for k, v in meta_results.items():
if k in ['project']:
continue
project_res[k].append(v[idx])
for project in projects.values():
project['date'] = meta['date']
return projects
def get_test_run_numbers(start: int, end: str, builds: str):
url = f'{STORAGE_API_AT}?delimiter=/&prefix={TESTRES_PREFIX}/{builds}/'
for data in iter_result_pages(url):
for prefix in data['prefixes']:
no = int(prefix.split('/')[-2])
if no >= start and (end == '_' or no <= int(end)):
yield no
# -------------------------------------------------------------------- #
def get_download_specs(test_info):
test_no = test_info['test_no']
builds = test_info['builds']
print(f'Preparing downloads for test {test_no}')
gcs_compound_path = None
gcs_paths = None
try:
gcs_compound_path = get_compound_result_file_path(test_no, builds)
except Exception as e:
print(
            f'Failed to fetch paths for test run no. {test_no}, cause: {e}'
)
return None
if gcs_compound_path:
return dict(test_no=test_no, paths=gcs_compound_path, compound=True)
return None
def download_from_specs(specs):
test_no = specs['test_no']
if specs['compound']:
merged = download_and_split_compound(specs['paths'])
else:
merged = download_and_merge_legacy(specs['paths'], test_no)
for project_name, merged_data in merged.items():
project_dir = os.path.join(specs['output_dir'], project_name)
out_filename = os.path.join(project_dir, f'meta-{test_no}.json')
os.makedirs(project_dir, exist_ok=True)
merged_json = json.dumps(merged_data, indent=4)
with open(out_filename, 'w') as f:
f.write(merged_json)
print(f'Downloaded test no. {test_no}')
def main():
parser = ArgumentParser()
parser.add_argument(
'--builds', type=str, help='Builds type (e.g. `nightly`)'
)
parser.add_argument('--from-build', type=int, help='First test run number')
parser.add_argument(
'--to-build',
default="_",
type=str,
help='Last test run number (use `_` for "latest")'
)
parser.add_argument(
'--output-dir', type=str, help='Output directory for downloaded data'
)
parser.add_argument(
'--pool-size', type=int, default=8, help='Size of thread pool'
)
args = parser.parse_args()
if not os.path.isdir(args.output_dir):
print('ERROR: Output path is not a directory!')
exit(-1)
print(f'Using {args.pool_size} parallel threads.')
pool = ThreadPool(args.pool_size)
test_numbers = list(
get_test_run_numbers(args.from_build, args.to_build, args.builds)
)
print('Preparing downloads ...', flush=True)
tests = [
dict(test_no=test_no, builds=args.builds) for test_no in test_numbers
]
download_specs = pool.map(get_download_specs, tests)
download_specs = list(
filter(None, download_specs)
) # remove None resulting from errors
for specs in download_specs:
specs['output_dir'] = args.output_dir
url_count = 0
for specs in download_specs:
if not specs['compound']:
url_count += len(specs['paths'])
else:
url_count += 1
print(f'Downloading {url_count} URLs ...', flush=True)
results = pool.map(download_from_specs, download_specs)
print('Done')
if __name__ == "__main__":
main()
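# Example invocation (illustrative only; the script name, build type and run
# numbers below are assumptions, not values taken from this repository):
#
#   python download_results.py \
#       --builds nightly \
#       --from-build 100 \
#       --to-build _ \
#       --output-dir ./downloaded_results \
#       --pool-size 8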
|
the-stack_106_15251
|
"""Module for creating socket to receive packets.
"""
from builtins import str
from builtins import object
import errno
import socket
import signal
import time
import traceback
from select import select
from multiprocessing import Process, Queue
from optparse import OptionParser
from scapy.data import *
from scapy.config import conf
from scapy.utils import PcapReader
from scapy import plist
from scapy.layers.inet import IP, TCP, UDP, ICMP
from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest
try:
# Running from the source repo "test".
from tcutils.pkgs.Traffic.traffic.core.profile import *
from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger
from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL
from tcutils.pkgs.Traffic.traffic.utils.util import *
except ImportError:
# Distributed and installed as package
from traffic.core.profile import *
from traffic.utils.logger import LOGGER, get_logger
from traffic.utils.globalvars import LOG_LEVEL
from traffic.utils.util import *
LOGGER = "%s.core.listener" % LOGGER
log = get_logger(name=LOGGER, level=LOG_LEVEL)
MTU = 65565
class CaptureBase(Process):
def __init__(self, name, **kwargs):
super(CaptureBase, self).__init__()
self.kwargs = kwargs
log.debug("Filter is: %s", self.kwargs['filter'])
self.capture = True
self.pcap = []
self.filtered_pcap = []
self.corrupted_pcap = []
self.resultsfile = "/tmp/%s.results" % name
@conf.commands.register
def sniff(self, count=0, store=1, timeout=None, stopperTimeout=None, stopper=None, chksum=False, *arg, **karg):
"""Sniff packets
sniff([count=0,] [store=1,] [stopper] + args) -> list of packets
count: number of packets to capture. 0 means infinity
        store: whether to store sniffed packets or discard them
timeout: stop sniffing after a given time (default: None)
        stopperTimeout: break out of the select after this many seconds to re-check stopper
stopper: function returning true or false to stop the sniffing process
"""
self.chksum = chksum
c = 0 # Total packets
L2socket = conf.L2listen
self.sock = L2socket(type=ETH_P_ALL, *arg, **karg)
if timeout is not None:
stoptime = time.time() + timeout
remain = None
if stopperTimeout is not None:
stopperStoptime = time.time() + stopperTimeout
remainStopper = None
last_pkt = None
while self.capture:
if timeout is not None:
remain = stoptime - time.time()
if remain <= 0:
break
sel = select([self.sock], [], [], remain)
if self.sock in sel[0]:
p = self.sock.recv(MTU)
else:
p = self.sock.recv(MTU)
if p is None:
continue
if p == last_pkt:
last_pkt = None
                # sniff sees each packet twice; this works around that.
                # When time permits, we should debug the root cause.
log.debug("Duplicate, Skip counting this packet")
continue
last_pkt = p
log.debug(repr(p))
# Discard the first ssh keepalive packet
try:
dport = p[TCP].dport
sport = p[TCP].sport
if dport == 22 or sport == 22:
log.debug("Discard the ssh keepalive packet")
continue
except IndexError:
pass
if store:
self.pcap.append(p)
if self.count_tcp(p):
c += 1
log.debug("Total packets received: %s", c)
self.update_result(c, len(self.corrupted_pcap))
if count > 0 and c >= count:
break
if stopper and stopper(p):
break
continue
if self.count_icmp(p):
c += 1
log.debug("Total packets received: %s", c)
self.update_result(c, len(self.corrupted_pcap))
if count > 0 and c >= count:
break
if stopper and stopper(p):
break
continue
if self.count_udp(p):
c += 1
log.debug("Total packets received: %s", c)
self.update_result(c, len(self.corrupted_pcap))
if count > 0 and c >= count:
break
if stopper and stopper(p):
break
continue
def checksum(self, p, proto):
return self.verify_l3_checksum(p) and self.verify_l4_checksum(p, proto)
def verify_l3_checksum(self, p):
try:
l3_chksum = p[IP].chksum
except IndexError:
log.debug("skipping checksum verification for v6 packets")
return True
log.debug("Received L3 checksum: %s", l3_chksum)
del p[IP].chksum
p = p.__class__(bytes(p))
log.debug("Calculated L3 checksum: %s", p[IP].chksum)
if p[IP].chksum == l3_chksum:
return True
return False
def verify_l4_checksum(self, p, proto):
try:
l4_chksum = p[proto].chksum
del p[proto].chksum
except AttributeError:
l4_chksum = p[proto].cksum
del p[proto].cksum
log.debug("Received L4 checksum: %s", l4_chksum)
p = p.__class__(bytes(p))
try:
calc_l4_chksum = p[proto].chksum
except AttributeError:
calc_l4_chksum = p[proto].cksum
log.debug("Calculated L4 checksum: %s", calc_l4_chksum)
if calc_l4_chksum == l4_chksum:
return True
return False
def count_tcp(self, p):
try:
proto = p[IP].proto
af = IP
except IndexError:
try:
proto = p[IPv6].nh
af = IPv6
except IndexError:
return 0
if proto == 6:
log.debug("Protocol is TCP")
if self.chksum and not self.checksum(p, TCP):
self.corrupted_pcap.append(p)
if ((af is IPv6) or not p[IP].frag == "MF") and p[TCP].flags == 24:
# count only TCP PUSH ACK packet.
log.debug("Packet is unfagmented and tcp flag is PUSH")
self.filtered_pcap.append(p)
return 1
return 0
def count_udp(self, p):
try:
proto = p[IP].proto
af = IP
except IndexError:
try:
proto = p[IPv6].nh
af = IPv6
except IndexError:
return 0
if proto == 17:
log.debug("Protocol is UDP")
if self.chksum and not self.checksum(p, UDP):
self.corrupted_pcap.append(p)
if af is IPv6 or not p[IP].frag == "MF":
# count only unfragmented packet.
log.debug("Packet is unfagmented")
self.filtered_pcap.append(p)
return 1
return 0
def count_icmp(self, p):
try:
icmp_type = p[ICMP].type
proto = ICMP
except IndexError:
try:
icmp_type = p[IPv6][ICMPv6EchoRequest].type
proto = ICMPv6EchoRequest
except IndexError:
return 0
if (proto is ICMP and icmp_type == 8) or \
(proto is ICMPv6EchoRequest and icmp_type == 128):
# count only ICMP Echo Request
log.debug("ICMP echo request")
self.filtered_pcap.append(p)
if self.chksum and not self.checksum(p, proto):
self.corrupted_pcap.append(p)
return 1
return 0
def run(self):
try:
self.sniff(**self.kwargs)
        except socket.error as sock_err:
            (code, msg) = sock_err.args
if code != errno.EINTR:
raise
except Exception as err:
log.warn(traceback.format_exc())
finally:
self.sock.close()
self.pcap = plist.PacketList(self.filtered_pcap, "Sniffed")
log.debug("Total packets received: %s", len(self.pcap))
self.update_result(len(self.pcap), len(self.corrupted_pcap))
def update_result(self, recv, corrupt):
result = "Received=%s\nCorrupted=%s" % (recv, corrupt)
fd = open(self.resultsfile, 'w')
fd.write(result)
fd.flush()
fd.close()
def stop(self):
self.capture = False
self.terminate()
self.sock.close()
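# A minimal usage sketch for CaptureBase (assumed standalone use; in this
# module the sniffer is normally created and driven by PktListener below, and
# opening an L2 listening socket requires root privileges):
#
#   cap = CaptureBase("demo", filter="udp and port 5000", count=10, timeout=30)
#   cap.daemon = True
#   cap.start()
#   cap.join()
#   # Counts are written to /tmp/demo.results as "Received=<n>\nCorrupted=<m>".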
class ListenerBase(Process):
def __init__(self, sock):
super(ListenerBase, self).__init__()
self.sock = sock
self.listen = True
def run(self):
try:
while self.listen:
pkt = self.sock.recv(MTU)
        except socket.error as sock_err:
            (code, msg) = sock_err.args
if code != errno.EINTR:
raise
def stop(self):
self.listen = False
self.terminate()
self.sock.close()
class UDPListener(ListenerBase):
def __init__(self, ip, port):
af = socket.AF_INET
if is_v6(ip):
af = socket.AF_INET6
sock = socket.socket(af, socket.SOCK_DGRAM)
sock.bind((ip, int(port)))
super(UDPListener, self).__init__(sock)
class TCPListener(ListenerBase):
def __init__(self, ip, port):
af = socket.AF_INET
if is_v6(ip):
af = socket.AF_INET6
sock = socket.socket(af, socket.SOCK_STREAM)
sock.bind((ip, int(port)))
sock.listen(1)
super(TCPListener, self).__init__(sock)
def run(self):
while self.listen:
conn, address = self.sock.accept()
# self.sock.recv(MTU)
class PktListener(object):
def __init__(self, params):
self.profile_name = params.name
self.profile = load(params.profile)
self.stream = self.profile.stream
log.debug("Profile: %s", self.profile.__dict__)
log.debug("Stream: %s", self.stream.__dict__)
log.debug("Stream L3: %s", self.stream.l3.__dict__)
if self.stream.l4 is not None:
log.debug("Stream L4: %s", self.stream.l4.__dict__)
self.create_listener()
self.create_sniffer()
self.pcap = 0
def _join(self, *args):
return " ".join(args)
def _make_filter(self):
capfilter = ''
proto = self.stream.get_l4_proto()
if proto:
proto = 'icmp6' if proto == 'icmpv6' else proto
capfilter = self._join(capfilter, proto)
if hasattr(self.stream.l4, 'dport'):
capfilter = self._join(
capfilter, "port", str(self.stream.l4.dport))
return capfilter
def create_listener(self):
if self.profile.listener:
listen_at = self.profile.listener
else:
listen_at = self.stream.l3.dst
self.listener = None
if self.stream.get_l4_proto() == 'tcp':
self.listener = TCPListener(listen_at, self.stream.l4.dport)
elif self.stream.get_l4_proto() == 'udp':
self.listener = UDPListener(listen_at, self.stream.l4.dport)
if self.listener:
self.listener.daemon = 1
def _standard_traffic(self):
count = self.profile.count
return count
def _burst_traffic(self):
count = self.profile.burst_count * self.profile.count
return count
def _continuous_traffic(self):
pass
def create_sniffer(self):
kwargs = {}
if self.profile.iface:
kwargs.update({'iface': self.profile.iface})
if not self.profile.capfilter:
capfilter = self._make_filter()
else:
capfilter = self.profile.capfilter
kwargs.update({'filter': capfilter})
if (isinstance(self.profile, ContinuousProfile) or
isinstance(self.profile, ContinuousSportRange)):
self._continuous_traffic()
elif isinstance(self.profile, BurstProfile):
kwargs.update({'count': self._burst_traffic()})
elif isinstance(self.profile, StandardProfile):
kwargs.update({'count': self._standard_traffic()})
if self.profile.stopper:
kwargs.update({'stopper': self.profile.stopper})
if self.profile.timeout:
kwargs.update({'timeout': self.profile.timeout})
if self.profile.chksum:
kwargs.update({'chksum': self.profile.chksum})
self.sniffer = CaptureBase(self.profile_name, **kwargs)
self.sniffer.daemon = 1
def start(self):
# Set the signal handler
signal.signal(signal.SIGTERM, self.handler)
try:
if self.listener:
self.listener.start()
self.sniffer.start()
self.sniffer.join()
except Exception as err:
log.warn(traceback.format_exc())
finally:
self.stop()
def stop(self):
try:
self.sniffer.stop()
if self.listener:
self.listener.stop()
except:
pass
finally:
self.pcap = len(self.sniffer.pcap)
def handler(self, signum, frame):
self.stop()
class ListenerArgParser(object):
def parse(self):
parser = OptionParser()
parser.add_option("-n", "--name",
dest="name",
help="Name for this traffic profile.")
parser.add_option("-p", "--profile",
dest="profile",
help="Traffic profile to be used to receive packets.")
parser.add_option("-S", "--stop",
dest="stop",
action="store_true",
default=False,
help="Stop this traffic listener.")
parser.add_option("-P", "--poll",
dest="poll",
action="store_true",
default=False,
help="poll for packets recieved at traffic listener.")
opts, args = parser.parse_args()
return opts
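# Example command lines for the options parsed above (the module entry point
# that consumes these options is not part of this snippet, so the script name
# and profile path are placeholders):
#
#   python listener.py -n stream1 -p /tmp/stream1.profile   # start listening
#   python listener.py -n stream1 -P                        # poll received count
#   python listener.py -n stream1 -S                        # stop the listener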
|
the-stack_106_15252
|
import sys
sys.path.insert(1, '../Phase1')
from features_images import FeaturesImages
import misc
import os
from pathlib import Path
from PCA import PCAModel
from SVD import SVD
from NMF import NMFModel
from LDA import LDAModel
class Decomposition:
def __init__(self, decomposition_name='', k_components=10, feature_extraction_model_name=None, test_folder_path=None,
metadata_images_list=None, metadata_label=None):
self.decomposition_name = decomposition_name
self.k_components = k_components
self.decomposition_model = None
self.feature_extraction_model_name = feature_extraction_model_name
self.test_folder_path = test_folder_path
self.feature_extraction_object = FeaturesImages(self.feature_extraction_model_name, self.test_folder_path)
self.feature_extraction_model = self.feature_extraction_object.get_model()
self.database_matrix = []
self.database_image_id = []
self.reduced_pickle_file_folder = os.path.join(os.path.dirname(__file__), 'pickle_files')
self.metadata_images_list = metadata_images_list
self.metadata_label = metadata_label or ''
self.set_database_matrix()
def set_database_matrix(self):
parent_directory_path = Path(os.path.dirname(__file__)).parent
pickle_file_directory = os.path.join(parent_directory_path, 'Phase1')
print('Getting the Model Features from Phase1')
self.feature_extraction_object.compute_features_images_folder()
database_images_features = misc.load_from_pickle(pickle_file_directory, self.feature_extraction_model_name)
if self.metadata_images_list is not None:
print("Taking images based on metadata")
for image_id in self.metadata_images_list:
self.database_matrix.append(database_images_features[image_id])
self.database_image_id.append(image_id)
else:
for image_id, feature_vector in database_images_features.items():
self.database_matrix.append(feature_vector)
self.database_image_id.append(image_id)
def dimensionality_reduction(self):
# self.set_database_matrix()
        # Note: when the number of images or features is <= 20 we get an error,
        # because the reduction models require n_components <= min(n, m),
        # where (n, m) is the shape of database_matrix.
        # Hence we take min(min(len(self.database_matrix[0]), len(self.database_matrix)), 20).
if self.decomposition_name == 'PCA':
self.decomposition_model = PCAModel(self.database_matrix, self.k_components, self.database_image_id)
elif self.decomposition_name == 'SVD':
self.decomposition_model = SVD(self.database_matrix, self.k_components, self.database_image_id)
elif self.decomposition_name == 'NMF':
self.decomposition_model = NMFModel(self.database_matrix, self.k_components, self.database_image_id)
elif self.decomposition_name == 'LDA':
self.decomposition_model = LDAModel(self.database_matrix, self.k_components, self.database_image_id)
self.decomposition_model.decompose()
print('Decomposition Complete')
decomposed_database_matrix = self.decomposition_model.get_decomposed_data_matrix()
reduced_dimension_folder_images_dict = {}
for image_id, reduced_feature_vector in zip(self.database_image_id, decomposed_database_matrix):
reduced_dimension_folder_images_dict[image_id] = reduced_feature_vector
if self.metadata_label != '':
misc.save2pickle(reduced_dimension_folder_images_dict, self.reduced_pickle_file_folder,
feature=(self.feature_extraction_model_name+'_'+self.decomposition_name+
'_' + self.metadata_label))
else:
misc.save2pickle(reduced_dimension_folder_images_dict, self.reduced_pickle_file_folder,
feature=(self.feature_extraction_model_name + '_' + self.decomposition_name))
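# A minimal usage sketch (the feature model name and folder path below are
# placeholders; see FeaturesImages in Phase1 for the model names it accepts):
#
#   decomposition = Decomposition(
#       decomposition_name='PCA',
#       k_components=10,
#       feature_extraction_model_name='HOG',
#       test_folder_path='/path/to/test_images',
#   )
#   decomposition.dimensionality_reduction()
#   # Reduced vectors are pickled into ./pickle_files with feature name 'HOG_PCA'.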
|
the-stack_106_15254
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
@testing.parameterize(*(testing.product({
'contiguous': ['C', None],
'cover_all': [True, False],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
'dilate': [1],
'groups': [1, 2],
'nobias': [True, False],
}) + testing.product({
'contiguous': [None],
'cover_all': [False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'dilate': [1],
'groups': [1, 2],
'nobias': [True, False],
})))
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestConvolution2DFunction(testing.FunctionTestCase):
def setUp(self):
self.batches = 2
self.in_channels_a_group = 3
self.out_channels_a_group = 2
self.in_channels = self.in_channels_a_group * self.groups
self.out_channels = self.out_channels_a_group * self.groups
self.kh, self.kw = (3, 3)
self.stride = 2
self.pad = (
int(self.kh / 2) * self.dilate, int(self.kw / 2) * self.dilate)
self.check_forward_options.update({
'atol': 5e-4, 'rtol': 5e-3
})
self.check_backward_options.update({
'atol': 5e-4, 'rtol': 5e-3
})
self.check_double_backward_options.update({
'atol': 5e-4, 'rtol': 5e-3
})
if numpy.float16 in (self.x_dtype, self.W_dtype):
self.check_forward_options.update({
'atol': 1e-3, 'rtol': 1e-2
})
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3
})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2
})
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
W = numpy.random.normal(
0, numpy.sqrt(1. / (self.kh * self.kw * self.in_channels_a_group)),
(self.out_channels, self.in_channels_a_group, self.kh, self.kw)
).astype(self.W_dtype)
x = numpy.random.uniform(
-1, 1, (self.batches, self.in_channels, 4, 3)).astype(self.x_dtype)
if self.nobias:
return x, W
else:
b = numpy.random.uniform(
-1, 1, self.out_channels).astype(self.x_dtype)
return x, W, b
def forward_expected(self, inputs):
"""
Current forward_expected implementation depends on
F.convolution_2d itself and thus it's only capable
of checking consistency between backends, not absolute
correctness of computations
"""
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
with chainer.using_config('use_ideep', 'never'):
y_expected = F.convolution_2d(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
return y_expected.array,
def forward(self, inputs, device):
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
return F.convolution_2d(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups),
@testing.parameterize(*(testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'cudnn_deterministic': [False, True],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'dilate': [1],
'groups': [1, 2],
}) + testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'cudnn_deterministic': [False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'dilate': [2],
'groups': [1, 2],
})))
@attr.cudnn
class TestConvolution2DCudnnCall(unittest.TestCase):
def setUp(self):
batches = 2
in_channels_a_group = 3
out_channels_a_group = 2
in_channels = in_channels_a_group * self.groups
out_channels = out_channels_a_group * self.groups
kh, kw = (3, 3)
self.stride = 2
self.pad = (int(kh / 2) * self.dilate, int(kw / 2) * self.dilate)
self.x = cuda.cupy.random.uniform(
-1, 1, (batches, in_channels, 4, 3)).astype(self.dtype)
self.W = cuda.cupy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
(out_channels, in_channels_a_group, kh, kw)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(
-1, 1, (batches, out_channels, 2, 2)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.should_call_cudnn = chainer.should_use_cudnn('>=auto')
if self.dilate > 1 and cuda.cuda.cudnn.getVersion() < 6000:
self.should_call_cudnn = False
if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
self.should_call_cudnn = False
def forward(self):
x = chainer.Variable(self.x)
W = chainer.Variable(self.W)
return F.convolution_2d(x, W, None, stride=self.stride, pad=self.pad,
dilate=self.dilate, groups=self.groups)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with chainer.using_config('cudnn_deterministic',
self.cudnn_deterministic):
with testing.patch('cupy.cudnn.convolution_forward') as func:
self.forward()
self.assertEqual(func.called, self.should_call_cudnn)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with chainer.using_config('cudnn_deterministic',
self.cudnn_deterministic):
y = self.forward()
y.grad = self.gy
name = 'cupy.cudnn.convolution_backward_data'
with testing.patch(name) as func:
y.backward()
self.assertEqual(func.called, self.should_call_cudnn)
@testing.parameterize(*testing.product({
'c_contiguous': [True, False],
'nobias': [True, False],
'groups': [1, 2],
}))
@attr.gpu
@attr.cudnn
class TestConvolution2DFunctionCudnnDeterministic(unittest.TestCase):
def setUp(self):
self.stride = 2
self.pad = 1
batch_sz = 2
in_channels_a_group = 64
out_channels_a_group = 64
in_channels = in_channels_a_group * self.groups
out_channels = out_channels_a_group * self.groups
kh, kw = (3, 3)
in_h, in_w = (32, 128)
out_h, out_w = (16, 64)
# should be same types for cudnn test
x_dtype = numpy.float32
W_dtype = numpy.float32
self.W = numpy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
(out_channels, in_channels_a_group, kh, kw)).astype(W_dtype)
self.b = numpy.random.uniform(-1, 1, out_channels).astype(x_dtype)
self.x = numpy.random.uniform(
-1, 1, (batch_sz, in_channels, in_h, in_w)).astype(x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (batch_sz, out_channels, out_h, out_w)).astype(x_dtype)
self.should_call_cudnn = True
if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
self.should_call_cudnn = False
def test_called(self):
with testing.patch(
'cupy.cudnn.convolution_backward_filter', autospec=True) as f:
# cuDNN version >= v3 supports `cudnn_deterministic` option
self._run()
# in Convolution2DFunction.backward_gpu()
assert f.called == self.should_call_cudnn
def test_cudnn_deterministic(self):
x1, W1, b1, y1 = self._run()
x2, W2, b2, y2 = self._run()
cuda.cupy.testing.assert_array_equal(x1.grad, x2.grad)
cuda.cupy.testing.assert_array_equal(y1.data, y2.data)
cuda.cupy.testing.assert_array_equal(W1.grad, W2.grad)
def _contiguous(self, x_data, W_data, b_data, gy_data):
if not self.c_contiguous:
x_data = numpy.asfortranarray(x_data)
W_data = numpy.asfortranarray(W_data)
gy_data = numpy.asfortranarray(gy_data)
self.assertFalse(x_data.flags.c_contiguous)
self.assertFalse(W_data.flags.c_contiguous)
self.assertFalse(gy_data.flags.c_contiguous)
b = numpy.empty((len(b_data) * 2,), dtype=self.b.dtype)
b[::2] = b_data
b_data = b[::2]
self.assertFalse(b_data.flags.c_contiguous)
return x_data, W_data, b_data, gy_data
def _run(self):
with chainer.using_config('use_cudnn', 'always'):
with chainer.using_config('cudnn_deterministic', True):
# verify data continuity and move to gpu
x_data, W_data, b_data, gy_data = tuple(
cuda.to_gpu(data) for data in self._contiguous(
self.x, self.W, self.b, self.gy))
x, W, b, y = self._run_forward(x_data, W_data, b_data)
y.grad = gy_data
y.backward()
return x, W, b, y
def _run_forward(self, x_data, W_data, b_data):
x = chainer.Variable(x_data)
W = chainer.Variable(W_data)
b = None if self.nobias else chainer.Variable(b_data)
y = F.convolution_2d(x, W, b, stride=self.stride, pad=self.pad,
cover_all=False, groups=self.groups)
return x, W, b, y
class TestConvolution2DBackwardNoncontiguousGradOutputs(unittest.TestCase):
# NumPy raises an error when the inputs of dot operation are not
# contiguous. This test ensures this issue is correctly handled.
# (https://github.com/chainer/chainer/issues/2744)
    # This test depends on the fact that backward() of F.sum generates
    # a non-contiguous array.
def test_1(self):
n_batches = 2
in_channels = 3
out_channels = 1 # important
x_shape = (n_batches, in_channels, 10, 10)
w_shape = (out_channels, in_channels, 3, 3)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
y = F.convolution_2d(x, chainer.Variable(w))
z = F.sum(y)
z.backward()
class TestConvolution2DInvalidDilation(unittest.TestCase):
n_batches = 2
in_channels = 3
out_channels = 2
dilate = 0
x_shape = (n_batches, in_channels, 10, 10)
w_shape = (out_channels, in_channels, 3, 3)
def check_invalid_dilation(self, x_data, w_data):
x = chainer.Variable(x_data)
w = chainer.Variable(w_data)
F.convolution_2d(x, w, dilate=self.dilate)
def test_invalid_dilation_cpu(self):
x = numpy.ones(self.x_shape, numpy.float32)
w = numpy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_ideep', 'never'):
self.check_invalid_dilation(x, w)
@attr.ideep
def test_invalid_dilation_cpu_ideep(self):
x = numpy.ones(self.x_shape, numpy.float32)
w = numpy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_ideep', 'always'):
self.check_invalid_dilation(x, w)
@attr.gpu
def test_invalid_dilation_gpu(self):
x = cuda.cupy.ones(self.x_shape, numpy.float32)
w = cuda.cupy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_cudnn', 'never'):
self.check_invalid_dilation(x, w)
@attr.cudnn
def test_invalid_dilation_gpu_cudnn(self):
x = cuda.cupy.ones(self.x_shape, numpy.float32)
w = cuda.cupy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_cudnn', 'always'):
self.check_invalid_dilation(x, w)
testing.run_module(__name__, __file__)
|
the-stack_106_15255
|
# libs
from django.db import connections
# local
from membership.management.integrity.runner import register, output_errors
__all__ = [
'address_and_member_match',
'users_deleted_from_member',
]
@register
def address_and_member_match(file=None):
"""
    Check that the member_id of a User matches the member_id of the User's Address
"""
results = dict()
membership_db = connections['membership']
with membership_db.cursor() as cursor:
cursor.execute('SELECT COUNT(*) FROM "user"')
results['records_inspected'] = cursor.fetchone()[0]
cursor.execute("""
SELECT "user".id, "user".member_id, "user".address_id FROM "user"
LEFT JOIN address ON "user".address_id = address.id
LEFT JOIN member ON ("user".member_id = member.id AND address.member_id = member.id)
WHERE
member.id IS NULL
AND "user".deleted IS NULL
AND address.deleted IS NULL
AND member.deleted IS NULL
ORDER BY
"user".id
""")
invalid_records = cursor.fetchall()
results['errors_found'] = len(invalid_records)
if len(invalid_records) > 0:
error_header = (
'The following Users have an invalid combination of Member and Address IDs:\n'
' ID | Member | Address\n'
)
output_errors(file, error_header, invalid_records)
return results
@register
def users_deleted_from_member(file=None):
"""
Check that if a Member is deleted, the Users in that Member are also deleted
"""
results = dict()
membership_db = connections['membership']
with membership_db.cursor() as cursor:
cursor.execute('SELECT COUNT(*) FROM "user"')
results['records_inspected'] = cursor.fetchone()[0]
cursor.execute("""
SELECT "user".id, member_id from "user"
LEFT JOIN member ON "user".member_id = member.id
WHERE
"user".deleted IS NULL
AND member.deleted IS NOT NULL
ORDER BY
"user".id
""")
not_deleted_users = cursor.fetchall()
results['errors_found'] = len(not_deleted_users)
if len(not_deleted_users) > 0:
error_header = (
                'The following Users were not deleted even though their Member has been deleted:\n'
' User | Member \n'
)
output_errors(file, error_header, not_deleted_users)
return results
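# A minimal sketch of invoking one check directly (the registered checks are
# normally collected and executed by membership.management.integrity.runner;
# this direct call is shown only to illustrate the return value):
#
#   with open('/tmp/integrity_report.txt', 'w') as report:
#       summary = address_and_member_match(file=report)
#       # e.g. {'records_inspected': 1234, 'errors_found': 0}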
|
the-stack_106_15256
|
"""
Showcases cylindrical and spherical colour model computations.
"""
import numpy as np
import colour
from colour.utilities import message_box
message_box("Cylindrical & Spherical Colour Models")
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
message_box(
f'Converting to the "HSV" colourspace from given "RGB" colourspace '
f"values:\n\n\t{RGB}"
)
print(colour.RGB_to_HSV(RGB))
print("\n")
HSV = np.array([0.99603944, 0.93246304, 0.45620519])
message_box(
f'Converting to the "RGB" colourspace from given "HSV" colourspace '
f"values:\n\n\t{HSV}"
)
print(colour.HSV_to_RGB(HSV))
print("\n")
message_box(
f'Converting to the "HSL" colourspace from given "RGB" colourspace '
f"values:\n\n\t{RGB}"
)
print(colour.RGB_to_HSL(RGB))
print("\n")
HSL = np.array([0.99603944, 0.87347144, 0.24350795])
message_box(
f'Converting to the "RGB" colourspace from given "HSL" colourspace '
f"values:\n\n\t{HSL}"
)
print(colour.HSL_to_RGB(HSL))
print("\n")
message_box(
f'Converting to the "HCL" colourspace from given "RGB" colourspace '
f"values:\n\n\t{RGB}"
)
print(colour.RGB_to_HCL(RGB))
print("\n")
HCL = np.array([0.99603944, 0.87347144, 0.24350795])
message_box(
f'Converting to the "RGB" colourspace from given "HCL" colourspace '
f"values:\n\n\t{HCL}"
)
print(colour.HCL_to_RGB(HCL))
print("\n")
|
the-stack_106_15258
|
# -*- coding: latin-1 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import os
import sys
from app.curses_util import *
import app.ci_program
import app.fake_curses_testing
import app.prefs
kTestFile = u'#application_test_file_with_unlikely_file_name~'
class ApplicationTestCases(app.fake_curses_testing.FakeCursesTestCase):
def setUp(self):
self.longMessage = True
if os.path.isfile(kTestFile):
os.unlink(kTestFile)
self.assertFalse(os.path.isfile(kTestFile))
app.fake_curses_testing.FakeCursesTestCase.setUp(self)
def test_bracketed_paste(self):
self.runWithTestFile(kTestFile, [
self.displayCheck(2, 7, [u" "]),
curses.ascii.ESC, app.curses_util.BRACKETED_PASTE_BEGIN,
u't', u'e',
            225, 186, 191,  # Send an "ế" in utf-8.
u't',
curses.ascii.ESC, app.curses_util.BRACKETED_PASTE_END,
self.displayCheck(2, 7, [u'te\u1ebft ']),
CTRL_Q, u'n'])
def test_backspace(self):
self.runWithTestFile(kTestFile, [
self.displayCheck(2, 7, [u" "]), self.writeText(u"tex"),
self.displayCheck(2, 7, [u"tex "]), KEY_BACKSPACE1, u"t",
self.displayCheck(2, 7, [u"tet "]), CTRL_Q, u"n"])
def test_cursor_moves(self):
self.runWithTestFile(kTestFile, [
self.displayCheck(0, 0, [
u" ci . ",
u" ",
u" 1 "]),
self.cursorCheck(2, 7),
self.writeText(u'test\napple\norange'),
self.cursorCheck(4, 13),
self.selectionCheck(2, 6, 0, 0, 0),
KEY_UP, self.cursorCheck(3, 12), self.selectionCheck(1, 5, 0, 0, 0),
KEY_UP, self.cursorCheck(2, 11), self.selectionCheck(0, 4, 0, 0, 0),
KEY_UP, self.cursorCheck(2, 7), self.selectionCheck(0, 0, 0, 0, 0),
KEY_RIGHT, KEY_RIGHT, KEY_RIGHT, KEY_RIGHT,
KEY_LEFT, self.cursorCheck(2, 10), self.selectionCheck(0, 3, 0, 0, 0),
KEY_LEFT, self.cursorCheck(2, 9), self.selectionCheck(0, 2, 0, 0, 0),
KEY_DOWN, self.cursorCheck(3, 9), self.selectionCheck(1, 2, 0, 0, 0),
KEY_DOWN, self.cursorCheck(4, 9), self.selectionCheck(2, 2, 0, 0, 0),
KEY_RIGHT, self.cursorCheck(4, 10), self.selectionCheck(2, 3, 0, 0, 0),
KEY_DOWN, self.cursorCheck(4, 13), self.selectionCheck(2, 6, 0, 0, 0),
KEY_HOME, self.cursorCheck(4, 7), self.selectionCheck(2, 0, 0, 0, 0),
KEY_END, self.cursorCheck(4, 13), self.selectionCheck(2, 6, 0, 0, 0),
KEY_SHIFT_UP, self.cursorCheck(3, 12), self.selectionCheck(1, 5, 2, 6, 3),
KEY_SHIFT_LEFT, self.cursorCheck(3, 11), self.selectionCheck(1, 4, 2, 6, 3),
KEY_SHIFT_RIGHT, self.cursorCheck(3, 12), self.selectionCheck(1, 5, 2, 6, 3),
KEY_SHIFT_RIGHT, self.cursorCheck(4, 7), self.selectionCheck(2, 0, 2, 6, 3),
KEY_SHIFT_RIGHT, self.cursorCheck(4, 8), self.selectionCheck(2, 1, 2, 6, 3),
CTRL_Q, u'n']);
def test_cursor_select_first_line(self):
self.runWithTestFile(kTestFile, [
self.cursorCheck(2, 7),
self.writeText(u'test\napple\norange'),
self.cursorCheck(4, 13), self.selectionCheck(2, 6, 0, 0, 0),
KEY_SHIFT_UP,
self.cursorCheck(3, 12), self.selectionCheck(1, 5, 2, 6, 3),
KEY_SHIFT_UP,
self.cursorCheck(2, 11), self.selectionCheck(0, 4, 2, 6, 3),
            # Regression test: shift up past the start of the document should
            # select to the start of the document (i.e. start of line).
KEY_SHIFT_UP,
self.cursorCheck(2, 7), self.selectionCheck(0, 0, 2, 6, 3),
# Same for non-selection.
KEY_DOWN, KEY_END,
self.cursorCheck(3, 12), self.selectionCheck(1, 5, 0, 0, 0),
KEY_UP,
self.cursorCheck(2, 11), self.selectionCheck(0, 4, 0, 0, 0),
KEY_UP,
self.cursorCheck(2, 7), self.selectionCheck(0, 0, 0, 0, 0),
# The goalCol should track the desired goal column.
KEY_DOWN,
self.cursorCheck(3, 12), self.selectionCheck(1, 5, 0, 0, 0),
CTRL_Q, u'n']);
def test_cursor_select_last_line(self):
self.runWithTestFile(kTestFile, [
self.cursorCheck(2, 7),
self.writeText(u'test\napple\norange'),
self.cursorCheck(4, 13), self.selectionCheck(2, 6, 0, 0, 0),
CTRL_G, u't',
self.cursorCheck(2, 7), self.selectionCheck(0, 0, 0, 0, 0),
KEY_SHIFT_DOWN,
self.cursorCheck(3, 7), self.selectionCheck(1, 0, 0, 0, 3),
KEY_SHIFT_DOWN,
self.cursorCheck(4, 7), self.selectionCheck(2, 0, 0, 0, 3),
# Regression test: shift down past the end of the document should select
# to end of document (i.e. end of line).
KEY_SHIFT_DOWN,
self.cursorCheck(4, 13), self.selectionCheck(2, 6, 0, 0, 3),
# Same for non-selection.
KEY_UP, KEY_HOME,
self.cursorCheck(3, 7), self.selectionCheck(1, 0, 2, 6, 0),
KEY_DOWN,
self.cursorCheck(4, 7), self.selectionCheck(2, 0, 2, 6, 0),
KEY_DOWN,
self.cursorCheck(4, 13), self.selectionCheck(2, 6, 2, 6, 0),
# The goalCol should track the desired goal column.
KEY_UP,
self.cursorCheck(3, 7), self.selectionCheck(1, 0, 2, 6, 0),
CTRL_Q, u'n']);
def test_ctrl_cursor_moves(self):
self.runWithTestFile(kTestFile, [
self.displayCheck(0, 0, [
u" ci . ",
u" ",
u" 1 "]),
self.cursorCheck(2, 7),
self.writeText(u'test\napple bananaCarrot DogElephantFrog\norange'),
self.cursorCheck(4, 13),
self.selectionCheck(2, 6, 0, 0, 0),
KEY_CTRL_LEFT, self.cursorCheck(4, 7), self.selectionCheck(2, 0, 0, 0, 0),
KEY_CTRL_SHIFT_RIGHT, self.cursorCheck(4, 13), self.selectionCheck(2, 6, 2, 0, 3),
KEY_CTRL_SHIFT_LEFT, self.cursorCheck(4, 7), self.selectionCheck(2, 0, 2, 0, 3),
KEY_CTRL_SHIFT_LEFT, self.cursorCheck(3, 38), self.selectionCheck(1, 34, 2, 0, 3),
KEY_CTRL_SHIFT_LEFT, self.cursorCheck(3, 23), self.selectionCheck(1, 19, 2, 0, 3),
KEY_CTRL_SHIFT_LEFT, self.cursorCheck(3, 22), self.selectionCheck(1, 18, 2, 0, 3),
KEY_CTRL_RIGHT, self.cursorCheck(3, 23), self.selectionCheck(1, 19, 1, 18, 0),
CTRL_Q, u'n']);
def test_select_line(self):
#self.setMovieMode(True)
self.runWithTestFile(kTestFile, [
self.displayCheck(0, 0, [
u" ci . ",
u" ",
u" 1 "]),
self.cursorCheck(2, 7),
self.writeText(u'test\napple\norange'),
self.cursorCheck(4, 13),
self.selectionCheck(2, 6, 0, 0, 0),
CTRL_L,
self.selectionCheck(2, 6, 2, 0, 4),
self.displayCheckStyle(0, 0, 1, len(u" ci "), app.prefs.color[u'logo']),
KEY_UP,
self.selectionCheck(1, 5, 2, 6, 0),
CTRL_L,
self.selectionCheck(2, 0, 1, 0, 4),
CTRL_L,
self.selectionCheck(2, 6, 1, 0, 4),
self.addMouseInfo(0, 2, 10, curses.BUTTON1_PRESSED),
curses.KEY_MOUSE,
self.selectionCheck(2, 6, 0, 3, 2),
CTRL_Q, u'n']);
def test_select_line_via_line_numbers(self):
self.runWithTestFile(kTestFile, [
self.displayCheck(0, 0, [
u" ci . ",
u" ",
u" 1 "]),
self.cursorCheck(2, 7),
u'a', u'b', u'c', CTRL_J, u'd', u'e', CTRL_J, u'f', u'g', u'h', u'i',
self.cursorCheck(4, 11),
self.addMouseInfo(0, 3, 2, curses.BUTTON1_PRESSED),
curses.KEY_MOUSE,
CTRL_L,
CTRL_Q, u'n']);
|