the-stack_106_25859
from pathlib import Path
import pytest
from poetry.core.factory import Factory
from poetry.core.toml import TOMLFile
fixtures_dir = Path(__file__).parent / "fixtures"
def test_create_poetry():
poetry = Factory().create_poetry(fixtures_dir / "sample_project")
package = poetry.package
assert package.name == "my-package"
assert package.version.text == "1.2.3"
assert package.description == "Some description."
assert package.authors == ["Sébastien Eustace <[email protected]>"]
assert package.license.id == "MIT"
assert (
package.readmes[0].relative_to(fixtures_dir).as_posix()
== "sample_project/README.rst"
)
assert package.homepage == "https://python-poetry.org"
assert package.repository_url == "https://github.com/python-poetry/poetry"
assert package.keywords == ["packaging", "dependency", "poetry"]
assert package.python_versions == "~2.7 || ^3.6"
assert str(package.python_constraint) == ">=2.7,<2.8 || >=3.6,<4.0"
dependencies = {}
for dep in package.requires:
dependencies[dep.name] = dep
cleo = dependencies["cleo"]
assert cleo.pretty_constraint == "^0.6"
assert not cleo.is_optional()
pendulum = dependencies["pendulum"]
assert pendulum.pretty_constraint == "branch 2.0"
assert pendulum.is_vcs()
assert pendulum.vcs == "git"
assert pendulum.branch == "2.0"
assert pendulum.source == "https://github.com/sdispater/pendulum.git"
assert pendulum.allows_prereleases()
assert not pendulum.develop
tomlkit = dependencies["tomlkit"]
assert tomlkit.pretty_constraint == "rev 3bff550"
assert tomlkit.is_vcs()
assert tomlkit.vcs == "git"
assert tomlkit.rev == "3bff550"
assert tomlkit.source == "https://github.com/sdispater/tomlkit.git"
assert tomlkit.allows_prereleases()
assert not tomlkit.develop
requests = dependencies["requests"]
assert requests.pretty_constraint == "^2.18"
assert not requests.is_vcs()
assert not requests.allows_prereleases()
assert requests.is_optional()
assert requests.extras == frozenset({"security"})
pathlib2 = dependencies["pathlib2"]
assert pathlib2.pretty_constraint == "^2.2"
assert pathlib2.python_versions == ">=2.7 <2.8"
assert not pathlib2.is_optional()
demo = dependencies["demo"]
assert demo.is_file()
assert not demo.is_vcs()
assert demo.name == "demo"
assert demo.pretty_constraint == "*"
demo = dependencies["my-package"]
assert not demo.is_file()
assert demo.is_directory()
assert not demo.is_vcs()
assert demo.name == "my-package"
assert demo.pretty_constraint == "*"
simple_project = dependencies["simple-project"]
assert not simple_project.is_file()
assert simple_project.is_directory()
assert not simple_project.is_vcs()
assert simple_project.name == "simple-project"
assert simple_project.pretty_constraint == "*"
functools32 = dependencies["functools32"]
assert functools32.name == "functools32"
assert functools32.pretty_constraint == "^3.2.3"
assert (
str(functools32.marker)
== 'python_version ~= "2.7" and sys_platform == "win32" or python_version in'
' "3.4 3.5"'
)
dataclasses = dependencies["dataclasses"]
assert dataclasses.name == "dataclasses"
assert dataclasses.pretty_constraint == "^0.7"
assert dataclasses.python_versions == ">=3.6.1 <3.7"
assert (
str(dataclasses.marker)
== 'python_full_version >= "3.6.1" and python_version < "3.7"'
)
assert "db" in package.extras
classifiers = package.classifiers
assert classifiers == [
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
assert package.all_classifiers == [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
def test_create_poetry_with_packages_and_includes():
poetry = Factory().create_poetry(
fixtures_dir.parent / "masonry" / "builders" / "fixtures" / "with-include"
)
package = poetry.package
assert package.packages == [
{"include": "extra_dir/**/*.py"},
{"include": "extra_dir/**/*.py"},
{"include": "my_module.py"},
{"include": "package_with_include"},
{"include": "tests", "format": "sdist"},
{"include": "for_wheel_only", "format": ["wheel"]},
{"include": "src_package", "from": "src"},
]
assert package.include == [
{"path": "extra_dir/vcs_excluded.txt", "format": []},
{"path": "notes.txt", "format": []},
]
def test_create_poetry_with_multi_constraints_dependency():
poetry = Factory().create_poetry(
fixtures_dir / "project_with_multi_constraints_dependency"
)
package = poetry.package
assert len(package.requires) == 2
def test_validate():
complete = TOMLFile(fixtures_dir / "complete.toml")
content = complete.read()["tool"]["poetry"]
assert Factory.validate(content) == {"errors": [], "warnings": []}
def test_validate_fails():
complete = TOMLFile(fixtures_dir / "complete.toml")
content = complete.read()["tool"]["poetry"]
content["this key is not in the schema"] = ""
expected = (
"Additional properties are not allowed "
"('this key is not in the schema' was unexpected)"
)
assert Factory.validate(content) == {"errors": [expected], "warnings": []}
def test_strict_validation_success_on_multiple_readme_files():
with_readme_files = TOMLFile(fixtures_dir / "with_readme_files" / "pyproject.toml")
content = with_readme_files.read()["tool"]["poetry"]
assert Factory.validate(content, strict=True) == {"errors": [], "warnings": []}
def test_strict_validation_fails_on_readme_files_with_unmatching_types():
with_readme_files = TOMLFile(fixtures_dir / "with_readme_files" / "pyproject.toml")
content = with_readme_files.read()["tool"]["poetry"]
content["readme"][0] = "README.md"
assert Factory.validate(content, strict=True) == {
"errors": [
"Declared README files must be of same type: found text/markdown,"
" text/x-rst"
],
"warnings": [],
}
def test_create_poetry_fails_on_invalid_configuration():
with pytest.raises(RuntimeError) as e:
Factory().create_poetry(
Path(__file__).parent / "fixtures" / "invalid_pyproject" / "pyproject.toml"
)
expected = """\
The Poetry configuration is invalid:
- 'description' is a required property
"""
assert str(e.value) == expected
def test_create_poetry_omits_dev_dependencies_iff_with_dev_is_false():
poetry = Factory().create_poetry(fixtures_dir / "sample_project", with_groups=False)
assert not any("dev" in r.groups for r in poetry.package.all_requires)
poetry = Factory().create_poetry(fixtures_dir / "sample_project")
assert any("dev" in r.groups for r in poetry.package.all_requires)
def test_create_poetry_fails_with_invalid_dev_dependencies_iff_with_dev_is_true():
with pytest.raises(ValueError) as err:
Factory().create_poetry(fixtures_dir / "project_with_invalid_dev_deps")
assert "does not exist" in str(err.value)
Factory().create_poetry(
fixtures_dir / "project_with_invalid_dev_deps", with_groups=False
)
the-stack_106_25860
import os
from os import path
from setuptools import find_packages, setup
VERSION = "0.5.1"
INSTALL_REQUIRES = [
"Django>=2.2.9,<3",
"bleach==3.1.4",
"bleach-whitelist>=0.0.10",
"cryptography>=2.7",
"django-after-response>=0.2.2",
"django-bootstrap4>=0.0.7",
"djangorestframework>=3.9.2",
"emoji-data-python==1.1.0",
"jsonfield>=2.0.2",
"markdown2>=2.3.7",
"python-slugify>=1.2.6",
"slackclient>=1.3,<2",
"statuspageio>=0.0.1",
]
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# load README.md
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="django-incident-response",
version=VERSION,
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(exclude="demo"),
install_requires=INSTALL_REQUIRES,
package_dir={"response": "response"},
python_requires=">3.6",
include_package_data=True,
license="MIT License", # example license
description="A real-time incident response and reporting tool",
url="https://github.com/monzo/response",
author="Chris Evans",
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
)
the-stack_106_25861
"""
Test the session save feature
"""
import os
import tempfile
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SessionSaveTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def raw_transcript_builder(self, cmd, res):
raw = "(lldb) " + cmd + "\n"
if res.GetOutputSize():
raw += res.GetOutput()
if res.GetErrorSize():
raw += res.GetError()
return raw
@skipIfWindows
@no_debug_info_test
def test_session_save(self):
raw = ""
interpreter = self.dbg.GetCommandInterpreter()
settings = [
'settings set interpreter.echo-commands true',
'settings set interpreter.echo-comment-commands true',
'settings set interpreter.stop-command-source-on-error false'
]
for setting in settings:
interpreter.HandleCommand(setting, lldb.SBCommandReturnObject())
inputs = [
'# This is a comment', # Comment
'help session', # Valid command
'Lorem ipsum' # Invalid command
]
for cmd in inputs:
res = lldb.SBCommandReturnObject()
interpreter.HandleCommand(cmd, res)
raw += self.raw_transcript_builder(cmd, res)
self.assertTrue(interpreter.HasCommands())
self.assertTrue(len(raw) != 0)
# Check for error
cmd = 'session save /root/file'
interpreter.HandleCommand(cmd, res)
self.assertFalse(res.Succeeded())
raw += self.raw_transcript_builder(cmd, res)
tf = tempfile.NamedTemporaryFile()
output_file = tf.name
res = lldb.SBCommandReturnObject()
interpreter.HandleCommand('session save ' + output_file, res)
self.assertTrue(res.Succeeded())
raw += self.raw_transcript_builder(cmd, res)
with open(output_file, "r") as file:
content = file.read()
# Exclude last line, since session won't record its own output
lines = raw.splitlines()[:-1]
for line in lines:
self.assertIn(line, content)
td = tempfile.TemporaryDirectory()
res = lldb.SBCommandReturnObject()
interpreter.HandleCommand('settings set interpreter.save-session-directory ' + td.name, res)
self.assertTrue(res.Succeeded())
res = lldb.SBCommandReturnObject()
interpreter.HandleCommand('session save', res)
self.assertTrue(res.Succeeded())
raw += self.raw_transcript_builder(cmd, res)
with open(os.path.join(td.name, os.listdir(td.name)[0]), "r") as file:
content = file.read()
# Exclude last line, since session won't record its own output
lines = raw.splitlines()[:-1]
for line in lines:
self.assertIn(line, content)
@skipIfWindows
@no_debug_info_test
def test_session_save_on_quit(self):
raw = ""
interpreter = self.dbg.GetCommandInterpreter()
td = tempfile.TemporaryDirectory()
settings = [
'settings set interpreter.echo-commands true',
'settings set interpreter.echo-comment-commands true',
'settings set interpreter.stop-command-source-on-error false',
'settings set interpreter.save-session-on-quit true',
'settings set interpreter.save-session-directory ' + td.name,
]
for setting in settings:
res = lldb.SBCommandReturnObject()
interpreter.HandleCommand(setting, res)
raw += self.raw_transcript_builder(setting, res)
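# Destroying the debugger is what triggers the save-session-on-quit behavior configured above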
self.dbg.Destroy(self.dbg)
with open(os.path.join(td.name, os.listdir(td.name)[0]), "r") as file:
content = file.read()
# Exclude last line, since session won't record its own output
lines = raw.splitlines()[:-1]
for line in lines:
self.assertIn(line, content)
the-stack_106_25863
import torch
from torch.nn import Linear as Lin
from torch.nn import ReLU
from torch.nn import Sequential as Seq
from torch_geometric.nn import GlobalAttention
def test_global_attention():
channels, batch_size = (32, 10)
gate_nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, 1))
nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, channels))
glob = GlobalAttention(gate_nn, nn)
assert glob.__repr__() == (
'GlobalAttention(gate_nn=Sequential(\n'
' (0): Linear(in_features=32, out_features=32, bias=True)\n'
' (1): ReLU()\n'
' (2): Linear(in_features=32, out_features=1, bias=True)\n'
'), nn=Sequential(\n'
' (0): Linear(in_features=32, out_features=32, bias=True)\n'
' (1): ReLU()\n'
' (2): Linear(in_features=32, out_features=32, bias=True)\n'
'))')
x = torch.randn((batch_size**2, channels))
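# Assign batch_size consecutive nodes to each of the batch_size graphs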
batch = torch.arange(batch_size, dtype=torch.long)
batch = batch.view(-1, 1).repeat(1, batch_size).view(-1)
assert glob(x, batch).size() == (batch_size, channels)
assert glob(x, batch, batch_size + 1).size() == (batch_size + 1, channels)
the-stack_106_25864
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
from typing import Any
from typing import List
from typing import Tuple
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlespeech.s2t.decoders.scorers.scorer_interface import BatchScorerInterface
from paddlespeech.s2t.models.lm_interface import LMInterface
from paddlespeech.s2t.modules.encoder import TransformerEncoder
from paddlespeech.s2t.modules.mask import subsequent_mask
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
class TransformerLM(nn.Layer, LMInterface, BatchScorerInterface):
def __init__(self,
n_vocab: int,
pos_enc: str=None,
embed_unit: int=128,
att_unit: int=256,
head: int=2,
unit: int=1024,
layer: int=4,
dropout_rate: float=0.5,
emb_dropout_rate: float=0.0,
att_dropout_rate: float=0.0,
tie_weights: bool=False,
**kwargs):
nn.Layer.__init__(self)
if pos_enc == "sinusoidal":
pos_enc_layer_type = "abs_pos"
elif pos_enc is None:
pos_enc_layer_type = "no_pos"
else:
raise ValueError(f"unknown pos-enc option: {pos_enc}")
self.embed = nn.Embedding(n_vocab, embed_unit)
if emb_dropout_rate == 0.0:
self.embed_drop = None
else:
self.embed_drop = nn.Dropout(emb_dropout_rate)
self.encoder = TransformerEncoder(
input_size=embed_unit,
output_size=att_unit,
attention_heads=head,
linear_units=unit,
num_blocks=layer,
dropout_rate=dropout_rate,
attention_dropout_rate=att_dropout_rate,
input_layer="linear",
pos_enc_layer_type=pos_enc_layer_type,
concat_after=False,
static_chunk_size=1,
use_dynamic_chunk=False,
use_dynamic_left_chunk=False)
self.decoder = nn.Linear(att_unit, n_vocab)
logger.info("Tie weights set to {}".format(tie_weights))
logger.info("Dropout set to {}".format(dropout_rate))
logger.info("Emb Dropout set to {}".format(emb_dropout_rate))
logger.info("Att Dropout set to {}".format(att_dropout_rate))
if tie_weights:
assert (
att_unit == embed_unit
), "Tie Weights: True need embedding and final dimensions to match"
self.decoder.weight = self.embed.weight
def _target_mask(self, ys_in_pad):
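# Combine the non-padding mask (token id != 0) with the causal subsequent-position mask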
ys_mask = ys_in_pad != 0
m = subsequent_mask(paddle.shape(ys_mask)[-1]).unsqueeze(0)
return ys_mask.unsqueeze(-2) & m
def forward(self, x: paddle.Tensor, t: paddle.Tensor
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""Compute LM loss value from buffer sequences.
Args:
x (paddle.Tensor): Input ids. (batch, len)
t (paddle.Tensor): Target ids. (batch, len)
Returns:
tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: Tuple of
loss to backward (scalar),
negative log-likelihood of t: -log p(t) (scalar) and
the number of elements in x (scalar)
Notes:
The last two return values are used
in perplexity: p(t)^{-1/n} = exp(-log p(t) / n)
"""
batch_size = paddle.shape(x)[0]
xm = x != 0
xlen = xm.sum(axis=1)
if self.embed_drop is not None:
emb = self.embed_drop(self.embed(x))
else:
emb = self.embed(x)
h, _ = self.encoder(emb, xlen)
y = self.decoder(h)
loss = F.cross_entropy(
y.view(-1, paddle.shape(y)[-1]), t.view(-1), reduction="none")
mask = xm.to(loss.dtype)
logp = loss * mask.view(-1)
nll = logp.view(batch_size, -1).sum(-1)
nll_count = mask.sum(-1)
logp = logp.sum()
count = mask.sum()
return logp / count, logp, count, nll, nll_count
# beam search API (see ScorerInterface)
def score(self, y: paddle.Tensor, state: Any,
x: paddle.Tensor) -> Tuple[paddle.Tensor, Any]:
"""Score new token.
Args:
y (paddle.Tensor): 1D paddle.int64 prefix tokens.
state: Scorer state for prefix tokens
x (paddle.Tensor): encoder feature that generates ys.
Returns:
tuple[paddle.Tensor, Any]: Tuple of
paddle.float32 scores for next token (n_vocab)
and next state for ys
"""
y = y.unsqueeze(0)
if self.embed_drop is not None:
emb = self.embed_drop(self.embed(y))
else:
emb = self.embed(y)
h, _, cache = self.encoder.forward_one_step(
emb, self._target_mask(y), cache=state)
h = self.decoder(h[:, -1])
logp = F.log_softmax(h).squeeze(0)
return logp, cache
# batch beam search API (see BatchScorerInterface)
def batch_score(self,
ys: paddle.Tensor,
states: List[Any],
xs: paddle.Tensor) -> Tuple[paddle.Tensor, List[Any]]:
"""Score new token batch (required).
Args:
ys (paddle.Tensor): paddle.int64 prefix tokens (n_batch, ylen).
states (List[Any]): Scorer states for prefix tokens.
xs (paddle.Tensor):
The encoder feature that generates ys (n_batch, xlen, n_feat).
Returns:
tuple[paddle.Tensor, List[Any]]: Tuple of
batchfied scores for next token with shape of `(n_batch, n_vocab)`
and next state list for ys.
"""
# merge states
n_batch = len(ys)
n_layers = len(self.encoder.encoders)
if states[0] is None:
batch_state = None
else:
# transpose state of [batch, layer] into [layer, batch]
batch_state = [
paddle.stack([states[b][i] for b in range(n_batch)])
for i in range(n_layers)
]
if self.embed_drop is not None:
emb = self.embed_drop(self.embed(ys))
else:
emb = self.embed(ys)
# batch decoding
h, _, states = self.encoder.forward_one_step(
emb, self._target_mask(ys), cache=batch_state)
h = self.decoder(h[:, -1])
logp = F.log_softmax(h)
# transpose state of [layer, batch] into [batch, layer]
state_list = [[states[i][b] for i in range(n_layers)]
for b in range(n_batch)]
return logp, state_list
if __name__ == "__main__":
tlm = TransformerLM(
n_vocab=5002,
pos_enc=None,
embed_unit=128,
att_unit=512,
head=8,
unit=2048,
layer=16,
dropout_rate=0.5, )
# n_vocab: int,
# pos_enc: str=None,
# embed_unit: int=128,
# att_unit: int=256,
# head: int=2,
# unit: int=1024,
# layer: int=4,
# dropout_rate: float=0.5,
# emb_dropout_rate: float = 0.0,
# att_dropout_rate: float = 0.0,
# tie_weights: bool = False,):
paddle.set_device("cpu")
model_dict = paddle.load("transformerLM.pdparams")
tlm.set_state_dict(model_dict)
tlm.eval()
#Test the score
input2 = np.array([5])
input2 = paddle.to_tensor(input2)
state = None
output, state = tlm.score(input2, state, None)
input3 = np.array([5, 10])
input3 = paddle.to_tensor(input3)
output, state = tlm.score(input3, state, None)
input4 = np.array([5, 10, 0])
input4 = paddle.to_tensor(input4)
output, state = tlm.score(input4, state, None)
print("output", output)
"""
#Test the batch score
batch_size = 2
inp2 = np.array([[5], [10]])
inp2 = paddle.to_tensor(inp2)
output, states = tlm.batch_score(
inp2, [(None,None,0)] * batch_size)
inp3 = np.array([[100], [30]])
inp3 = paddle.to_tensor(inp3)
output, states = tlm.batch_score(
inp3, states)
print("output", output)
#print("cache", cache)
#np.save("output_pd.npy", output)
"""
the-stack_106_25866
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data/',
train=False,
transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Recurrent neural network (many-to-one)
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
# nn.LSTM(input_size, hidden_size, num_layers)
# batch_first=True: the first dimension of the input/output tensors is the batch size
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
# Set initial hidden and cell states
# input x : (BATCH, LENGTH, INPUT_SIZE)
# Initialize the first hidden state and cell state
# Use x.size(0) instead of hard-coding batch_size so any batch size works
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
# Forward propagate LSTM
out, _ = self.lstm(x, (h0, c0))
# out: tensor of shape (batch_size, seq_length, hidden_size)
# (hn, cn) is not needed, so discard it with _
# Decode the hidden state of the last time step
# With 28 time steps, only the output of the last time step is needed
out = self.fc(out[:, -1, :])
return out
model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.reshape(-1, sequence_length, input_size).to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.reshape(-1, sequence_length, input_size).to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
the-stack_106_25867
from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
from pandas._config import config as cf
from pandas._libs import missing as libmissing
from pandas._libs.tslibs import iNaT, is_null_datetimelike
from pandas.core.dtypes.common import is_scalar
from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype, PeriodDtype
from pandas.core.dtypes.missing import (
array_equivalent,
isna,
isnull,
na_value_for_dtype,
notna,
notnull,
)
import pandas as pd
from pandas import DatetimeIndex, Float64Index, NaT, Series, TimedeltaIndex, date_range
import pandas._testing as tm
now = pd.Timestamp.now()
utcnow = pd.Timestamp.now("UTC")
@pytest.mark.parametrize("notna_f", [notna, notnull])
def test_notna_notnull(notna_f):
assert notna_f(1.0)
assert not notna_f(None)
assert not notna_f(np.NaN)
with cf.option_context("mode.use_inf_as_na", False):
assert notna_f(np.inf)
assert notna_f(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notna_f(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_na", True):
assert not notna_f(np.inf)
assert not notna_f(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notna_f(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_na", False):
for s in [
tm.makeFloatSeries(),
tm.makeStringSeries(),
tm.makeObjectSeries(),
tm.makeTimeSeries(),
tm.makePeriodSeries(),
]:
assert isinstance(notna_f(s), Series)
class TestIsNA:
def test_0d_array(self):
assert isna(np.array(np.nan))
assert not isna(np.array(0.0))
assert not isna(np.array(0))
# test object dtype
assert isna(np.array(np.nan, dtype=object))
assert not isna(np.array(0.0, dtype=object))
assert not isna(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isna(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("isna_f", [isna, isnull])
def test_isna_isnull(self, isna_f):
assert not isna_f(1.0)
assert isna_f(None)
assert isna_f(np.NaN)
assert float("nan")
assert not isna_f(np.inf)
assert not isna_f(-np.inf)
# type
assert not isna_f(type(pd.Series(dtype=object)))
assert not isna_f(type(pd.Series(dtype=np.float64)))
assert not isna_f(type(pd.DataFrame()))
# series
for s in [
tm.makeFloatSeries(),
tm.makeStringSeries(),
tm.makeObjectSeries(),
tm.makeTimeSeries(),
tm.makePeriodSeries(),
]:
assert isinstance(isna_f(s), Series)
# frame
for df in [
tm.makeTimeDataFrame(),
tm.makePeriodFrame(),
tm.makeMixedDataFrame(),
]:
result = isna_f(df)
expected = df.apply(isna_f)
tm.assert_frame_equal(result, expected)
def test_isna_lists(self):
result = isna([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isna([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isna(["foo", "bar"])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isna(["foo", "bar"])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
# GH20675
result = isna([np.NaN, "world"])
exp = np.array([True, False])
tm.assert_numpy_array_equal(result, exp)
def test_isna_nat(self):
result = isna([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isna(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isna_numpy_nat(self):
arr = np.array(
[
NaT,
np.datetime64("NaT"),
np.timedelta64("NaT"),
np.datetime64("NaT", "s"),
]
)
result = isna(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isna_datetime(self):
assert not isna(datetime.now())
assert notna(datetime.now())
idx = date_range("1/1/1990", periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notna(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isna(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq="M")
mask = isna(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isna(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_isna_old_datetimelike(self):
# isna_old should work for dt64tz, td64, and period, not just tznaive
dti = pd.date_range("2016-01-01", periods=3)
dta = dti._data
dta[-1] = pd.NaT
expected = np.array([False, False, True], dtype=bool)
objs = [dta, dta.tz_localize("US/Eastern"), dta - dta, dta.to_period("D")]
for obj in objs:
with cf.option_context("mode.use_inf_as_na", True):
result = pd.isna(obj)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"value, expected",
[
(np.complex128(np.nan), True),
(np.float64(1), False),
(np.array([1, 1 + 0j, np.nan, 3]), np.array([False, False, True, False])),
(
np.array([1, 1 + 0j, np.nan, 3], dtype=object),
np.array([False, False, True, False]),
),
(
np.array([1, 1 + 0j, np.nan, 3]).astype(object),
np.array([False, False, True, False]),
),
],
)
def test_complex(self, value, expected):
result = isna(value)
if is_scalar(result):
assert result is expected
else:
tm.assert_numpy_array_equal(result, expected)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
tm.assert_numpy_array_equal(isna(idx.values), exp)
tm.assert_numpy_array_equal(notna(idx.values), ~exp)
for dtype in [
"datetime64[D]",
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
]:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(["1 days", "NaT", "2 days"])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
tm.assert_numpy_array_equal(isna(idx.values), exp)
tm.assert_numpy_array_equal(notna(idx.values), ~exp)
for dtype in [
"timedelta64[D]",
"timedelta64[h]",
"timedelta64[m]",
"timedelta64[s]",
"timedelta64[ms]",
"timedelta64[us]",
"timedelta64[ns]",
]:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_period(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2012-01"], freq="M")
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(idx)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(idx, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
@pytest.mark.parametrize("dtype_equal", [True, False])
def test_array_equivalent(dtype_equal):
assert array_equivalent(
np.array([np.nan, np.nan]), np.array([np.nan, np.nan]), dtype_equal=dtype_equal
)
assert array_equivalent(
np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]),
dtype_equal=dtype_equal,
)
assert array_equivalent(
np.array([np.nan, None], dtype="object"),
np.array([np.nan, None], dtype="object"),
dtype_equal=dtype_equal,
)
# Check the handling of nested arrays in array_equivalent_object
assert array_equivalent(
np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"),
np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"),
dtype_equal=dtype_equal,
)
assert array_equivalent(
np.array([np.nan, 1 + 1j], dtype="complex"),
np.array([np.nan, 1 + 1j], dtype="complex"),
dtype_equal=dtype_equal,
)
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype="complex"),
np.array([np.nan, 1 + 2j], dtype="complex"),
dtype_equal=dtype_equal,
)
assert not array_equivalent(
np.array([np.nan, 1, np.nan]),
np.array([np.nan, 2, np.nan]),
dtype_equal=dtype_equal,
)
assert not array_equivalent(
np.array(["a", "b", "c", "d"]), np.array(["e", "e"]), dtype_equal=dtype_equal
)
assert array_equivalent(
Float64Index([0, np.nan]), Float64Index([0, np.nan]), dtype_equal=dtype_equal
)
assert not array_equivalent(
Float64Index([0, np.nan]), Float64Index([1, np.nan]), dtype_equal=dtype_equal
)
assert array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]), dtype_equal=dtype_equal
)
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]), dtype_equal=dtype_equal
)
assert array_equivalent(
TimedeltaIndex([0, np.nan]),
TimedeltaIndex([0, np.nan]),
dtype_equal=dtype_equal,
)
assert not array_equivalent(
TimedeltaIndex([0, np.nan]),
TimedeltaIndex([1, np.nan]),
dtype_equal=dtype_equal,
)
assert array_equivalent(
DatetimeIndex([0, np.nan], tz="US/Eastern"),
DatetimeIndex([0, np.nan], tz="US/Eastern"),
dtype_equal=dtype_equal,
)
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz="US/Eastern"),
DatetimeIndex([1, np.nan], tz="US/Eastern"),
dtype_equal=dtype_equal,
)
# The rest are not dtype_equal
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan], tz="US/Eastern"),
)
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz="CET"),
DatetimeIndex([0, np.nan], tz="US/Eastern"),
)
assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
def test_array_equivalent_different_dtype_but_equal():
# Unclear if this is exposed anywhere in the public-facing API
assert array_equivalent(np.array([1, 2]), np.array([1.0, 2.0]))
@pytest.mark.parametrize(
"lvalue, rvalue",
[
# There are 3 variants for each of lvalue and rvalue. We include all
# three for the tz-naive `now` and exclude the datetim64 variant
# for utcnow because it drops tzinfo.
(now, utcnow),
(now.to_datetime64(), utcnow),
(now.to_pydatetime(), utcnow),
(now, utcnow.to_pydatetime()),
(now.to_datetime64(), utcnow.to_pydatetime()),
(now.to_pydatetime(), utcnow.to_pydatetime()),
],
)
def test_array_equivalent_tzawareness(lvalue, rvalue):
# we shouldn't raise if comparing tzaware and tznaive datetimes
left = np.array([lvalue], dtype=object)
right = np.array([rvalue], dtype=object)
assert not array_equivalent(left, right, strict_nan=True)
assert not array_equivalent(left, right, strict_nan=False)
def test_array_equivalent_compat():
# see gh-13388
m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
n = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
assert array_equivalent(m, n, strict_nan=True)
assert array_equivalent(m, n, strict_nan=False)
m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
n = np.array([(1, 2), (4, 3)], dtype=[("a", int), ("b", float)])
assert not array_equivalent(m, n, strict_nan=True)
assert not array_equivalent(m, n, strict_nan=False)
m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
n = np.array([(1, 2), (3, 4)], dtype=[("b", int), ("a", float)])
assert not array_equivalent(m, n, strict_nan=True)
assert not array_equivalent(m, n, strict_nan=False)
def test_array_equivalent_str():
for dtype in ["O", "S", "U"]:
assert array_equivalent(
np.array(["A", "B"], dtype=dtype), np.array(["A", "B"], dtype=dtype)
)
assert not array_equivalent(
np.array(["A", "B"], dtype=dtype), np.array(["A", "X"], dtype=dtype)
)
def test_array_equivalent_nested():
# reached in groupby aggregations, make sure we use np.any when checking
# if the comparison is truthy
left = np.array([np.array([50, 70, 90]), np.array([20, 30, 40])], dtype=object)
right = np.array([np.array([50, 70, 90]), np.array([20, 30, 40])], dtype=object)
assert array_equivalent(left, right, strict_nan=True)
assert not array_equivalent(left, right[::-1], strict_nan=True)
left = np.array([np.array([50, 50, 50]), np.array([40, 40, 40])], dtype=object)
right = np.array([50, 40])
assert not array_equivalent(left, right, strict_nan=True)
@pytest.mark.parametrize(
"dtype, na_value",
[
# Datetime-like
(np.dtype("M8[ns]"), NaT),
(np.dtype("m8[ns]"), NaT),
(DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]"), NaT),
(PeriodDtype("M"), NaT),
# Integer
("u1", 0),
("u2", 0),
("u4", 0),
("u8", 0),
("i1", 0),
("i2", 0),
("i4", 0),
("i8", 0),
# Bool
("bool", False),
# Float
("f2", np.nan),
("f4", np.nan),
("f8", np.nan),
# Object
("O", np.nan),
# Interval
(IntervalDtype(), np.nan),
],
)
def test_na_value_for_dtype(dtype, na_value):
result = na_value_for_dtype(dtype)
assert result is na_value
class TestNAObj:
_1d_methods = ["isnaobj", "isnaobj_old"]
_2d_methods = ["isnaobj2d", "isnaobj2d_old"]
def _check_behavior(self, arr, expected):
for method in TestNAObj._1d_methods:
result = getattr(libmissing, method)(arr)
tm.assert_numpy_array_equal(result, expected)
arr = np.atleast_2d(arr)
expected = np.atleast_2d(expected)
for method in TestNAObj._2d_methods:
result = getattr(libmissing, method)(arr)
tm.assert_numpy_array_equal(result, expected)
def test_basic(self):
arr = np.array([1, None, "foo", -5.1, pd.NaT, np.nan])
expected = np.array([False, True, False, False, True, True])
self._check_behavior(arr, expected)
def test_non_obj_dtype(self):
arr = np.array([1, 3, np.nan, 5], dtype=float)
expected = np.array([False, False, True, False])
self._check_behavior(arr, expected)
def test_empty_arr(self):
arr = np.array([])
expected = np.array([], dtype=bool)
self._check_behavior(arr, expected)
def test_empty_str_inp(self):
arr = np.array([""]) # empty but not na
expected = np.array([False])
self._check_behavior(arr, expected)
def test_empty_like(self):
# see gh-13717: no segfaults!
arr = np.empty_like([None])
expected = np.array([True])
self._check_behavior(arr, expected)
m8_units = ["as", "ps", "ns", "us", "ms", "s", "m", "h", "D", "W", "M", "Y"]
na_vals = (
[
None,
NaT,
float("NaN"),
complex("NaN"),
np.nan,
np.float64("NaN"),
np.float32("NaN"),
np.complex64(np.nan),
np.complex128(np.nan),
np.datetime64("NaT"),
np.timedelta64("NaT"),
]
+ [np.datetime64("NaT", unit) for unit in m8_units]
+ [np.timedelta64("NaT", unit) for unit in m8_units]
)
inf_vals = [
float("inf"),
float("-inf"),
complex("inf"),
complex("-inf"),
np.inf,
np.NINF,
]
int_na_vals = [
# Values that match iNaT, which we treat as null in specific cases
np.int64(NaT.value),
int(NaT.value),
]
sometimes_na_vals = [Decimal("NaN")]
never_na_vals = [
# float/complex values that when viewed as int64 match iNaT
-0.0,
np.float64("-0.0"),
-0j,
np.complex64(-0j),
]
class TestLibMissing:
def test_checknull(self):
for value in na_vals:
assert libmissing.checknull(value)
for value in inf_vals:
assert not libmissing.checknull(value)
for value in int_na_vals:
assert not libmissing.checknull(value)
for value in sometimes_na_vals:
assert not libmissing.checknull(value)
for value in never_na_vals:
assert not libmissing.checknull(value)
def test_checknull_old(self):
for value in na_vals:
assert libmissing.checknull_old(value)
for value in inf_vals:
assert libmissing.checknull_old(value)
for value in int_na_vals:
assert not libmissing.checknull_old(value)
for value in sometimes_na_vals:
assert not libmissing.checknull_old(value)
for value in never_na_vals:
assert not libmissing.checknull_old(value)
def test_is_null_datetimelike(self):
for value in na_vals:
assert is_null_datetimelike(value)
assert is_null_datetimelike(value, False)
for value in inf_vals:
assert not is_null_datetimelike(value)
assert not is_null_datetimelike(value, False)
for value in int_na_vals:
assert is_null_datetimelike(value)
assert not is_null_datetimelike(value, False)
for value in sometimes_na_vals:
assert not is_null_datetimelike(value)
assert not is_null_datetimelike(value, False)
for value in never_na_vals:
assert not is_null_datetimelike(value)
the-stack_106_25868
# This work is based on original code developed and copyrighted by TNO 2020.
# Subsequent contributions are licensed to you by the developers of such code and are
# made available to the Project under one or several contributor license agreements.
#
# This work is licensed to you under the Apache License, Version 2.0.
# You may obtain a copy of the license at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# TNO - Initial implementation
# Manager:
# TNO
from application.esdl import esdl
from application.esdl.esh import EnergySystemHandler
class EnergySystemStatistics:
def __init__(self):
self.esh = EnergySystemHandler()
self.es = None
self.area = None
def calculate(self, esdl_string):
self.esh.load_from_string(esdl_string, 'calc_stats')
self.es = self.esh.get_energy_system()
result = dict()
self.get_energysystem_info(result)
instances = self.es.instance
if instances:
inst = instances[0]
if inst:
self.area = inst.area
if self.area:
self.get_all_areas_info(result)
self.get_number_of_assets_info(result)
self.get_power_of_assets_info(result)
return result
def get_energysystem_info(self, res_dict):
res_dict['energysystem'] = {
'es_id': self.es.id,
'es_name': self.es.name,
'es_description': self.es.description,
'inst_id': self.es.instance[0].id,
'inst_name': self.es.instance[0].name
}
def _get_area_info(self, area):
res = {
'id': area.id,
'name': area.name,
'scope': str(area.scope),
'sub_areas': [],
'bld_info': {}
}
for sub_area in area.area:
ar_info = self._get_area_info(sub_area)
res['sub_areas'].append(ar_info)
res['bld_info'] = self._building_statistiscs_per_area(area)
return res
def get_all_areas_info(self, res_dict):
res_dict['areas'] = self._get_area_info(self.area)
def _get_number_of_assets_in_building(self, building, res):
for content in building.eContents:
if isinstance(content, esdl.AbstractBuilding):
self._get_number_of_assets_in_building(content, res)
if isinstance(content, esdl.Asset):
if content.__class__.__name__ in res:
res[content.__class__.__name__]['cnt'] += 1
res[content.__class__.__name__]['aggr_cnt'] += content.aggregationCount
else:
res[content.__class__.__name__] = {'cnt': 1, 'aggr_cnt': content.aggregationCount}
def _get_number_of_assets_in_area(self, area, res):
for content in area.eContents:
if isinstance(content, esdl.Area):
self._get_number_of_assets_in_area(content, res)
if isinstance(content, esdl.AbstractBuilding):
self._get_number_of_assets_in_building(content, res)
if isinstance(content, esdl.Asset):
if content.__class__.__name__ in res:
res[content.__class__.__name__]['cnt'] += 1
res[content.__class__.__name__]['aggr_cnt'] += content.aggregationCount
else:
res[content.__class__.__name__] = {'cnt': 1, 'aggr_cnt': content.aggregationCount}
def get_number_of_assets_info(self, res_dict):
res = dict()
self._get_number_of_assets_in_area(self.area, res)
res_dict['number_of_assets'] = res
def _get_power_of_assets_in_building(self, building, res):
for content in building.eContents:
if isinstance(content, esdl.AbstractBuilding):
self._get_power_of_assets_in_building(content, res)
if isinstance(content, esdl.EnergyAsset):
if 'power' in dir(content):
if content.__class__.__name__ in res:
res[content.__class__.__name__] += content.power
else:
res[content.__class__.__name__] = content.power
def _get_power_of_assets_in_area(self, area, res):
for content in area.eContents:
if isinstance(content, esdl.Area):
self._get_power_of_assets_in_area(content, res)
if isinstance(content, esdl.AbstractBuilding):
self._get_power_of_assets_in_building(content, res)
if isinstance(content, esdl.EnergyAsset):
if 'power' in dir(content):
if content.__class__.__name__ in res:
res[content.__class__.__name__] += content.power
else:
res[content.__class__.__name__] = content.power
def get_power_of_assets_info(self, res_dict):
res = dict()
self._get_power_of_assets_in_area(self.area, res)
res_dict['power_of_assets'] = res
def _building_statistiscs_per_area(self, area):
type_number_area = dict()
for asset in area.asset:
if isinstance(asset, esdl.Building):
for bld_asset in asset.asset:
if isinstance(bld_asset, esdl.BuildingUnit):
bldu_gbd = bld_asset.type
gbds = list()
for gbd in bldu_gbd:
gbds.append(str(gbd))
gbds_str = ",".join(gbds)
bldu_floor_area = bld_asset.floorArea
if gbds_str in type_number_area:
gbd_stats = type_number_area[gbds_str]
else:
gbd_stats = dict()
type_number_area[gbds_str] = gbd_stats
if 'number' in gbd_stats:
gbd_stats['number'] += 1
else:
gbd_stats['number'] = 1
if 'floor_area' in gbd_stats:
gbd_stats['floor_area'] += bldu_floor_area
else:
gbd_stats['floor_area'] = bldu_floor_area
return type_number_area
the-stack_106_25871
import logging
import psycopg2
import psycopg2.extras
import socket
import sys
import time
from cluster_under_test import *
class DbRetriable:
"""
Wrapper around psycopg2, which offers convenient retry functionality.
If connection to postgres is lost during query execution or between
queries, retry with increasing intervals.
Low level functionality: create_connection_with_cursor, run_with_fail, run_with_retry
Here you have access to both connection and cursor objects and can e.g.
run multiple inserts with cursor.execute and then commit them together with connection.commit().
More convenient `execute`: run a query, commit, return the records. It is
suitable for both selects and insert/update/delete. Supports auto-retry.
Usage:
db = DbRetriable(dbname="postgres", user="...", password="...")
"""
def __init__(self, **other_connection_args):
"""Saves connection_args so they can be later used for connection retry."""
self.other_connection_args = other_connection_args
self.ntry = 1
def create_connection_with_cursor(self):
"""@returns tuple with connection and cursor"""
# Re-resolve the host name on every connection
resolved = ClusterUnderTest.resolve_service_url()
con = psycopg2.connect(host=resolved, **self.other_connection_args)
cur = con.cursor()
return (con, cur)
def run_with_retry(self):
'''
Runs a block until queries succeed.
Generator provides following to the executed block:
* psycopg2.connection object
* psycopg2.cursor object
* number of retries so far
Example:
>>> for (con, cur, ntry) in db.run_with_retry():
... cur.execute("""INSERT INTO testdata(batch, try, name)
... SELECT %s, %s, md5(random()::text)
... FROM generate_series(1,%s);""",
... (self.batch, ntry, self.BATCH_SIZE))
... con.commit()
'''
last_exception = ''
delay = 1
while True:
try:
con, cur = self.create_connection_with_cursor()
yield con, cur, self.ntry
con.commit()
break
except psycopg2.OperationalError as e:
self.ntry +=1
if str(e) == last_exception:
sys.stdout.write('+')
sys.stdout.flush()
else:
last_exception = str(e)
print(e)
time.sleep(delay)
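# Exponential back-off: double the retry delay each attempt, capped at 16 seconds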
delay = delay if delay > 15 else delay*2
if last_exception != '':
print()
def run_with_fail(self):
"""
Similar API to run_with_retry, but tries to connect and run the block only once and fails on any error.
"""
con, cur = self.create_connection_with_cursor()
yield con, cur, self.ntry
def execute(self, query, params=None, retry=False):
"""
Shortcut that:
* runs the query with params
* retries if desired and necessary
* commits at the end
* returns the result set as a list, if any
It is suitable for both selects and insert/update/delete.
>>> print(db.execute("SELECT count(*) from testdata;")[0])
"""
if retry:
for (con, cur, ntry) in self.run_with_retry():
cur.execute(query, params)
else:
for (con, cur, ntry) in self.run_with_fail():
cur.execute(query, params)
try:
res = cur.fetchall()
except psycopg2.ProgrammingError as ex:
res = None # no results to fetch
con.commit()
return res
the-stack_106_25873
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings, os, pickle, argparse, multiprocessing, logging
from statsmodels.tools.sm_exceptions import ConvergenceWarning
import yaml
import recommender_config
warnings.simplefilter('ignore', ConvergenceWarning)
warnings.simplefilter('ignore', FutureWarning)
warnings.simplefilter('ignore', UserWarning)
from recommender.functions import Theta_forecast, Theta_forecast_sktime, Naive_forecast, lr_forecast, autoarima_forecast, KNN_forecast, DT_forecast, VPA_forecast
from recommender.functions import perform_tests
def pando_recommender(y_segment, tree, window, limit = 5):
forecasters = {'theta':Theta_forecast, 'theta-sktime': Theta_forecast_sktime, 'naive': Naive_forecast, 'linear': lr_forecast,'arima': autoarima_forecast, 'kn': KNN_forecast, 'dt': DT_forecast, "vpa": VPA_forecast}
# Check y_segment length
if len(y_segment) < window:
forecast = np.zeros(window)
forecast[-len(y_segment):] = y_segment
prov = np.percentile(forecast, recommender_config.TARGET_PERCENTILE)
return forecast, prov, "warmup"
# get label for segment
tests = perform_tests(y_segment, recommender_config.STAT_THRESHOLD, recommender_config.THETA_THRESHOLD, recommender_config.MAX_CHANGEPOINTS)
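# Encode the boolean test outcomes as bits of a binary string to obtain a single integer behavior label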
label = int("".join(str(int(i)) for i in tests.values()), 2)
print("Trace Behavior Label: {}".format(label))
# get forecaster
rec_name = tree[label]
if type(rec_name) == float and np.isnan(rec_name):
logging.debug("Unseen label: {}. Proceed to apply default VPA".format(label))
rec_name = "vpa"
forecaster = forecasters[rec_name]
print("Trace Forecaster Selected: {}".format(rec_name))
try:
logging.info("Detected label: {}. Forecasting with: {}".format(label, rec_name))
forecast = forecaster(y_segment, window, output=recommender_config.OUTPUT)
except:
logging.warning("Forecast is invalid, proceed to recommend previous usage.")
forecast = y_segment[-window:]
if max(forecast) > recommender_config.LIMIT * max(y_segment):
logging.warning("Forecast is out of limits, proceed to recommend previous usage.")
forecast = y_segment[-window:]
prov = np.percentile(y_segment, recommender_config.TARGET_PERCENTILE)
else:
prov = np.percentile(forecast, recommender_config.TARGET_PERCENTILE)
print("Forecasts: {}".format(forecast))
print("Provision: {}".format(prov))
return forecast, prov, label
def get_all_recommendation(y, tree, window = 20, sight = 240, default_usage=1000):
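# Slide a non-overlapping window over the trace: each step forecasts the next `window` samples from the previous `sight` observations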
start, end = 0, window
N = len(y)
prov, forecast = np.zeros(N), np.zeros(N)
labels = []
prov[start:end] = default_usage
forecast[start:end] = default_usage
while end < N:
logging.info("Current segment {}-{}".format(start, end))
forecast_start = end
forecast_end = min(end + window, N)
prev_usage = y[start:end]
if end < sight:
logging.info("Warmup phase")
prov[forecast_start:forecast_end] = np.percentile(prev_usage, recommender_config.TARGET_PERCENTILE)
forecast[forecast_start:forecast_end] = prev_usage
else:
prev_sight = y[end - sight:end]
forecast_segment, rec, label = pando_recommender(prev_sight, tree, forecast_end - forecast_start)
forecast[forecast_start:forecast_end] = forecast_segment
labels.append(label)
prov[forecast_start:forecast_end] = rec
start = start + window
end = min(end + window, N)
return forecast, prov, labels
def convert_cputicks2mlcores(trace):
return trace / (15 * 10 ** 6)
def plot_trace(trace, forecast=None, recommended_requests=None, plt_name="trace", y_label="CPU (milicores)", trace_legend="CPU Usage"):
trace_len = len(trace)
trace_idx = np.arange(trace_len) * 15
if forecast is not None and recommended_requests is not None:
trace_pd = pd.DataFrame({trace_legend: trace, "VPA recommended request": recommended_requests, "Forecast": forecast}, index=trace_idx)
elif forecast is not None:
trace_pd = pd.DataFrame({trace_legend: trace, "Forecast": forecast}, index=trace_idx)
elif recommended_requests is not None:
trace_pd = pd.DataFrame({trace_legend: trace, "VPA recommended request": recommended_requests}, index=trace_idx)
else:
trace_pd = pd.DataFrame({trace_legend: trace}, index=trace_idx)
ax = trace_pd.plot()
ax.set_xlabel("Time (seconds)")
ax.set_ylabel(y_label)
ax.legend(loc='upper right')
plt.savefig("./imgs/" + plt_name + ".pdf")
plt.savefig("./imgs/" + plt_name + ".png")
plt.show()
def main():
global data_file, output_folder, config_file
data = pickle.load(open(data_file, "rb"))
if multiproc:
pool_obj = multiprocessing.Pool(processes=None)
results = pool_obj.starmap(get_all_recommendation, zip(data, np.repeat(recommender_config.TREE, len(data))))
result_dict = {}
for i in range(len(data)):
result_dict[i] = {"forecast": results[i][0], "prov": results[i][1], "labels": results[i][2]}
pickle.dump(result_dict, open(output_folder+ "/forecast_prov_{}".format(os.path.basename(data_file)), "wb"))
else:
result_dict = {}
for i, trace in enumerate(data):
logging.info("Processing trace {}".format(i))
if "synthetic" not in data_file:
trace = convert_cputicks2mlcores(trace[:600])
forecast, prov, labels = get_all_recommendation(trace[:600], recommender_config.TREE, window=recommender_config.FORECASTING_WINDOW, sight=recommender_config.FORECASTING_SIGHT)
result_dict[i] = {"forecast": forecast, "prov": prov, "labels": labels}
plot_trace(trace[300:600], forecast[300:600], prov[300:600], plt_name="trace_{}".format(i), y_label="CPU (milicores)", trace_legend="CPU Usage")
pickle.dump(result_dict, open(output_folder+ "/pando_forecast_prov_{}".format(os.path.basename(data_file)), "wb"))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S', level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__,formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('datafile', help="Data file")
parser.add_argument('configfile', help="Tree file")
parser.add_argument('outfolder', help="Output folder")
parser.add_argument('-mp', "--multiproc", action='store_true', help='Compute recommendations in parallel')
args = parser.parse_args()
data_file = args.datafile
config_file = args.configfile
output_folder = args.outfolder
multiproc = args.multiproc
logging.info("Starting Pando VPA")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
main()
the-stack_106_25875
# pylint: skip-file
import inspect
from typing import List, Optional
import lark
from WDL.Error import SourcePosition
from WDL import Error as Err
from WDL import Tree as D
from WDL import Type as T
from WDL import Expr as E
common_grammar = r"""
?literal: "true" -> boolean_true
| "false" -> boolean_false
| INT -> int
| SIGNED_INT -> int
| FLOAT -> float
| SIGNED_FLOAT -> float
?string: string1 | string2
STRING_INNER1: ("\\'"|/[^']/)
ESCAPED_STRING1: "'" STRING_INNER1* "'"
string_literal: ESCAPED_STRING | ESCAPED_STRING1
?map_key: literal | string
map_kv: map_key ":" expr
// WDL declarations
_quant: optional | nonempty | optional_nonempty
optional: "?"
nonempty: "+"
optional_nonempty: "+?"
unbound_decl: type CNAME -> decl
bound_decl: type CNAME "=" expr -> decl
?any_decl: unbound_decl | bound_decl
// WDL task commands: with {} and <<< >>> command and ${} and ~{} placeholder styles
!?placeholder_key: "default" | "false" | "true" | "sep"
?placeholder_value: string_literal
| INT -> int
| FLOAT -> float
placeholder_option: placeholder_key "=" placeholder_value
placeholder: placeholder_option* expr
?command: command1 | command2
// task meta/parameter_meta sections (effectively JSON)
meta_object: "{" [meta_kv (","? meta_kv)*] "}"
meta_kv: CNAME ":" meta_value
?meta_value: literal | string_literal
| meta_object
| "[" [meta_value ("," meta_value)*] "]" -> meta_array
META_KIND.2: "meta" | "parameter_meta" | "runtime" // .2 ensures higher priority than CNAME
meta_section: META_KIND meta_object
// task runtime section (key-expression pairs)
runtime_section: "runtime" "{" [runtime_kv (","? runtime_kv)*] "}"
runtime_kv: CNAME ":" expr
// WDL tasks
input_decls: "input" "{" any_decl* "}"
output_decls: "output" "{" bound_decl* "}"
?task_sections1: input_decls
| output_decls
| meta_section
| runtime_section
| any_decl+ -> noninput_decls
?task_sections2: input_decls
| output_decls
| meta_section
| runtime_section
task: "task" CNAME "{" task_sections1* command task_sections2* "}"
tasks: task*
// WDL workflows
namespaced_ident: CNAME ("." CNAME)*
call_input: CNAME "=" expr
call_inputs: "input" ":" [call_input ("," call_input)*] ","?
?call_body: "{" call_inputs? "}"
call: "call" namespaced_ident call_body? -> call
| "call" namespaced_ident "as" CNAME call_body? -> call_as
?inner_workflow_element: bound_decl | call | scatter | conditional
scatter: "scatter" "(" CNAME "in" expr ")" "{" inner_workflow_element* "}"
conditional: "if" "(" expr ")" "{" inner_workflow_element* "}"
?workflow_element: any_decl | call | scatter | conditional | meta_section
workflow: "workflow" CNAME "{" input_decls? workflow_element* workflow_outputs? meta_section?"}"
// WDL document: version, imports, tasks and (at most one) workflow
version: "version" /[^ \t\r\n]+/
import_alias: "alias" CNAME "as" CNAME
import_doc: "import" string_literal ["as" CNAME] import_alias*
document: version? document_element*
| version? document_element*
COMMENT: "#" /[^\r\n]*/ NEWLINE
%import common.INT
%import common.SIGNED_INT
%import common.FLOAT
%import common.SIGNED_FLOAT
%import common.CNAME
%import common.ESCAPED_STRING
%import common.WS
%import common.NEWLINE
%ignore WS
%ignore COMMENT
// WDL expressions
?expr: expr_infix
?expr_infix: expr_infix0
?expr_infix0: expr_infix0 "||" expr_infix1 -> lor
| expr_infix1
?expr_infix1: expr_infix1 "&&" expr_infix2 -> land
| expr_infix2
?expr_infix2: expr_infix2 "==" expr_infix3 -> eqeq
| expr_infix2 "!=" expr_infix3 -> neq
| expr_infix2 "<=" expr_infix3 -> lte
| expr_infix2 ">=" expr_infix3 -> gte
| expr_infix2 "<" expr_infix3 -> lt
| expr_infix2 ">" expr_infix3 -> gt
| expr_infix3
?expr_infix3: expr_infix3 "+" expr_infix4 -> add
| expr_infix3 "-" expr_infix4 -> sub
| expr_infix4
?expr_infix4: expr_infix4 "*" expr_infix5 -> mul
| expr_infix4 "/" expr_infix5 -> div
| expr_infix4 "%" expr_infix5 -> rem
| expr_infix5
?expr_infix5: expr_core
// expression core (everything but infix)
// we stuck this last down here so that further language-version-specific
// productions can be added below
?expr_core: "(" expr ")"
| literal
| string
| "!" expr -> negate
| "[" [expr ("," expr)*] ","? "]" -> array
| expr_core "[" expr "]" -> at
| "(" expr "," expr ")" -> pair
| "{" [map_kv ("," map_kv)*] "}" -> map
| "if" expr "then" expr "else" expr -> ifthenelse
| CNAME "(" [expr ("," expr)*] ")" -> apply
| CNAME -> left_name
| expr_core "." CNAME -> get_name
"""
# pre-1.0 specific productions:
# - predefined types only
# - interpolated strings and { } and <<< >>> command styles all have placeholders delimited by ${ }
# - workflow outputs can be bare identifiers rather than complete decls
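# For illustration only (a hedged sketch, not taken from any grammar tests): a draft-2
# command block uses ${...} placeholders exclusively, e.g.
#   command {
#       echo "hello ${name}"
#   }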
productions_pre_1_0 = r"""
// WDL types
type: BUILTIN_TYPE _quant?
| BUILTIN_TYPE "[" type ["," type] "]" _quant?
BUILTIN_TYPE.2: "Int" | "Float" | "Boolean" | "String" | "File" | "Array" | "Map" | "Pair"
// string (single-quoted)
STRING1_CHAR: "\\'" | /[^'$]/ | /\$[^{$']/
STRING1_FRAGMENT: STRING1_CHAR+
string1: /'/ (STRING1_FRAGMENT? /\$/* "${" expr "}")* STRING1_FRAGMENT? /\$/* /'/ -> string
// string (double-quoted)
STRING2_CHAR: "\\\"" | /[^"$]/ | /\$[^{$"]/
STRING2_FRAGMENT: STRING2_CHAR+
string2: /"/ (STRING2_FRAGMENT? /\$/* "${" expr "}")* STRING2_FRAGMENT? /\$/* /"/ -> string
COMMAND1_CHAR: /[^$}]/ | /\$[^{$]/
COMMAND1_FRAGMENT: COMMAND1_CHAR+
command1: "command" "{" (COMMAND1_FRAGMENT? /\$/* "${" placeholder "}")* COMMAND1_FRAGMENT? /\$/* "}" -> command
COMMAND2_CHAR: /[^$>]/ | /\$[^{$]/ | />[^>]/ | />>[^>]/
COMMAND2_FRAGMENT: COMMAND2_CHAR+
command2: "command" "<<<" (COMMAND2_FRAGMENT? /\$/* "${" placeholder "}")* COMMAND2_FRAGMENT? /\$/* ">>>" -> command
?workflow_outputs: "output" "{" workflow_output_decls "}"
workflow_output_decls: workflow_output_decl*
?workflow_output_decl: bound_decl | namespaced_ident | workflow_wildcard_output
workflow_wildcard_output: namespaced_ident "." "*" | namespaced_ident ".*"
?document_element: import_doc | task | workflow
"""
# 1.0+ productions:
# - types can be any CNAME (structs)
# - within interpolated strings and { } task commands, placeholders may be delimited by ${ } or ~{ }
# - within <<< >>> commands, placeholders are delimited by ~{ } only
# - workflow outputs are complete decls
# - struct type definitions
# - struct literals (as object literals)
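# For illustration only (a hedged sketch): a 1.0 heredoc command uses ~{...}
# placeholders exclusively, e.g.
#   command <<<
#       echo "hello ~{name}"
#   >>>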
productions_1_0 = r"""
| "object" "{" [object_kv ("," object_kv)*] "}" -> obj // appends to expr_core
object_kv: CNAME ":" expr
| string_literal ":" expr
// WDL types
type: CNAME _quant?
| CNAME "[" type ["," type] "]" _quant?
_EITHER_DELIM.2: "~{" | "${"
// string (single-quoted)
STRING1_CHAR: "\\'" | /[^'~$]/ | /\$[^{$~']/ | /\~[^{$~']/
STRING1_FRAGMENT: STRING1_CHAR+
string1: /'/ (STRING1_FRAGMENT? /\$/* /\~/* _EITHER_DELIM expr "}")* STRING1_FRAGMENT? /\$/* /\~/* /'/ -> string
// string (double-quoted)
STRING2_CHAR: "\\\"" | /[^"~$]/ | /\$[^{$~"]/ | /~[^{$~"]/
STRING2_FRAGMENT: STRING2_CHAR+
string2: /"/ (STRING2_FRAGMENT? /\$/* /\~/* _EITHER_DELIM expr "}")* STRING2_FRAGMENT? /\$/* /\~/* /"/ -> string
COMMAND1_CHAR: /[^~$}]/ | /\$[^{$~]/ | /~[^{$~]/
COMMAND1_FRAGMENT: COMMAND1_CHAR+
command1: "command" "{" (COMMAND1_FRAGMENT? /\$/* /\~/* _EITHER_DELIM placeholder "}")* COMMAND1_FRAGMENT? /\$/* /\~/* "}" -> command
COMMAND2_CHAR: /[^~>]/ | /~[^{~]/ | />[^>]/ | />>[^>]/
COMMAND2_FRAGMENT: COMMAND2_CHAR+
command2: "command" "<<<" (COMMAND2_FRAGMENT? /\~/? "~{" placeholder "}")* COMMAND2_FRAGMENT? /\~/* ">>>" -> command
?workflow_outputs: output_decls
// struct definitions
struct: "struct" CNAME "{" unbound_decl* "}"
?document_element: import_doc | task | workflow | struct
"""
_keywords = "Array Float Int Map None Pair String alias as call command else false if import input left meta object output parameter_meta right runtime scatter struct task then true workflow".split(
" "
)
def _grammar_for_version(version: Optional[str]) -> str:
if version == "draft-2":
return common_grammar + productions_pre_1_0
return common_grammar + productions_1_0
# memoize Lark parsers constructed for version & start symbol
_lark_cache = {}
def parse(txt: str, start: str, version: Optional[str] = None) -> lark.Tree:
if (version, start) not in _lark_cache:
_lark_cache[(version, start)] = lark.Lark(
_grammar_for_version(version), start=start, parser="lalr", propagate_positions=True
)
return _lark_cache[(version, start)].parse(txt)
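# Usage sketch (illustrative; the expression and version string are assumptions):
#   tree = parse("1 + 1", "expr", version="1.0")
# parse_expr() below performs exactly this, followed by an _ExprTransformer pass.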
def sp(filename, meta) -> SourcePosition:
return SourcePosition(
filename=filename,
line=meta.line,
column=meta.column,
end_line=meta.end_line,
end_column=meta.end_column,
)
def to_int(x):
return int(x)
def to_float(x):
return float(x)
# Transformer from lark.Tree to WDL.Expr
class _ExprTransformer(lark.Transformer):
# pylint: disable=no-self-use,unused-argument
def __init__(self, file: str) -> None:
self.filename = file
def boolean_true(self, items, meta) -> E.Base:
return E.Boolean(sp(self.filename, meta), True)
def boolean_false(self, items, meta) -> E.Base:
return E.Boolean(sp(self.filename, meta), False)
def int(self, items, meta) -> E.Base:
assert len(items) == 1
return E.Int(sp(self.filename, meta), to_int(items[0]))
def float(self, items, meta) -> E.Base:
assert len(items) == 1
return E.Float(sp(self.filename, meta), to_float(items[0]))
def string(self, items, meta) -> E.Base:
parts = []
for item in items:
if isinstance(item, E.Base):
parts.append(E.Placeholder(item.pos, {}, item))
else:
parts.append(item.value)
assert len(parts) >= 2
assert parts[0] in ['"', "'"]
assert parts[-1] in ['"', "'"]
return E.String(sp(self.filename, meta), parts)
def string_literal(self, items, meta):
assert len(items) == 1
assert items[0].value.startswith('"') or items[0].value.startswith("'")
return str.encode(items[0].value[1:-1]).decode("unicode_escape")
def array(self, items, meta) -> E.Base:
return E.Array(sp(self.filename, meta), items)
def apply(self, items, meta) -> E.Base:
assert len(items) >= 1
assert not items[0].startswith("_") # TODO enforce in grammar
return E.Apply(sp(self.filename, meta), items[0], items[1:])
def negate(self, items, meta) -> E.Base:
return E.Apply(sp(self.filename, meta), "_negate", items)
def at(self, items, meta) -> E.Base:
return E.Apply(sp(self.filename, meta), "_at", items)
def pair(self, items, meta) -> E.Base:
assert len(items) == 2
return E.Pair(sp(self.filename, meta), items[0], items[1])
def map_kv(self, items, meta):
assert len(items) == 2
return (items[0], items[1])
def map(self, items, meta) -> E.Base:
return E.Map(sp(self.filename, meta), items)
def object_kv(self, items, meta):
assert len(items) == 2
k = items[0]
assert isinstance(k, str), k
assert isinstance(items[1], E.Base)
return (k, items[1])
def obj(self, items, meta) -> E.Base:
return E.Struct(sp(self.filename, meta), items)
def ifthenelse(self, items, meta) -> E.Base:
assert len(items) == 3
return E.IfThenElse(sp(self.filename, meta), *items)
def left_name(self, items, meta) -> E.Base:
assert len(items) == 1 and isinstance(items[0], str)
return E.Get(sp(self.filename, meta), E._LeftName(sp(self.filename, meta), items[0]), None)
def get_name(self, items, meta) -> E.Base:
assert len(items) == 2 and isinstance(items[0], E.Base) and isinstance(items[1], str)
return E.Get(sp(self.filename, meta), items[0], items[1])
# _ExprTransformer infix operators
for op in [
"land",
"lor",
"add",
"sub",
"mul",
"div",
"rem",
"eqeq",
"neq",
"lt",
"lte",
"gt",
"gte",
]:
def fn(self, items, meta, op=op):
assert len(items) == 2
return E.Apply(sp(self.filename, meta), "_" + op, items)
setattr(_ExprTransformer, op, lark.v_args(meta=True)(classmethod(fn))) # pyre-fixme
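# Illustrative note: after this loop each infix rule has a generated handler, e.g.
# _ExprTransformer.add builds E.Apply(pos, "_add", [lhs, rhs]) for `lhs + rhs`.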
class _TypeTransformer(lark.Transformer):
# pylint: disable=no-self-use,unused-argument
def __init__(self, file: str) -> None:
self.filename = file
def optional(self, items, meta):
return set(["optional"])
def nonempty(self, items, meta):
return set(["nonempty"])
def optional_nonempty(self, items, meta):
return set(["optional", "nonempty"])
def type(self, items, meta):
quantifiers = set()
if len(items) > 1 and isinstance(items[-1], set):
quantifiers = items.pop()
param = items[1] if len(items) > 1 else None
param2 = items[2] if len(items) > 2 else None
if items[0].value == "Array":
if not param or param2:
raise Err.InvalidType(sp(self.filename, meta), "Array must have one type parameter")
if quantifiers - set(["optional", "nonempty"]):
raise Err.ValidationError(
sp(self.filename, meta), "invalid type quantifier(s) for Array"
)
return T.Array(param, "optional" in quantifiers, "nonempty" in quantifiers)
if "nonempty" in quantifiers:
raise Err.InvalidType(
sp(self.filename, meta), "invalid type quantifier(s) for " + items[0].value
)
atomic_types = {
"Int": T.Int,
"Float": T.Float,
"Boolean": T.Boolean,
"String": T.String,
"File": T.File,
}
if items[0].value in atomic_types:
if param or param2:
raise Err.InvalidType(
sp(self.filename, meta), items[0] + " type doesn't accept parameters"
)
return atomic_types[items[0].value]("optional" in quantifiers)
if items[0].value == "Map":
if not (param and param2):
raise Err.InvalidType(sp(self.filename, meta), "Map must have two type parameters")
return T.Map((param, param2), "optional" in quantifiers)
if items[0].value == "Pair":
if not (param and param2):
raise Err.InvalidType(sp(self.filename, meta), "Pair must have two type parameters")
return T.Pair(param, param2, "optional" in quantifiers)
if param or param2:
raise Err.InvalidType(sp(self.filename, meta), "Unexpected type parameter(s)")
return T.StructInstance(items[0].value, "optional" in quantifiers)
def _check_keyword(pos, name):
if name in _keywords:
raise Err.SyntaxError(pos, "unexpected keyword {}".format(name))
class _DocTransformer(_ExprTransformer, _TypeTransformer):
# pylint: disable=no-self-use,unused-argument
def __init__(self, file: str) -> None:
# pylint: disable=super-init-not-called
self.filename = file
def decl(self, items, meta):
_check_keyword(sp(self.filename, meta), items[1].value)
return D.Decl(
sp(self.filename, meta),
items[0],
items[1].value,
(items[2] if len(items) > 2 else None),
)
def input_decls(self, items, meta):
return {"inputs": items}
def noninput_decls(self, items, meta):
return {"decls": items}
def placeholder_option(self, items, meta):
assert len(items) == 2
return (items[0].value, items[1])
def placeholder(self, items, meta):
options = dict(items[:-1])
if len(options.items()) < len(items) - 1:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "duplicate options in expression placeholder"
)
return E.Placeholder(sp(self.filename, meta), options, items[-1])
def command(self, items, meta):
parts = []
for item in items:
if isinstance(item, E.Placeholder):
parts.append(item)
else:
parts.append(item.value)
return {"command": E.String(sp(self.filename, meta), parts)}
def output_decls(self, items, meta):
return {"outputs": items}
def meta_kv(self, items, meta):
return (items[0].value, items[1])
def meta_object(self, items, meta):
d = dict()
for k, v in items:
if k in d:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "duplicate keys in meta object"
)
d[k] = v
return d
def meta_array(self, items, meta):
return items
def meta_section(self, items, meta):
kind = items[0].value
assert kind in ["meta", "parameter_meta"]
d = dict()
d[kind] = items[1]
return d
def runtime_kv(self, items, meta):
return (items[0].value, items[1])
def runtime_section(self, items, meta):
d = dict()
for k, v in items:
# TODO: restore duplicate check, cf. https://github.com/gatk-workflows/five-dollar-genome-analysis-pipeline/blob/89f11befc13abae97ab8fb1b457731f390c8728d/tasks_pipelines/qc.wdl#L288
# if k in d:
# raise Err.MultipleDefinitions(sp(self.filename, meta), "duplicate keys in runtime section")
d[k] = v
return {"runtime": d}
def task(self, items, meta):
d = {}
for item in items:
if isinstance(item, dict):
for k, v in item.items():
if k in d:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "redundant sections in task"
)
d[k] = v
else:
assert isinstance(item, str)
assert "name" not in d
d["name"] = item.value
_check_keyword(sp(self.filename, meta), d["name"])
return D.Task(
sp(self.filename, meta),
d["name"],
d.get("inputs", None),
d.get("decls", []),
d["command"],
d.get("outputs", []),
d.get("parameter_meta", {}),
d.get("runtime", {}),
d.get("meta", {}),
)
def tasks(self, items, meta):
return items
def namespaced_ident(self, items, meta) -> E.Base:
assert items
return [item.value for item in items]
def call_input(self, items, meta):
return (items[0].value, items[1])
def call_inputs(self, items, meta):
d = dict()
for k, v in items:
if k in d:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "duplicate keys in call inputs"
)
d[k] = v
return d
def call(self, items, meta):
return D.Call(
sp(self.filename, meta), items[0], None, items[1] if len(items) > 1 else dict()
)
def call_as(self, items, meta):
_check_keyword(sp(self.filename, meta), items[1].value)
return D.Call(
sp(self.filename, meta),
items[0],
items[1].value,
items[2] if len(items) > 2 else dict(),
)
def scatter(self, items, meta):
_check_keyword(sp(self.filename, meta), items[0].value)
return D.Scatter(sp(self.filename, meta), items[0].value, items[1], items[2:])
def conditional(self, items, meta):
return D.Conditional(sp(self.filename, meta), items[0], items[1:])
def workflow_wildcard_output(self, items, meta):
return items[0] + ["*"]
# return E.Ident(items[0].pos, items[0].namespace + [items[0].name, "*"])
def workflow_output_decls(self, items, meta):
decls = [elt for elt in items if isinstance(elt, D.Decl)]
idents = [elt for elt in items if isinstance(elt, list)]
assert len(decls) + len(idents) == len(items)
return {"outputs": decls, "output_idents": idents, "pos": sp(self.filename, meta)}
def workflow(self, items, meta):
elements = []
inputs = None
outputs = None
output_idents = None
output_idents_pos = None
parameter_meta = None
meta_section = None
for item in items[1:]:
if isinstance(item, dict):
if "inputs" in item:
assert inputs is None
inputs = item["inputs"]
elif "outputs" in item:
if outputs is not None:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "redundant sections in workflow"
)
outputs = item["outputs"]
if "output_idents" in item:
assert output_idents is None
output_idents = item["output_idents"]
output_idents_pos = item["pos"]
elif "meta" in item:
if meta_section is not None:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "redundant sections in workflow"
)
meta_section = item["meta"]
elif "parameter_meta" in item:
if parameter_meta is not None:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "redundant sections in workflow"
)
parameter_meta = item["parameter_meta"]
else:
assert False
elif isinstance(item, (D.Call, D.Conditional, D.Decl, D.Scatter)):
elements.append(item)
else:
assert False
_check_keyword(sp(self.filename, meta), items[0].value)
return D.Workflow(
sp(self.filename, meta),
items[0].value,
inputs,
elements,
outputs,
parameter_meta or dict(),
meta_section or dict(),
output_idents,
output_idents_pos,
)
def struct(self, items, meta):
assert len(items) >= 1
name = items[0]
_check_keyword(sp(self.filename, meta), name)
members = {}
for d in items[1:]:
assert not d.expr
if d.name in members:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "duplicate members in struct"
)
members[d.name] = d.type
return D.StructTypeDef(sp(self.filename, meta), name, members)
def import_alias(self, items, meta):
assert len(items) == 2
_check_keyword(sp(self.filename, meta), items[1].value)
return (items[0].value, items[1].value)
def import_doc(self, items, meta):
uri = items[0]
if len(items) > 1 and isinstance(items[1], str):
namespace = items[1].value
else:
namespace = uri
try:
namespace = namespace[namespace.rindex("/") + 1 :]
except ValueError:
pass
if namespace.endswith(".wdl"):
namespace = namespace[:-4]
_check_keyword(sp(self.filename, meta), namespace)
aliases = [p for p in items[1:] if isinstance(p, tuple)]
return D.DocImport(
pos=sp(self.filename, meta), uri=uri, namespace=namespace, aliases=aliases, doc=None
)
def document(self, items, meta):
imports = []
structs = {}
tasks = []
workflow = None
for item in items:
if isinstance(item, D.Task):
tasks.append(item)
elif isinstance(item, D.Workflow):
if workflow is not None:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "Document has multiple workflows"
)
workflow = item
elif isinstance(item, D.StructTypeDef):
if item.name in structs:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "multiple structs named " + item.name
)
structs[item.name] = item
elif isinstance(item, lark.Tree) and item.data == "version":
pass
elif isinstance(item, D.DocImport):
imports.append(item)
else:
assert False
return D.Document(sp(self.filename, meta), imports, structs, tasks, workflow)
# have lark pass the 'meta' with line/column numbers to each transformer method
for _klass in [_ExprTransformer, _TypeTransformer, _DocTransformer]:
for name, method in inspect.getmembers(_klass, inspect.isfunction):
if not name.startswith("_"):
setattr(_klass, name, lark.v_args(meta=True)(method)) # pyre-fixme
def parse_expr(txt: str, version: Optional[str] = None) -> E.Base:
try:
return _ExprTransformer(txt).transform(parse(txt, "expr", version))
except lark.exceptions.UnexpectedInput as exn:
pos = SourcePosition(
filename="(buffer)",
line=getattr(exn, "line", "?"),
column=getattr(exn, "column", "?"),
end_line=getattr(exn, "line", "?"),
end_column=getattr(exn, "column", "?"),
)
raise Err.SyntaxError(pos, str(exn)) from None
except lark.exceptions.VisitError as exn:
raise exn.__context__
def parse_tasks(txt: str, version: Optional[str] = None) -> List[D.Task]:
try:
return _DocTransformer("").transform(parse(txt, "tasks", version))
except lark.exceptions.VisitError as exn:
raise exn.__context__
def parse_document(txt: str, version: Optional[str] = None, uri: str = "") -> D.Document:
if version is None:
# for now assume the version is 1.0 if the first line is "version <number>"
# otherwise draft-2
version = "draft-2"
for line in txt.split("\n"):
line = line.strip()
if line and line[0] != "#":
if line.startswith("version ") and line[8].isdigit():
version = "1.0"
break
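# e.g. (illustrative) a document whose first non-comment line is "version 1.0" is
# parsed with the 1.0 grammar; one starting directly with "task ..." stays draft-2.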
if not txt.strip():
return D.Document(
SourcePosition(filename=uri, line=0, column=0, end_line=0, end_column=0),
[],
{},
[],
None,
)
try:
return _DocTransformer(uri).transform(parse(txt, "document", version))
except lark.exceptions.UnexpectedInput as exn:
pos = SourcePosition(
filename=(uri if uri != "" else "(buffer)"),
line=getattr(exn, "line", "?"),
column=getattr(exn, "column", "?"),
end_line=getattr(exn, "line", "?"),
end_column=getattr(exn, "column", "?"),
)
raise Err.SyntaxError(pos, str(exn)) from None
except lark.exceptions.VisitError as exn:
raise exn.__context__
|
the-stack_106_25876 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
import enum
# Pip package imports
from flask import Blueprint, current_app, make_response
from flask.json import dumps, JSONEncoder as BaseJSONEncoder
from flask.views import MethodViewType
from flask_restful import Api as BaseApi
from flask_sqlalchemy.model import camel_to_snake_case, Model
# FIXME: This is no longer present
#from marshmallow import MarshalResult
from werkzeug.wrappers import Response
# Internal package imports
from backend.extensions import db
from backend.utils import was_decorated_without_parenthesis
from .constants import CREATE, DELETE, GET, LIST, PATCH, PUT
from .model_resource import ModelResource
from .utils import get_last_param_name
def _get_model_resource_args(args):
bp, model, urls = None, args[0], args[1:]
if isinstance(args[0], Blueprint):
bp, model, urls = args[0], args[1], args[2:]
if not issubclass(model, db.Model):
raise NotImplementedError(
f"The {'second' if bp else 'first'} argument to Api.model_resource"
' must be a database model class')
if not urls:
raise NotImplementedError(
'Api.model_resource requires at least one url argument.')
return bp, model, urls
class Api(BaseApi):
"""Extends :class:`flask_restful.Api` to support integration with
Flask-Marshmallow serializers, along with a few other minor enhancements:
- can register individual view functions ala blueprints, via @api.route()
- supports using flask.jsonify() in resource methods
"""
def __init__(self, name, app=None, prefix='',
default_mediatype='application/json',
decorators=None, catch_all_404s=False,
serve_challenge_on_401=False,
url_part_order='bae', errors=None):
super().__init__(app,
prefix=prefix,
default_mediatype=default_mediatype,
decorators=decorators,
catch_all_404s=catch_all_404s,
serve_challenge_on_401=serve_challenge_on_401,
url_part_order=url_part_order,
errors=errors)
# name prefix for endpoints
self.name = name
# configure a customized output_json function so that we can use
# Flask's current_app.json_encoder setting
self.representations = {
'application/json': output_json,
}
# registry for individual view functions
self._got_registered_once = False
self.deferred_functions = []
# automatic serializer handling
self.deferred_serializers = []
self.serializers = {}
self.serializers_many = {}
self.registered_endpoints = {}
def _init_app(self, app):
super()._init_app(app)
self._got_registered_once = True
# register individual view functions with the app
for deferred in self.deferred_functions:
deferred(app)
# instantiate serializers
for serializer_class in app.serializers.values():
model_name = serializer_class.Meta.model.__name__
self.serializers[model_name] = serializer_class()
self.serializers_many[model_name] = serializer_class(many=True)
# register serializer overrides
for model_name, serializer_class, many in self.deferred_serializers:
if many:
self.serializers_many[model_name] = serializer_class(many=True)
else:
self.serializers[model_name] = serializer_class()
# attach serializers to Resource instances so that they can perform
# automatic deserialization from json requests
for resource, _, _ in self.resources:
model_name = resource.model.__name__
if model_name not in self.serializers:
raise KeyError(
f'Could not find a serializer for the {model_name} model!')
resource.serializer = self.serializers[model_name]
resource.serializer_create = self.serializers[model_name].__class__()
resource.serializer_create.context['is_create'] = True
self._register_json_encoder(app, self.serializers)
def resource(self, *urls, **kwargs):
"""Decorator to wrap a :class:`~flask_restful.Resource` class, adding
it to the api. Parameters are the same as
:meth:`~flask_restful.Api.add_resource`.
Example::
app = Flask(__name__)
api = Api('api', app)
@api.resource('/foo')
class FooResource(Resource):
def get(self):
return 'Hello, World!'
Overridden to customize the endpoint name
"""
if urls and isinstance(urls[0], Blueprint):
bp = urls[0]
urls = (f"{bp.url_prefix or ''}{url}" for url in urls[1:])
def decorator(cls):
endpoint = self._get_endpoint(cls, kwargs.pop('endpoint', None))
self.add_resource(cls, *urls, endpoint=endpoint, **kwargs)
return cls
return decorator
def model_resource(self, *args, **kwargs):
"""Decorator to wrap a :class:`backend.api.ModelResource` class, adding
it to the api. There are two supported method signatures:
`Api.model_resource(model, *urls, **kwargs)`
and
`Api.model_resource(blueprint, model, *urls, *kwargs)`
Example without blueprint::
from backend.extensions.api import api
from models import User
@api.model_resource(User, '/users', '/users/<int:id>')
class UserResource(Resource):
def get(self, user):
return user
def list(self, users):
return users
Example with blueprint::
from backend.extensions.api import api
from models import User
from views import bp
@api.model_resource(bp, User, '/users', '/users/<int:id>')
class UserResource(Resource):
def get(self, user):
return user
def list(self, users):
return users
"""
bp, model, urls = _get_model_resource_args(args)
if bp:
urls = (f"{bp.url_prefix or ''}{url}" for url in urls)
def decorator(cls):
cls.model = model
kw_endpoint = kwargs.pop('endpoint', None)
if kw_endpoint:
self.registered_endpoints[cls.__name__] = kw_endpoint
endpoint = self._get_endpoint(cls, kw_endpoint)
self.add_resource(cls, *urls, endpoint=endpoint, **kwargs)
return cls
return decorator
def serializer(self, *args, many=False):
"""Decorator to wrap a :class:`~backend.api.ModelSerializer` class,
registering the wrapped serializer as the specific one to use for the
serializer's model.
For example::
from backend.extensions.api import api
from backend.api import ModelSerializer
from models import Foo
@api.serializer # @api.serializer() works too
class FooSerializer(ModelSerializer):
class Meta:
model = Foo
@api.serializer(many=True)
class FooListSerializer(ModelSerializer):
class Meta:
model = Foo
"""
def decorator(serializer_class):
model_name = serializer_class.Meta.model.__name__
self.deferred_serializers.append((model_name, serializer_class, many))
return serializer_class
if was_decorated_without_parenthesis(args):
return decorator(args[0])
return decorator
def route(self, *args, **kwargs):
"""Decorator for registering individual view functions.
Usage without blueprint::
api = Api('api', prefix='/api/v1')
@api.route('/foo') # resulting url: /api/v1/foo
def get_foo():
# do stuff
Usage with blueprint::
api = Api('api', prefix='/api/v1')
team = Blueprint('team', url_prefix='/team')
@api.route(team, '/users') # resulting url: /api/v1/team/users
def users():
# do stuff
"""
bp, url = None, args[0]
if isinstance(args[0], Blueprint):
bp, url = args[0], args[1]
url = f"{bp.url_prefix or ''}{url}"
def decorator(fn):
endpoint = self._get_endpoint(fn, kwargs.pop('endpoint', None))
self.add_url_rule(url, endpoint, fn, **kwargs)
return fn
return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **kwargs):
if not rule.startswith('/'):
raise ValueError('URL rule must start with a forward slash (/)')
rule = self.prefix + rule
self.record(
lambda _app: _app.add_url_rule(rule, endpoint, view_func, **kwargs)
)
def record(self, fn):
if self._got_registered_once:
from warnings import warn
warn(Warning('The api was already registered once but is getting'
' modified now. These changes will not show up.'))
self.deferred_functions.append(fn)
def _get_endpoint(self, view_func, endpoint=None, plural=False):
def extract_endpoint(ep):
ep = self.registered_endpoints.get(view_func.__name__, ep)
if isinstance(endpoint, (list, tuple)):
return ep
else:
return ep, None
endpoint, endpoint_plural = extract_endpoint(endpoint)
# Store each endpoint with the view func name
if endpoint:
assert '.' not in endpoint, 'Api endpoints should not contain dots'
if isinstance(endpoint, (list, tuple)):
endpoint = endpoint[1] if plural else endpoint[0]
elif isinstance(view_func, MethodViewType):
endpoint = camel_to_snake_case(view_func.__name__)
if hasattr(view_func, 'model') and plural:
if endpoint_plural:
endpoint = endpoint_plural
else:
plural_model = camel_to_snake_case(view_func.model.__plural__)
endpoint = f'{plural_model}_resource'
else:
endpoint = view_func.__name__
str_c = f'{self.name}.{endpoint}'
return str_c
def _register_json_encoder(self, app, serializers):
BaseEncoderClass = app.json_encoder or BaseJSONEncoder
class JSONEncoder(BaseEncoderClass):
def default(self, o):
if isinstance(o, enum.Enum):
return o.name
if isinstance(o, Model):
model_name = o.__class__.__name__
base_model_name = type(o).__bases__[0].__name__
if model_name in serializers:
return serializers[model_name].dump(o)
elif base_model_name in serializers:
return serializers[base_model_name].dump(o)
return super().default(o)
app.json_encoder = JSONEncoder
def make_response(self, data, *args, **kwargs):
"""Overridden to support returning already-formed Responses unmodified,
as well as automatic serialization of lists of sqlalchemy models
(serialization of individual models is handled by a custom JSONEncoder
class configured in the self._register_json_encoder method)
"""
print("Make response-data: ", data)
# we've already got a response, eg, from jsonify
if isinstance(data, Response):
return (data, *args)
if isinstance(data, (list, tuple)) and len(data) and isinstance(data[0], Model):
model_name = data[0].__class__.__name__
print("Serializer-model_name: ", model_name)
print("self.serializers_many: ", self.serializers_many)
if model_name in self.serializers_many:
data = self.serializers_many[model_name].dump(data)
# we got the result of serializer.dump(obj)
# FIXME: This is no longer present
#if isinstance(data, MarshalResult):
# data = data.data
# we got plain python data types that need to be serialized
return super().make_response(data, *args, **kwargs)
def _register_view(self, app, resource, *urls, **kwargs):
"""Overridden to handle custom method names on ModelResources
"""
if not issubclass(resource, ModelResource) or 'methods' in kwargs:
return super()._register_view(app, resource, *urls, **kwargs)
for url in urls:
endpoint = self._get_endpoint(resource)
http_methods = []
has_last_param = get_last_param_name(url)
if has_last_param:
if ModelResource.has_method(resource, GET):
http_methods += ['GET', 'HEAD']
if ModelResource.has_method(resource, DELETE):
http_methods += ['DELETE']
if ModelResource.has_method(resource, PATCH):
http_methods += ['PATCH']
if ModelResource.has_method(resource, PUT):
http_methods += ['PUT']
else:
endpoint = self._get_endpoint(resource, plural=True)
if ModelResource.has_method(resource, LIST):
http_methods += ['GET', 'HEAD']
if ModelResource.has_method(resource, CREATE):
http_methods += ['POST']
kwargs['endpoint'] = endpoint
super()._register_view(app, resource, url, **kwargs,
methods=http_methods)
def output_json(data, code, headers=None):
"""Replaces Flask-RESTful's default output_json function, using
Flask.json's dumps method instead of the stock Python json.dumps.
Mainly this means we end up using the current app's configured
json_encoder class.
"""
settings = current_app.config.get('RESTFUL_JSON', {})
# If we're in debug mode, and the indent is not set, we set it to a
# reasonable value here.
if current_app.debug:
settings.setdefault('indent', 4)
# always end the json dumps with a new line
# see https://github.com/mitsuhiko/flask/pull/1262
dumped = dumps(data, **settings) + '\n'
response = make_response(dumped, code)
response.headers.extend(headers or {})
return response
|
the-stack_106_25877 | #!/usr/bin/env python
#--coding:utf-8 --
"""
2018-03-08: modified default minPts to 3
2018-03-13: mode added for pre-set parameters
2018-03-26: modified cut option , removed
"""
__author__ = "CAO Yaqiang"
__date__ = ""
__modified__ = ""
__email__ = "[email protected]"
__version__ = "0.93"
#sys library
import os, time, sys, logging, gzip, argparse
#glob settings
#epilog for argparse
EPILOG = "Any bug is welcome reported to [email protected], [email protected]"
def getLogger(fn=None):
"""
Create the logger system.
"""
#get the current time
date = time.strftime(' %Y-%m-%d', time.localtime(time.time()))
#if fn == None:
# fn = os.getcwd() + "/" + date.strip() + ".log"
#set up logging, both write log info to console and log file
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(name)-6s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename=fn,
filemode='a')
logger = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.NOTSET)
return logger
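# Usage sketch (illustrative file name):
#   logger = getLogger("cLoops.log")
#   logger.info("job started")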
def callSys(cmds, logger=None):
"""
Call system commands without returning their output.
"""
for c in cmds:
try:
logger.info(c)
except:
print(c)
try:
os.system(c)
except:
try:
logger.error(c)
except:
print("ERROR!", c)
def cFlush(r):
"""
One line flush.
"""
sys.stdout.write("\r%s" % r)
sys.stdout.flush()
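# Usage sketch (illustrative): calling cFlush("%s PETs processed" % i) inside a loop
# overwrites the same console line to show progress.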
def mainHelp():
"""
Create the command line interface of the main programme for calling loops.
"""
description = """
Intra-chromosomal loop calling for ChIA-PET, HiChIP and high-resolution Hi-C data.
"""
parser = argparse.ArgumentParser(description=description, epilog=EPILOG)
parser.add_argument(
"-f",
dest="fnIn",
required=True,
type=str,
help=
"Input file is mapped PETs, tab-delimited BEDPE format in gzip. Replicates could be input as -f A.bedpe.gz,B.bedpe.gz,C.bedpe.gz. Loops will be called with pooled data."
)
parser.add_argument("-o",
dest="fnOut",
required=True,
type=str,
help="Output prefix.")
parser.add_argument(
"-m",
dest="mode",
required=False,
type=int,
default=0,
choices=[0, 1, 2, 3, 4],
help=
"Pre-set parameters and signicicance cutoff for different types of data. Default is 0, using values from -eps and -minPts. Set 1 for sharp peak like ChIA-PET data (CTCF,RAD21,eg..),set 2 for broad peak like ChIA-PET data (H3K27ac,H3K4me1 eg..), and set 3 for deep sequenced Hi-C (>200 million cis PETs), set 4 for HiChIP (>100 million cis PETs). Detail parameters will be logged in the log file."
)
parser.add_argument(
"-eps",
dest="eps",
default=0,
required=False,
help=
"Distance that define two points being neighbors, eps in cDBSCAN as key parameter. For sharp peak like ChIA-PET data (CTCF), it can be set as 1000,2000. For broad peak like ChIA-PET data, such as H3K27ac/H3K4me1, set it to 2000,5000. For data like HiChIP and Hi-C, set it larger,set several eps like 5000,7500,10000. Default is 0, cLoops can auto estimate a eps for initial result, maybe not good."
)
parser.add_argument(
"-minPts",
dest="minPts",
default=0,
help=
"Points required in a cluster, minPts in cDBSCAN, key parameter. Empirically 5 is good for TFs and histone modification ChIA-PET data. For data like HiChIP and Hi-C, set it larger, like >=20. Since v0.9, it can be a seires, and the final loops will have the PETs>= max(minPts). For Hi-C data with ~200 million intra-chromosomal PETs, we set it to 20,30,40,50. For cohesin HiChIP data with ~30-40 million intra-chromosomal PETs, we set it to 10,15,20. You can custome it."
)
parser.add_argument(
"-p",
dest="cpu",
required=False,
default=1,
type=int,
help=
"CPU number used to run the job, default is 1,set -1 to use all cpus available. Too many CPU could cause memory error."
)
parser.add_argument(
"-c",
dest="chroms",
required=False,
default="",
type=str,
help=
"Whether to process limited chroms, specify it as chr1,chr2,chr3, default is not. Use this to filter reads in like chr22_KI270876v1_alt"
)
parser.add_argument(
"-w",
dest="washU",
required=False,
action="store_true",
help=
"Whether to save tracks of loops to visualize in washU. Default is No, set this flag to save."
)
parser.add_argument(
"-j",
dest="juice",
required=False,
action="store_true",
help=
"Whether to convert loops to 2d feature annotations to visualize in Juicebox. Default is No, set this flag to save."
)
parser.add_argument(
"-s",
dest="tmp",
required=False,
action="store_true",
help=
"Whether or not to save temp files for each chromosomes during processing. Set this flag for following calling differentially enriched loops or converting PETs to washU track or hic file load into juicebox. Default is not."
)
parser.add_argument(
"-hic",
dest="hic",
required=False,
action="store_true",
help=
"If input is HiChIP or high resolution Hi-C data, set this flag, using different significance cutoffs for loops than ChIA-PET data."
)
parser.add_argument(
"-cut",
dest="cut",
required=False,
default=0,
type=int,
help=
"Initial distance cutoff to filter PETs, default is 0, only used for debuging."
)
parser.add_argument(
"-max_cut",
dest="max_cut",
required=False,
action="store_true",
help=
"When running cLoops with multiple eps or minPts, multiple distance cutoffs for self-ligation and inter-ligation will be esimated, defulat is the minimal one, set this flag to use maxmial one."
)
parser.add_argument(
"-plot",
dest="plot",
required=False,
action="store_true",
help=
"Whether to plot estimated inter-ligation and self-ligation PETs distance distrbution, default is not."
)
parser.add_argument(
"-v",
dest="version",
action="version",
version="cLoops v%s" % __version__,
)
op = parser.parse_args()
return op
def deloopHelp():
"""
Create the command line interface for the script of deLoops
"""
epilog = EPILOG
description = """
Differentially enriched loops calling based on loops called by cLoops.
For example:
deLoops -fa a.loop -fb b.loop -da A -db B -p 10
"""
parser = argparse.ArgumentParser(description=description, epilog=epilog)
parser.add_argument(
"-fa",
dest="fa",
required=True,
type=str,
help=
"Loops file called by cLoops. Only using significant loops as mark 1, you can change this in the .loop file."
)
parser.add_argument("-fb",
dest="fb",
required=True,
type=str,
help="Loops file called by cLoops.")
parser.add_argument(
"-da",
dest="da",
required=True,
type=str,
help=
"Directory for .jd files of loop file a, generated by cLoops with option -s 1."
)
parser.add_argument(
"-db",
dest="db",
required=True,
type=str,
help=
"Directory for .jd files of loop file b, generated by cLoops with option -s 1."
)
parser.add_argument(
"-p",
dest="cpu",
required=False,
default=1,
type=int,
help=
"CPU number used to run the job, default is 1,set -1 to use all cpus available. Too many CPU could cause memory error."
)
parser.add_argument(
"-c",
dest="chroms",
required=False,
default="",
type=str,
help=
"Whether to process limited chroms, specify it as chr1,chr2,chr3, default is not. Set it to the same one for cLoops."
)
parser.add_argument(
"-dis",
dest="dis",
required=False,
default=0,
type=int,
help=
"Set a distance cutoff to filter PETs, could be the inter-ligation and self-ligation cutoff, default is 0."
)
op = parser.parse_args()
return op
def jd2washUHelp():
"""
Create the command line interface for the script of jd2washU.
"""
description = """
Convert PET-level data to a washU browser track for visualization. bedtools, bgzip and tabix are required.
Example:
jd2washU -d CTCF_ChIA-PET -o CTCF_ChIA-PET
"""
parser = argparse.ArgumentParser(description=description, epilog=EPILOG)
parser.add_argument(
"-d",
dest="dir",
required=True,
type=str,
help="Directory for .jd files, generated by cLoops with option -s 1.")
parser.add_argument("-o",
dest="output",
required=True,
type=str,
help="Output prefix.")
parser.add_argument(
"-ext",
dest="ext",
type=int,
default=75,
help=
"Extension from the middle center of the PET to both ends,default is 75."
)
parser.add_argument(
"-cut",
dest="cut",
type=int,
default=0,
help="Distance cutoff for PETs to filter, default is 0.")
op = parser.parse_args()
return op
def jd2juiceHelp():
"""
Create the command line interface for the script of jd2juice.
"""
description = """
Convert PET-level data to a .hic file to load into Juicebox. The command "juicer_tools pre" is required in the environment.
For example:
jd2juice -d CTCF -o test -org hg38
"""
parser = argparse.ArgumentParser(description=description, epilog=EPILOG)
parser.add_argument(
"-d",
dest="dir",
required=True,
type=str,
help="Directory for .jd files, generated by cLoops with option -s 1.")
parser.add_argument("-o",
dest="output",
required=True,
type=str,
help="Output prefix.")
parser.add_argument(
"-org",
dest="org",
required=True,
type=str,
default="hg38",
help="Organism required to generate .hic file,default is hg38.")
parser.add_argument(
"-res",
dest="resolution",
type=str,
default="1000,5000,10000,20000",
help=
"Resolutions used to generate .hic file,default is 1000,5000,10000,20000"
)
parser.add_argument(
"-cut",
dest="cut",
type=int,
default=0,
help="Distance cutoff for PETs to filter, default is 0.")
op = parser.parse_args()
return op
def jd2saturationHelp():
"""
Create the command line interface for the script of jd2saturation.
"""
description = """
Re-sampling PETs to estimate loop detection saturation.
For example:
jd2saturation -jd CTCF_chr21/chr21-chr21.jd -o test -s 5 -eps 750 -minPts 5
"""
parser = argparse.ArgumentParser(description=description, epilog=EPILOG)
parser.add_argument(
"-jd",
dest="jd",
required=True,
type=str,
help=
"The .jd file used to re-sampling, generated by cLoops with option -s 1. Small chromosome is recommended,like chr21 for human or chr19 for mouse."
)
parser.add_argument("-o",
dest="output",
required=True,
type=str,
help="Output prefix.")
parser.add_argument(
"-r",
dest="repeats",
type=int,
default=5,
help="Re-sampling times for each sequencing depth,default is 5.")
parser.add_argument("-s",
dest="step",
type=int,
default=10,
help="Re-sampling step,default is 10. ")
parser.add_argument(
"-eps",
dest="eps",
required=True,
help="Eps to run. Eps will be the same for different sequencing depth. "
)
parser.add_argument(
"-minPts",
dest="minPts",
required=True,
type=int,
help=
"minPts to run. minPts will be re-caculated according to re-sampling sequencing depth, say 0.5 PETs and it equals 0.5*minPts."
)
parser.add_argument(
"-cut",
dest="cut",
type=int,
default=0,
help="Distance cutoff for PETs to filter, default is 0.")
parser.add_argument(
"-p",
dest="cpu",
required=False,
default=1,
type=int,
help=
"CPU number used to run the job, default is 1,set -1 to use all cpus available. Too many CPU could cause memory error."
)
parser.add_argument(
"-hic",
dest="hic",
required=False,
default=0,
# note: argparse's type=bool treats any non-empty string (even "0") as True,
# so an integer 0/1 flag is used here instead
type=int,
help=
"If input is HiChIP or high resolution Hi-C data, set this to 1, using different significance cutoffs for loops than ChIA-PET data."
)
op = parser.parse_args()
return op
def jd2fingerprintHelp():
"""
Create the command line interface for the script of jd2saturation.
"""
description = """
Get the fingerprint for the datasets using a contact matrix with a specific bin size; small bin sizes like 1000 or 2000 are recommended.
For exmaple:
jd2fingerprint -d CTCF_ChIA-PET,cohesin_HiChIP,HiC -o test -bs 2000 -plot 1 -p 10 -labels CTCF_ChIA-PET,cohesin_HiChIP,HiC
"""
parser = argparse.ArgumentParser(description=description, epilog=EPILOG)
parser.add_argument(
"-d",
dest="d",
required=True,
type=str,
help=
"The directory of cis .jd file used to draw quality control fingerprint,created by cLoops with option -s 1. Mutiple samples/datasets for comparasion should be assigned as -d a,b,c"
)
parser.add_argument("-o",
dest="output",
required=True,
type=str,
help="Output prefix.")
parser.add_argument("-bs",
dest="binSize",
default=2000,
type=int,
help="Bin sizes for contact matrix, default is 2000.")
parser.add_argument(
"-labels",
dest="labels",
default="",
help=
"Labels for the datasets for ploting, default is the directory name.")
parser.add_argument(
"-plot",
dest="plot",
default=0,
# note: argparse's type=bool treats any non-empty string (even "0") as True,
# so an integer 0/1 flag is used here instead
type=int,
help=
"Set 1 for ploting the finger print, set 0 for not, default is not. ")
parser.add_argument(
"-p",
dest="cpu",
required=False,
default=1,
type=int,
help=
"CPU number used to run the job, default is 1,set -1 to use all cpus available."
)
parser.add_argument(
"-cut",
dest="cut",
type=int,
default=0,
help="Distance cutoff for PETs to filter, default is 0.")
op = parser.parse_args()
return op
|
the-stack_106_25883 | #!/usr/bin/python3
# Copyright 2022. FastyBird s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HomeKit connector logger module
"""
# Python base dependencies
import logging
import uuid
from typing import Dict
# Library libs
from fastybird_homekit_connector.types import CONNECTOR_NAME
class Logger:
"""
Connector logger
@package FastyBird:HomeKitConnector!
@module logger
@author Adam Kadlec <[email protected]>
"""
__connector_id: uuid.UUID
__logger: logging.Logger
# -----------------------------------------------------------------------------
def __init__(
self,
connector_id: uuid.UUID,
logger: logging.Logger = logging.getLogger("dummy"),
) -> None:
self.__connector_id = connector_id
self.__logger = logger
# -----------------------------------------------------------------------------
def set_logger(self, logger: logging.Logger) -> None:
"""Configure custom logger handler"""
self.__logger = logger
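# Usage sketch (illustrative identifiers):
#   logger = Logger(connector_id=uuid.uuid4(), logger=logging.getLogger("homekit"))
#   logger.info("Accessory bridge started")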
# -----------------------------------------------------------------------------
@property
def level(self) -> int:
"""Logger configured logging level"""
return self.__logger.level
# -----------------------------------------------------------------------------
def debug(self, msg: str, *args, **kwargs) -> None: # type: ignore[no-untyped-def]
"""Log debugging message"""
extra = self.__get_connector_extra()
if "extra" in kwargs:
extra = {**extra, **kwargs.get("extra", {})}
del kwargs["extra"]
self.__logger.debug(msg, extra=extra, *args, **kwargs)
# -----------------------------------------------------------------------------
def info(self, msg: str, *args, **kwargs) -> None: # type: ignore[no-untyped-def]
"""Log information message"""
extra = self.__get_connector_extra()
if "extra" in kwargs:
extra = {**extra, **kwargs.get("extra", {})}
del kwargs["extra"]
self.__logger.info(msg, extra=extra, *args, **kwargs)
# -----------------------------------------------------------------------------
def warning(self, msg: str, *args, **kwargs) -> None: # type: ignore[no-untyped-def]
"""Log warning message"""
extra = self.__get_connector_extra()
if "extra" in kwargs:
extra = {**extra, **kwargs.get("extra", {})}
del kwargs["extra"]
self.__logger.warning(msg, extra=extra, *args, **kwargs)
# -----------------------------------------------------------------------------
def error(self, msg: str, *args, **kwargs) -> None: # type: ignore[no-untyped-def]
"""Log error message"""
extra = self.__get_connector_extra()
if "extra" in kwargs:
extra = {**extra, **kwargs.get("extra", {})}
del kwargs["extra"]
self.__logger.error(msg, extra=extra, *args, **kwargs)
# -----------------------------------------------------------------------------
def exception(self, msg: Exception) -> None:
"""Log thrown exception"""
self.__logger.exception(msg)
# -----------------------------------------------------------------------------
def __get_connector_extra(self) -> Dict:
return {
"connector": {
"type": CONNECTOR_NAME,
"id": str(self.__connector_id),
},
}
|
the-stack_106_25885 | import atexit
import functools
import os
import shutil
import tempfile
import time
from copy import deepcopy as _deepcopy
from taichi._lib import core as _ti_core
from taichi._lib.utils import locale_encode
from taichi.lang import impl
from taichi.lang.expr import Expr
from taichi.lang.impl import axes
from taichi.lang.runtime_ops import sync
from taichi.lang.snode import SNode
from taichi.lang.util import warning
from taichi.profiler.kernel_profiler import get_default_kernel_profiler
from taichi.types.primitive_types import f32, f64, i32, i64
from taichi import _logging, _snode, _version_check
i = axes(0)
j = axes(1)
k = axes(2)
l = axes(3)
ij = axes(0, 1)
ik = axes(0, 2)
il = axes(0, 3)
jk = axes(1, 2)
jl = axes(1, 3)
kl = axes(2, 3)
ijk = axes(0, 1, 2)
ijl = axes(0, 1, 3)
ikl = axes(0, 2, 3)
jkl = axes(1, 2, 3)
ijkl = axes(0, 1, 2, 3)
cfg = impl.default_cfg()
x86_64 = _ti_core.x64
"""The x64 CPU backend.
"""
x64 = _ti_core.x64
"""The X64 CPU backend.
"""
arm64 = _ti_core.arm64
"""The ARM CPU backend.
"""
cuda = _ti_core.cuda
"""The CUDA backend.
"""
metal = _ti_core.metal
"""The Apple Metal backend.
"""
opengl = _ti_core.opengl
"""The OpenGL backend. OpenGL 4.3 required.
"""
# Skip annotating this one because it is barely maintained.
cc = _ti_core.cc
wasm = _ti_core.wasm
"""The WebAssembly backend.
"""
vulkan = _ti_core.vulkan
"""The Vulkan backend.
"""
dx11 = _ti_core.dx11
"""The DX11 backend.
"""
gpu = [cuda, metal, opengl, vulkan, dx11]
"""A list of GPU backends supported on the current system.
When this is used, Taichi automatically picks the matching GPU backend. If no
GPU is detected, Taichi falls back to the CPU backend.
"""
cpu = _ti_core.host_arch()
"""A list of CPU backends supported on the current system.
When this is used, Taichi automatically picks the matching CPU backend.
"""
timeline_clear = lambda: impl.get_runtime().prog.timeline_clear() # pylint: disable=unnecessary-lambda
timeline_save = lambda fn: impl.get_runtime().prog.timeline_save(fn) # pylint: disable=unnecessary-lambda
# Legacy API
type_factory_ = _ti_core.get_type_factory_instance()
def print_memory_profile_info():
"""Memory profiling tool for LLVM backends with full sparse support.
This profiler is automatically on.
"""
impl.get_runtime().materialize()
impl.get_runtime().prog.print_memory_profiler_info()
extension = _ti_core.Extension
def is_extension_supported(arch, ext):
"""Checks whether an extension is supported on an arch.
Args:
arch (taichi_core.Arch): Specified arch.
ext (taichi_core.Extension): Specified extension.
Returns:
bool: Whether `ext` is supported on `arch`.
"""
return _ti_core.is_extension_supported(arch, ext)
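# Usage sketch (illustrative; assumes the sparse extension enum member exists):
#   is_extension_supported(cuda, extension.sparse)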
def reset():
"""Resets Taichi to its initial state.
This would destroy all the fields and kernels.
"""
_ti_core.reset_snode_access_flag()
impl.reset()
global runtime
runtime = impl.get_runtime()
class _EnvironmentConfigurator:
def __init__(self, kwargs, _cfg):
self.cfg = _cfg
self.kwargs = kwargs
self.keys = []
def add(self, key, _cast=None):
_cast = _cast or self.bool_int
self.keys.append(key)
# TI_ASYNC= : no effect
# TI_ASYNC=0 : False
# TI_ASYNC=1 : True
name = 'TI_' + key.upper()
value = os.environ.get(name, '')
if len(value):
self[key] = _cast(value)
if key in self.kwargs:
_ti_core.warn(
f'ti.init argument "{key}" overridden by environment variable {name}={value}'
)
del self.kwargs[key] # mark as recognized
elif key in self.kwargs:
self[key] = self.kwargs[key]
del self.kwargs[key] # mark as recognized
def __getitem__(self, key):
return getattr(self.cfg, key)
def __setitem__(self, key, value):
setattr(self.cfg, key, value)
@staticmethod
def bool_int(x):
return bool(int(x))
class _SpecialConfig:
# like CompileConfig in C++, this is the configurations that belong to other submodules
def __init__(self):
self.log_level = 'info'
self.gdb_trigger = False
self.experimental_real_function = False
self.short_circuit_operators = False
self.ndarray_use_torch = False
def prepare_sandbox():
'''
Returns a temporary directory, which will be automatically deleted on exit.
It may contain the taichi_core shared object or some misc. files.
'''
tmp_dir = tempfile.mkdtemp(prefix='taichi-')
atexit.register(shutil.rmtree, tmp_dir)
print(f'[Taichi] preparing sandbox at {tmp_dir}')
os.mkdir(os.path.join(tmp_dir, 'runtime/'))
return tmp_dir
def init(arch=None,
default_fp=None,
default_ip=None,
_test_mode=False,
enable_fallback=True,
**kwargs):
"""Initializes the Taichi runtime.
This should always be the entry point of your Taichi program. Most
importantly, it sets the backend used throughout the program.
Args:
arch: Backend to use. This is usually :const:`~taichi.lang.cpu` or :const:`~taichi.lang.gpu`.
default_fp (Optional[type]): Default floating-point type.
default_ip (Optional[type]): Default integral type.
**kwargs: Taichi provides highly customizable compilation through
``kwargs``, which allows for fine grained control of Taichi compiler
behavior. Below we list some of the most frequently used ones. For a
complete list, please check out
https://github.com/taichi-dev/taichi/blob/master/taichi/program/compile_config.h.
* ``cpu_max_num_threads`` (int): Sets the number of threads used by the CPU thread pool.
* ``debug`` (bool): Enables the debug mode, under which Taichi does a few more things like boundary checks.
* ``print_ir`` (bool): Prints the CHI IR of the Taichi kernels.
* ``packed`` (bool): Enables the packed memory layout. See https://docs.taichi.graphics/lang/articles/advanced/layout.
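Example (a minimal, illustrative call using options documented above)::
    >>> ti.init(arch=ti.cpu, default_fp=ti.f32, debug=True)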
"""
# Check version for users every 7 days if not disabled by users.
_version_check.start_version_check_thread()
# Make a deepcopy in case these args reference to items from ti.cfg, which are
# actually references. If no copy is made and the args are indeed references,
# ti.reset() could override the args to their default values.
default_fp = _deepcopy(default_fp)
default_ip = _deepcopy(default_ip)
kwargs = _deepcopy(kwargs)
reset()
spec_cfg = _SpecialConfig()
env_comp = _EnvironmentConfigurator(kwargs, cfg)
env_spec = _EnvironmentConfigurator(kwargs, spec_cfg)
# configure default_fp/ip:
# TODO: move these stuff to _SpecialConfig too:
env_default_fp = os.environ.get("TI_DEFAULT_FP")
if env_default_fp:
if default_fp is not None:
_ti_core.warn(
f'ti.init argument "default_fp" overridden by environment variable TI_DEFAULT_FP={env_default_fp}'
)
if env_default_fp == '32':
default_fp = f32
elif env_default_fp == '64':
default_fp = f64
elif env_default_fp is not None:
raise ValueError(
f'Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64')
env_default_ip = os.environ.get("TI_DEFAULT_IP")
if env_default_ip:
if default_ip is not None:
_ti_core.warn(
f'ti.init argument "default_ip" overridden by environment variable TI_DEFAULT_IP={env_default_ip}'
)
if env_default_ip == '32':
default_ip = i32
elif env_default_ip == '64':
default_ip = i64
elif env_default_ip is not None:
raise ValueError(
f'Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64')
if default_fp is not None:
impl.get_runtime().set_default_fp(default_fp)
if default_ip is not None:
impl.get_runtime().set_default_ip(default_ip)
# submodule configurations (spec_cfg):
env_spec.add('log_level', str)
env_spec.add('gdb_trigger')
env_spec.add('experimental_real_function')
env_spec.add('short_circuit_operators')
env_spec.add('ndarray_use_torch')
# compiler configurations (ti.cfg):
for key in dir(cfg):
if key in ['arch', 'default_fp', 'default_ip']:
continue
_cast = type(getattr(cfg, key))
if _cast is bool:
_cast = None
env_comp.add(key, _cast)
unexpected_keys = kwargs.keys()
if len(unexpected_keys):
raise KeyError(
f'Unrecognized keyword argument(s) for ti.init: {", ".join(unexpected_keys)}'
)
# dispatch configurations that are not in ti.cfg:
if not _test_mode:
_ti_core.set_core_trigger_gdb_when_crash(spec_cfg.gdb_trigger)
impl.get_runtime().experimental_real_function = \
spec_cfg.experimental_real_function
impl.get_runtime().short_circuit_operators = \
spec_cfg.short_circuit_operators
impl.get_runtime().ndarray_use_torch = \
spec_cfg.ndarray_use_torch
_logging.set_logging_level(spec_cfg.log_level.lower())
# select arch (backend):
env_arch = os.environ.get('TI_ARCH')
if env_arch is not None:
_logging.info(f'Following TI_ARCH setting up for arch={env_arch}')
arch = _ti_core.arch_from_name(env_arch)
cfg.arch = adaptive_arch_select(arch, enable_fallback, cfg.use_gles)
if cfg.arch == cc:
_ti_core.set_tmp_dir(locale_encode(prepare_sandbox()))
print(f'[Taichi] Starting on arch={_ti_core.arch_name(cfg.arch)}')
# Torch based ndarray on opengl backend allocates memory on host instead of opengl backend.
# So it won't work.
if cfg.arch == opengl and spec_cfg.ndarray_use_torch:
_logging.warn(
'Opengl backend doesn\'t support torch based ndarray. Setting ndarray_use_torch to False.'
)
impl.get_runtime().ndarray_use_torch = False
if _test_mode:
return spec_cfg
get_default_kernel_profiler().set_kernel_profiler_mode(cfg.kernel_profiler)
# create a new program:
impl.get_runtime().create_program()
_logging.trace('Materializing runtime...')
impl.get_runtime().prog.materialize_runtime()
impl._root_fb = _snode.FieldsBuilder()
if not os.environ.get("TI_DISABLE_SIGNAL_HANDLERS", False):
impl.get_runtime()._register_signal_handlers()
return None
def no_activate(*args):
for v in args:
_ti_core.no_activate(v.snode.ptr)
def block_local(*args):
"""Hints Taichi to cache the fields and to enable the BLS optimization.
Please visit https://docs.taichi.graphics/lang/articles/advanced/performance
for how BLS is used.
Args:
*args (List[Field]): A list of sparse Taichi fields.
"""
if impl.current_cfg().opt_level == 0:
_logging.warn("""opt_level = 1 is enforced to enable bls analysis.""")
impl.current_cfg().opt_level = 1
for a in args:
for v in a.get_field_members():
_ti_core.insert_snode_access_flag(
_ti_core.SNodeAccessFlag.block_local, v.ptr)
def mesh_local(*args):
for a in args:
for v in a.get_field_members():
_ti_core.insert_snode_access_flag(
_ti_core.SNodeAccessFlag.mesh_local, v.ptr)
def cache_read_only(*args):
for a in args:
for v in a.get_field_members():
_ti_core.insert_snode_access_flag(
_ti_core.SNodeAccessFlag.read_only, v.ptr)
def assume_in_range(val, base, low, high):
return _ti_core.expr_assume_in_range(
Expr(val).ptr,
Expr(base).ptr, low, high)
def loop_unique(val, covers=None):
if covers is None:
covers = []
if not isinstance(covers, (list, tuple)):
covers = [covers]
covers = [x.snode.ptr if isinstance(x, Expr) else x.ptr for x in covers]
return _ti_core.expr_loop_unique(Expr(val).ptr, covers)
parallelize = _ti_core.parallelize
serialize = lambda: parallelize(1)
block_dim = _ti_core.block_dim
global_thread_idx = _ti_core.insert_thread_idx_expr
mesh_patch_idx = _ti_core.insert_patch_idx_expr
def Tape(loss, clear_gradients=True):
"""Return a context manager of :class:`~taichi.lang.tape.TapeImpl`. The
context manager would catching all of the callings of functions that
decorated by :func:`~taichi.lang.kernel_impl.kernel` or
:func:`~taichi.ad.grad_replaced` under `with` statement, and calculate
all the partial gradients of a given loss variable by calling all of the
gradient function of the callings caught in reverse order while `with`
statement ended.
See also :func:`~taichi.lang.kernel_impl.kernel` and
:func:`~taichi.ad.grad_replaced` for gradient functions.
Args:
loss(:class:`~taichi.lang.expr.Expr`): The loss field, which shape should be ().
clear_gradients(Bool): Before `with` body start, clear all gradients or not.
Returns:
:class:`~taichi.lang.tape.TapeImpl`: The context manager.
Example::
>>> @ti.kernel
>>> def sum(a: ti.float32):
>>> for I in ti.grouped(x):
>>> y[None] += x[I] ** a
>>>
>>> with ti.Tape(loss = y):
>>> sum(2)"""
impl.get_runtime().materialize()
if len(loss.shape) != 0:
raise RuntimeError(
'The loss of `Tape` must be a 0-D field, i.e. scalar')
if not loss.snode.ptr.has_grad():
raise RuntimeError(
'Gradients of loss are not allocated, please use ti.field(..., needs_grad=True)'
' for all fields that are required by autodiff.')
if clear_gradients:
clear_all_gradients()
from taichi._kernels import clear_loss # pylint: disable=C0415
clear_loss(loss)
return impl.get_runtime().get_tape(loss)
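# Editor's note (illustrative): continuing the docstring example above, once
# the `with ti.Tape(loss=y)` block exits, the partial derivatives of the loss
# with respect to `x` are available in the gradient field `x.grad`, provided
# `x` was allocated with ti.field(..., needs_grad=True) as the error message
# above requires.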
def clear_all_gradients():
"""Set all fields' gradients to 0."""
impl.get_runtime().materialize()
def visit(node):
places = []
for _i in range(node.ptr.get_num_ch()):
ch = node.ptr.get_ch(_i)
if not ch.is_place():
visit(SNode(ch))
else:
if not ch.is_primal():
places.append(ch.get_expr())
places = tuple(places)
if places:
from taichi._kernels import \
clear_gradients # pylint: disable=C0415
clear_gradients(places)
for root_fb in _snode.FieldsBuilder.finalized_roots():
visit(root_fb)
def benchmark(_func, repeat=300, args=()):
def run_benchmark():
compile_time = time.time()
_func(*args) # compile the kernel first
sync()
compile_time = time.time() - compile_time
stat_write('compilation_time', compile_time)
codegen_stat = _ti_core.stat()
for line in codegen_stat.split('\n'):
try:
a, b = line.strip().split(':')
            except ValueError:
continue
a = a.strip()
b = int(float(b))
if a == 'codegen_kernel_statements':
stat_write('compiled_inst', b)
if a == 'codegen_offloaded_tasks':
stat_write('compiled_tasks', b)
elif a == 'launched_tasks':
stat_write('launched_tasks', b)
# Use 3 initial iterations to warm up
# instruction/data caches. Discussion:
# https://github.com/taichi-dev/taichi/pull/1002#discussion_r426312136
for _ in range(3):
_func(*args)
sync()
clear_kernel_profile_info()
t = time.time()
for _ in range(repeat):
_func(*args)
sync()
elapsed = time.time() - t
avg = elapsed / repeat
stat_write('wall_clk_t', avg)
device_time = kernel_profiler_total_time()
avg_device_time = device_time / repeat
stat_write('exec_t', avg_device_time)
run_benchmark()
def benchmark_plot(fn=None,
cases=None,
columns=None,
column_titles=None,
archs=None,
title=None,
bars='sync_vs_async',
bar_width=0.4,
bar_distance=0,
left_margin=0,
size=(12, 8)):
import matplotlib.pyplot as plt # pylint: disable=C0415
import yaml # pylint: disable=C0415
if fn is None:
fn = os.path.join(_ti_core.get_repo_dir(), 'benchmarks', 'output',
'benchmark.yml')
with open(fn, 'r') as f:
data = yaml.load(f, Loader=yaml.SafeLoader)
if bars != 'sync_vs_async': # need baseline
baseline_dir = os.path.join(_ti_core.get_repo_dir(), 'benchmarks',
'baseline')
baseline_file = f'{baseline_dir}/benchmark.yml'
with open(baseline_file, 'r') as f:
baseline_data = yaml.load(f, Loader=yaml.SafeLoader)
if cases is None:
cases = list(data.keys())
assert len(cases) >= 1
if len(cases) == 1:
cases = [cases[0], cases[0]]
warning(
'Function benchmark_plot does not support plotting with only one case for now. Duplicating the item to move on.'
)
if columns is None:
columns = list(data[cases[0]].keys())
if column_titles is None:
column_titles = columns
normalize_to_lowest = lambda x: True
figure, subfigures = plt.subplots(len(cases), len(columns))
if title is None:
        title = 'Taichi Performance Benchmarks (higher is better)'
figure.suptitle(title, fontweight="bold")
for col_id in range(len(columns)):
subfigures[0][col_id].set_title(column_titles[col_id])
for case_id, case in enumerate(cases):
subfigures[case_id][0].annotate(
case,
xy=(0, 0.5),
xytext=(-subfigures[case_id][0].yaxis.labelpad - 5, 0),
xycoords=subfigures[case_id][0].yaxis.label,
textcoords='offset points',
size='large',
ha='right',
va='center')
for col_id, col in enumerate(columns):
if archs is None:
current_archs = data[case][col].keys()
else:
current_archs = [
x for x in archs if x in data[case][col].keys()
]
if bars == 'sync_vs_async':
y_left = [
data[case][col][arch]['sync'] for arch in current_archs
]
label_left = 'sync'
y_right = [
data[case][col][arch]['async'] for arch in current_archs
]
label_right = 'async'
elif bars == 'sync_regression':
y_left = [
baseline_data[case][col][arch]['sync']
for arch in current_archs
]
label_left = 'before'
y_right = [
data[case][col][arch]['sync'] for arch in current_archs
]
label_right = 'after'
elif bars == 'async_regression':
y_left = [
baseline_data[case][col][arch]['async']
for arch in current_archs
]
label_left = 'before'
y_right = [
data[case][col][arch]['async'] for arch in current_archs
]
label_right = 'after'
else:
raise RuntimeError('Unknown bars type')
if normalize_to_lowest(col):
for _i in range(len(current_archs)):
maximum = max(y_left[_i], y_right[_i])
y_left[_i] = y_left[_i] / maximum if y_left[_i] != 0 else 1
                    y_right[_i] = (y_right[_i] / maximum
                                   if y_right[_i] != 0 else 1)
ax = subfigures[case_id][col_id]
bar_left = ax.bar(x=[
i - bar_width / 2 - bar_distance / 2
for i in range(len(current_archs))
],
height=y_left,
width=bar_width,
label=label_left,
color=(0.47, 0.69, 0.89, 1.0))
bar_right = ax.bar(x=[
i + bar_width / 2 + bar_distance / 2
for i in range(len(current_archs))
],
height=y_right,
width=bar_width,
label=label_right,
color=(0.68, 0.26, 0.31, 1.0))
ax.set_xticks(range(len(current_archs)))
ax.set_xticklabels(current_archs)
figure.legend((bar_left, bar_right), (label_left, label_right),
loc='lower center')
figure.subplots_adjust(left=left_margin)
fig = plt.gcf()
fig.set_size_inches(size)
plt.show()
def stat_write(key, value):
import yaml # pylint: disable=C0415
case_name = os.environ.get('TI_CURRENT_BENCHMARK')
if case_name is None:
return
if case_name.startswith('benchmark_'):
case_name = case_name[10:]
arch_name = _ti_core.arch_name(cfg.arch)
async_mode = 'async' if cfg.async_mode else 'sync'
output_dir = os.environ.get('TI_BENCHMARK_OUTPUT_DIR', '.')
filename = f'{output_dir}/benchmark.yml'
try:
with open(filename, 'r') as f:
data = yaml.load(f, Loader=yaml.SafeLoader)
except FileNotFoundError:
data = {}
data.setdefault(case_name, {})
data[case_name].setdefault(key, {})
data[case_name][key].setdefault(arch_name, {})
data[case_name][key][arch_name][async_mode] = value
with open(filename, 'w') as f:
yaml.dump(data, f, Dumper=yaml.SafeDumper)
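# Sketch of the benchmark.yml layout written by stat_write() above, derived
# from the nesting order case -> key -> arch -> sync/async (values below are
# hypothetical):
#     fill:                 # TI_CURRENT_BENCHMARK with 'benchmark_' stripped
#       wall_clk_t:         # metric key passed to stat_write()
#         x64:              # _ti_core.arch_name(cfg.arch)
#           sync: 0.00123   # value recorded for the current async mode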
def is_arch_supported(arch, use_gles=False):
"""Checks whether an arch is supported on the machine.
Args:
arch (taichi_core.Arch): Specified arch.
        use_gles (bool): If True, check if GLES is available; otherwise
check if GLSL is available. Only effective when `arch` is `ti.opengl`.
Default is `False`.
Returns:
bool: Whether `arch` is supported on the machine.
"""
arch_table = {
cuda: _ti_core.with_cuda,
metal: _ti_core.with_metal,
opengl: functools.partial(_ti_core.with_opengl, use_gles),
cc: _ti_core.with_cc,
vulkan: _ti_core.with_vulkan,
dx11: _ti_core.with_dx11,
wasm: lambda: True,
cpu: lambda: True,
}
with_arch = arch_table.get(arch, lambda: False)
try:
return with_arch()
except Exception as e:
arch = _ti_core.arch_name(arch)
_ti_core.warn(
f"{e.__class__.__name__}: '{e}' occurred when detecting "
f"{arch}, consider adding `TI_ENABLE_{arch.upper()}=0` "
f" to environment variables to suppress this warning message.")
return False
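# Illustrative sketch (editor addition, not original code): is_arch_supported()
# can drive a simple backend preference list before calling ti.init(). The
# preference order below is purely an example; `cpu` always reports True, so
# the lookup cannot fall through.
def _example_pick_backend(preferred=None):
    preferred = preferred or [cuda, vulkan, cpu]
    return next(a for a in preferred if is_arch_supported(a))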
def adaptive_arch_select(arch, enable_fallback, use_gles):
if arch is None:
return cpu
if not isinstance(arch, (list, tuple)):
arch = [arch]
for a in arch:
if is_arch_supported(a, use_gles):
return a
if not enable_fallback:
raise RuntimeError(f'Arch={arch} is not supported')
_logging.warn(f'Arch={arch} is not supported, falling back to CPU')
return cpu
def get_host_arch_list():
return [_ti_core.host_arch()]
__all__ = [
'i', 'ij', 'ijk', 'ijkl', 'ijl', 'ik', 'ikl', 'il', 'j', 'jk', 'jkl', 'jl',
'k', 'kl', 'l', 'cfg', 'x86_64', 'x64', 'dx11', 'wasm', 'arm64', 'cc',
'cpu', 'cuda', 'gpu', 'metal', 'opengl', 'vulkan', 'extension',
'parallelize', 'block_dim', 'global_thread_idx', 'Tape', 'assume_in_range',
'benchmark', 'benchmark_plot', 'block_local', 'cache_read_only',
'clear_all_gradients', 'init', 'mesh_local', 'no_activate',
'print_memory_profile_info', 'reset'
]
|
the-stack_106_25886 | from numpy.lib.shape_base import take_along_axis
from suzieq.poller.services.service import Service
from suzieq.utils import convert_macaddr_format_to_colon, expand_ios_ifname
import re
import numpy as np
class LldpService(Service):
"""LLDP service. Different class because of munging ifname"""
def _common_data_cleaner(self, processed_data, raw_data):
drop_indices = []
for i, entry in enumerate(processed_data):
if not entry:
continue
if not entry.get('ifname', ''):
drop_indices.append(i)
continue
self._common_cleaner(entry)
processed_data = np.delete(processed_data, drop_indices).tolist()
return processed_data
def _common_cleaner(self, entry):
chassis_type = entry.get('_chassisType', '')
if not isinstance(chassis_type, str):
if chassis_type == 7:
chassis_type = 'peerHostname'
else:
chassis_type = 'unknown'
chassis_type = chassis_type.lower()
if chassis_type == 'mac address':
entry['peerHostname'] = convert_macaddr_format_to_colon(
entry.get('peerHostname', '0000.0000.0000'))
subtype = entry.get('subtype', '')
if not isinstance(subtype, str):
if subtype == 7:
subtype = 'interface name'
else:
subtype = 'unknown'
subtype = subtype.lower()
if subtype in ["interface name", '']:
entry['peerMacaddr'] = '00:00:00:00:00:00'
entry['peerIfindex'] = 0
entry['subtype'] = 'interface name' # IOS* don't provide subtype
if subtype == 'mac address':
entry['peerMacaddr'] = convert_macaddr_format_to_colon(
entry.get('peerIfname', '0000.0000.0000'))
entry['peerIfname'] = '-'
entry['peerIfindex'] = 0
entry['subtype'] = 'mac address'
elif subtype.startswith('locally'):
entry['peerIfindex'] = entry['peerIfname']
entry['peerIfname'] = '-'
entry['peerMacaddr'] = '00:00:00:00:00:00'
entry['subtype'] = 'locally assigned'
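    # Illustrative sketch (editor's note) of what _common_cleaner does to one
    # entry with a MAC-address port subtype (field values are hypothetical):
    #   in : {'subtype': 'Mac address', 'peerIfname': 'aabb.ccdd.eeff', ...}
    #   out: subtype='mac address', peerMacaddr='aa:bb:cc:dd:ee:ff',
    #        peerIfname='-', peerIfindex=0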
def _clean_nxos_data(self, processed_data, raw_data):
drop_indices = []
entries = {}
for i, entry in enumerate(processed_data):
entry['peerHostname'] = re.sub(r'\(.*\)', '',
entry['peerHostname'])
entry['ifname'] = re.sub(
                r'^Eth?(\d)', r'Ethernet\g<1>', entry['ifname'])
if entry['ifname'] in entries:
# Description is sometimes filled in with CDP, but not LLDP
if not entry.get('description', ''):
old_entry = processed_data[entries[entry['ifname']]]
entry['description'] = old_entry.get('description', '')
drop_indices.append(entries[entry['ifname']])
else:
entries[entry['ifname']] = i
if entry.get('protocol', '') == 'cdp':
entry['subtype'] = 'interface name'
entry['peerIfname'] = expand_ios_ifname(
entry.get('peerIfname', ''))
if entry.get('mgmtIP', '') == "not advertised":
entry['mgmtIP'] = '' # make it consistent with other NOS
self._common_cleaner(entry)
if not entry['peerHostname']:
drop_indices.append(i)
processed_data = np.delete(processed_data, drop_indices).tolist()
return processed_data
def _clean_junos_data(self, processed_data, raw_data):
drop_indices = []
for i, entry in enumerate(processed_data):
if not entry.get('ifname', ''):
drop_indices.append(i)
continue
self._common_cleaner(entry)
processed_data = np.delete(processed_data, drop_indices).tolist()
return processed_data
def _clean_eos_data(self, processed_data, raw_data):
drop_indices = []
for i, entry in enumerate(processed_data):
subtype = entry.get('subtype', '')
if subtype == 'interfaceName':
entry['subtype'] = 'interface name'
elif subtype == 'macAddress':
entry['subtype'] = 'mac address'
elif subtype == "local":
entry['subtype'] = 'locally assigned'
self._common_cleaner(entry)
if not entry['peerHostname']:
drop_indices.append(i)
processed_data = np.delete(processed_data, drop_indices).tolist()
return processed_data
def _clean_cumulus_data(self, processed_data, raw_data):
for entry in processed_data:
subtype = entry.get('subtype', '')
if subtype == "ifname":
entry['subtype'] = "interface name"
elif subtype == "local":
entry['subtype'] = "locally assigned"
self._common_cleaner(entry)
return processed_data
def _clean_linux_data(self, processed_data, raw_data):
for entry in processed_data:
subtype = entry.get('subtype', '')
if subtype == 'ifname':
entry['subtype'] = 'interface name'
elif subtype == 'local':
entry['subtype'] = 'locally assigned'
elif subtype == 'mac':
entry['subtype'] = 'mac address'
self._common_cleaner(entry)
return processed_data
def _clean_iosxr_data(self, processed_data, raw_data):
for entry in processed_data:
self._common_cleaner(entry)
return processed_data
def _clean_iosxe_data(self, processed_data, raw_data):
for entry in processed_data:
for field in ['ifname', 'peerIfname']:
entry[field] = expand_ios_ifname(entry[field])
if ' ' in entry.get(field, ''):
entry[field] = entry[field].replace(' ', '')
self._common_cleaner(entry)
return processed_data
def _clean_ios_data(self, processed_data, raw_data):
return self._clean_iosxe_data(processed_data, raw_data)
def _clean_sonic_data(self, processed_data, raw_data):
return self._clean_linux_data(processed_data, raw_data)
|
the-stack_106_25887 | import json
from typing import Any
import numpy as np
import gym
from gym import spaces
from game.simulator import Simulator
class LilysGardenEnv(gym.Env):
def __init__(self, level: int = 1, **kwargs):
"""The gym environment for Lily's Garden.
Example of starting env: env = gym.make('lg-v0', level=1)
Parameters
----------
simulator: Simulator
instance of Simulator
Returns
-------
"""
self.simulator = None
self.level = level
self.board_state = None
self.board_state_full = None
self.return_full_state = False
self.latest_observation = None
self.current_progress = None
self.max_total_steps = None
self.valid_steps = 0
self.total_steps = 0
self.collect_goal_goal = 0
self.sim_seed = None
self.board_size = (13, 9)
self.channels = 24 # 24 from sim + action mask from env
self.observation_space = gym.spaces.Box(low=0,
high=10.,
shape=(self.board_size[0], self.board_size[1], self.channels),
dtype=np.float32)
self.action_space = spaces.Discrete(self.board_size[0] * self.board_size[1])
self.valid_actions = [1] * self.action_space.n
self.__dict__.update(kwargs)
def reset(self, seed=None):
self.valid_steps = 0
self.total_steps = 0
sim_seed = np.random.randint(1, 2 ** 31 - 1) if seed is None else seed
self.sim_seed = sim_seed
new_board = self.simulator.reset(self.get_level(), sim_seed)
obs = new_board.get('multichannelArrayState', '{}')
if obs:
self.board_state = json.loads(obs)
else:
self.board_state = None
self.latest_observation = self._observation_from_state()
self.current_progress = self._calculate_progress()
return self.latest_observation
    def step(self, action: int) -> Any:
coords = self._action_to_coord(action)
result = self.simulator.step(coords['x'], coords['y'])
self.board_state = json.loads(result.get('multichannelArrayState', '{}'))
valid_action = result.get('clickSuccessful', False)
new_progress = self._calculate_progress()
observation = self._observation_from_state()
self.latest_observation = observation
self.valid_steps += 1 * valid_action
goal_reached = self.board_state['collectGoalRemaining'] < 1e-6
self.total_steps += 1
info_dict = dict(valid_steps=self.valid_steps,
total_steps=self.total_steps,
successful_click=valid_action,
new_progress=new_progress)
info_dict['goal_reached'] = goal_reached
reward = self._calculate_reward(info_dict)
# Should the env be reset?
done = goal_reached
self.current_progress = new_progress
return observation, reward, done, info_dict
def render(self, mode='human'):
raise NotImplementedError
def get_level(self):
return self.level
def set_level(self, level):
self.level = level
def set_simulator(self, simulator: Simulator):
self.simulator = simulator
def _calculate_progress(self):
try:
progress = self.board_state['collectGoalGoal'] - self.board_state['collectGoalRemaining']
except (TypeError, KeyError):
progress = self.current_progress
return progress
def _calculate_reward(self, info_dict) -> float:
reward = (self.current_progress - info_dict['new_progress']) - .1 - .5 * (not info_dict['successful_click'])
reward += 5 * info_dict.get('goal_reached', False)
return reward
def _observation_from_state(self):
try:
obs = np.array(self.board_state['board'],
dtype=np.float64).reshape(self.board_state['boardSize'], order='F')
except (TypeError, KeyError):
obs = None
return obs
def _action_to_coord(self, action: int) -> dict:
indexes = self._action_to_index(action)
return {'x': int(indexes['idx'] - self.board_size[0] // 2), 'y': int(indexes['idy'] - self.board_size[1] // 2)}
def _action_to_index(self, action: int) -> dict:
return {'idx': action % self.board_size[0], 'idy': action // self.board_size[0]}
def _coord_to_index(self, x: int, y: int) -> dict:
return {'idx': int(x + self.board_size[0] // 2), 'idy': int(y + self.board_size[1] // 2)}
def _coord_to_action(self, x: int, y: int) -> int:
return self._index_to_action(**self._coord_to_index(x, y))
def _index_to_action(self, idx: int, idy: int) -> int:
return idx + idy * self.board_size[0]
def _index_to_coord(self, idx: int, idy: int) -> dict:
return {'x': idx - self.board_size[0] // 2, 'y': idy - self.board_size[1] // 2}
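    # Worked example of the helpers above with the default 13x9 board:
    # action 58 -> idx = 58 % 13 = 6, idy = 58 // 13 = 4 -> (x, y) = (0, 0),
    # i.e. the centre cell; conversely _coord_to_action(0, 0) == 58.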
|
the-stack_106_25888 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n.provider import clouds
from collections import Counter
import contextlib
import copy
import datetime
import itertools
import logging
import os
import shutil
import sys
import tempfile
import time
import traceback
import boto3
from c7n.credentials import SessionFactory
from c7n.config import Bag
from c7n.log import CloudWatchLogHandler
# Import output registries aws provider extends.
from c7n.output import (
api_stats_outputs,
blob_outputs,
log_outputs,
metrics_outputs,
tracer_outputs
)
# Output base implementations we extend.
from c7n.output import (
Metrics,
DeltaStats,
DirectoryOutput,
LogOutput,
)
from c7n.registry import PluginRegistry
from c7n import credentials, utils
log = logging.getLogger('custodian.aws')
try:
from aws_xray_sdk.core import xray_recorder, patch
from aws_xray_sdk.core.context import Context
HAVE_XRAY = True
except ImportError:
HAVE_XRAY = False
class Context(object): pass # NOQA
_profile_session = None
DEFAULT_NAMESPACE = "CloudMaid"
def get_profile_session(options):
global _profile_session
if _profile_session:
return _profile_session
profile = getattr(options, 'profile', None)
_profile_session = boto3.Session(profile_name=profile)
return _profile_session
def _default_region(options):
marker = object()
value = getattr(options, 'regions', marker)
if value is marker:
return
if len(value) > 0:
return
try:
options.regions = [get_profile_session(options).region_name]
except Exception:
log.warning('Could not determine default region')
options.regions = [None]
if options.regions[0] is None:
log.error('No default region set. Specify a default via AWS_DEFAULT_REGION '
'or setting a region in ~/.aws/config')
sys.exit(1)
log.debug("using default region:%s from boto" % options.regions[0])
def _default_account_id(options):
if options.assume_role:
try:
options.account_id = options.assume_role.split(':')[4]
return
except IndexError:
pass
try:
session = get_profile_session(options)
options.account_id = utils.get_account_id_from_sts(session)
except Exception:
options.account_id = None
@metrics_outputs.register('aws')
class MetricsOutput(Metrics):
"""Send metrics data to cloudwatch
"""
permissions = ("cloudWatch:PutMetricData",)
retry = staticmethod(utils.get_retry(('Throttling',)))
def __init__(self, ctx, config=None):
super(MetricsOutput, self).__init__(ctx, config)
self.namespace = self.config.get('namespace', DEFAULT_NAMESPACE)
def _format_metric(self, key, value, unit, dimensions):
d = {
"MetricName": key,
"Timestamp": datetime.datetime.utcnow(),
"Value": value,
"Unit": unit}
d["Dimensions"] = [
{"Name": "Policy", "Value": self.ctx.policy.name},
{"Name": "ResType", "Value": self.ctx.policy.resource_type}]
for k, v in dimensions.items():
d['Dimensions'].append({"Name": k, "Value": v})
return d
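    # Sketch of one datum produced by _format_metric() above (editor's note,
    # values are hypothetical):
    #   {"MetricName": "ResourceCount", "Timestamp": <utcnow>, "Value": 12,
    #    "Unit": "Count", "Dimensions": [{"Name": "Policy", "Value": ...},
    #                                    {"Name": "ResType", "Value": ...}]}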
def _put_metrics(self, ns, metrics):
watch = utils.local_session(self.ctx.session_factory).client('cloudwatch')
return self.retry(
watch.put_metric_data, Namespace=ns, MetricData=metrics)
@log_outputs.register('aws')
class CloudWatchLogOutput(LogOutput):
log_format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
def get_handler(self):
return CloudWatchLogHandler(
log_group=self.ctx.options.log_group,
log_stream=self.ctx.policy.name,
session_factory=lambda x=None: self.ctx.session_factory(
assume=False))
def __repr__(self):
return "<%s to group:%s stream:%s>" % (
self.__class__.__name__,
self.ctx.options.log_group,
self.ctx.policy.name)
class XrayEmitter(object):
def __init__(self):
self.buf = []
self.client = None
def send_entity(self, entity):
self.buf.append(entity)
if len(self.buf) > 49:
self.flush()
def flush(self):
buf = self.buf
self.buf = []
for segment_set in utils.chunks(buf, 50):
self.client.put_trace_segments(
TraceSegmentDocuments=[
s.serialize() for s in segment_set])
class XrayContext(Context):
def __init__(self, *args, **kw):
super(XrayContext, self).__init__(*args, **kw)
# We want process global semantics as policy execution
# can span threads.
self._local = Bag()
self._current_subsegment = None
def handle_context_missing(self):
"""Custodian has a few api calls out of band of policy execution.
- Resolving account alias.
- Cloudwatch Log group/stream discovery/creation (when using -l on cli)
        Also, folks may only optionally enable xray via configuration,
        so default to disabling the context-missing output.
"""
@tracer_outputs.register('xray', condition=HAVE_XRAY)
class XrayTracer(object):
emitter = XrayEmitter()
in_lambda = 'LAMBDA_TASK_ROOT' in os.environ
use_daemon = 'AWS_XRAY_DAEMON_ADDRESS' in os.environ
service_name = 'custodian'
context = XrayContext()
if HAVE_XRAY:
xray_recorder.configure(
emitter=use_daemon is False and emitter or None,
context=context,
sampling=True,
context_missing='LOG_ERROR'
)
patch(['boto3', 'requests'])
logging.getLogger('aws_xray_sdk.core').setLevel(logging.ERROR)
def __init__(self, ctx, config):
self.ctx = ctx
self.config = config or {}
self.client = None
self.metadata = {}
@contextlib.contextmanager
def subsegment(self, name):
segment = xray_recorder.begin_subsegment(name)
try:
yield segment
except Exception as e:
stack = traceback.extract_stack(limit=xray_recorder.max_trace_back)
segment.add_exception(e, stack)
raise
finally:
xray_recorder.end_subsegment(time.time())
def __enter__(self):
if self.client is None:
self.client = self.ctx.session_factory(assume=False).client('xray')
self.emitter.client = self.client
if self.in_lambda:
self.segment = xray_recorder.begin_subsegment(self.service_name)
else:
self.segment = xray_recorder.begin_segment(
self.service_name, sampling=True)
p = self.ctx.policy
xray_recorder.put_annotation('policy', p.name)
xray_recorder.put_annotation('resource', p.resource_type)
if self.ctx.options.account_id:
xray_recorder.put_annotation('account', self.ctx.options.account_id)
def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
metadata = self.ctx.get_metadata(('api-stats',))
metadata.update(self.metadata)
xray_recorder.put_metadata('custodian', metadata)
if self.in_lambda:
xray_recorder.end_subsegment()
return
xray_recorder.end_segment()
if not self.use_daemon:
self.emitter.flush()
self.metadata.clear()
@api_stats_outputs.register('aws')
class ApiStats(DeltaStats):
def __init__(self, ctx, config=None):
super(ApiStats, self).__init__(ctx, config)
self.api_calls = Counter()
def get_snapshot(self):
return dict(self.api_calls)
def get_metadata(self):
return self.get_snapshot()
def __enter__(self):
if isinstance(self.ctx.session_factory, credentials.SessionFactory):
self.ctx.session_factory.set_subscribers((self,))
self.push_snapshot()
def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
if isinstance(self.ctx.session_factory, credentials.SessionFactory):
self.ctx.session_factory.set_subscribers(())
self.ctx.metrics.put_metric(
"ApiCalls", sum(self.api_calls.values()), "Count")
self.ctx.policy._write_file(
'api-stats.json', utils.dumps(dict(self.api_calls)))
self.pop_snapshot()
def __call__(self, s):
s.events.register(
'after-call.*.*', self._record, unique_id='c7n-api-stats')
def _record(self, http_response, parsed, model, **kwargs):
self.api_calls["%s.%s" % (
model.service_model.endpoint_prefix,
model.name)] += 1
@blob_outputs.register('s3')
class S3Output(DirectoryOutput):
"""
Usage:
.. code-block:: python
with S3Output(session_factory, 's3://bucket/prefix'):
log.info('xyz') # -> log messages sent to custodian-run.log.gz
"""
permissions = ('S3:PutObject',)
def __init__(self, ctx, config):
self.ctx = ctx
self.config = config
self.output_path = self.get_output_path(self.config['url'])
self.s3_path, self.bucket, self.key_prefix = utils.parse_s3(
self.output_path)
self.root_dir = tempfile.mkdtemp()
self.transfer = None
def __repr__(self):
return "<%s to bucket:%s prefix:%s>" % (
self.__class__.__name__,
self.bucket,
self.key_prefix)
def get_output_path(self, output_url):
if '{' not in output_url:
date_path = datetime.datetime.now().strftime('%Y/%m/%d/%H')
return self.join(
output_url, self.ctx.policy.name, date_path)
return output_url.format(**self.get_output_vars())
@staticmethod
def join(*parts):
return "/".join([s.strip('/') for s in parts])
def __exit__(self, exc_type=None, exc_value=None, exc_traceback=None):
from boto3.s3.transfer import S3Transfer
if exc_type is not None:
log.exception("Error while executing policy")
log.debug("Uploading policy logs")
self.leave_log()
self.compress()
self.transfer = S3Transfer(
self.ctx.session_factory(assume=False).client('s3'))
self.upload()
shutil.rmtree(self.root_dir)
log.debug("Policy Logs uploaded")
def upload(self):
for root, dirs, files in os.walk(self.root_dir):
for f in files:
key = "%s%s" % (
self.key_prefix,
"%s/%s" % (
root[len(self.root_dir):], f))
key = key.strip('/')
self.transfer.upload_file(
os.path.join(root, f), self.bucket, key,
extra_args={
'ACL': 'bucket-owner-full-control',
'ServerSideEncryption': 'AES256'})
@clouds.register('aws')
class AWS(object):
resource_prefix = 'aws'
# legacy path for older plugins
resources = PluginRegistry('resources')
def initialize(self, options):
"""
"""
_default_region(options)
_default_account_id(options)
return options
def get_session_factory(self, options):
return SessionFactory(
options.region,
options.profile,
options.assume_role,
options.external_id)
def initialize_policies(self, policy_collection, options):
"""Return a set of policies targetted to the given regions.
Supports symbolic regions like 'all'. This will automatically
filter out policies if their being targetted to a region that
does not support the service. Global services will target a
single region (us-east-1 if only all specified, else first
region in the list).
Note for region partitions (govcloud and china) an explicit
region from the partition must be passed in.
"""
from c7n.policy import Policy, PolicyCollection
policies = []
service_region_map, resource_service_map = get_service_region_map(
options.regions, policy_collection.resource_types)
for p in policy_collection:
available_regions = service_region_map.get(
resource_service_map.get(p.resource_type), ())
# its a global service/endpoint, use user provided region
# or us-east-1.
if not available_regions and options.regions:
candidates = [r for r in options.regions if r != 'all']
candidate = candidates and candidates[0] or 'us-east-1'
svc_regions = [candidate]
elif 'all' in options.regions:
svc_regions = available_regions
else:
svc_regions = options.regions
for region in svc_regions:
if available_regions and region not in available_regions:
level = ('all' in options.regions and
logging.DEBUG or logging.WARNING)
# TODO: fixme
policy_collection.log.log(
level, "policy:%s resources:%s not available in region:%s",
p.name, p.resource_type, region)
continue
options_copy = copy.copy(options)
options_copy.region = str(region)
if len(options.regions) > 1 or 'all' in options.regions and getattr(
options, 'output_dir', None):
options_copy.output_dir = (
options.output_dir.rstrip('/') + '/%s' % region)
policies.append(
Policy(p.data, options_copy,
session_factory=policy_collection.session_factory()))
return PolicyCollection(policies, options)
def get_service_region_map(regions, resource_types):
# we're not interacting with the apis just using the sdk meta information.
session = boto3.Session(
region_name='us-east-1',
aws_access_key_id='never',
aws_secret_access_key='found')
normalized_types = []
for r in resource_types:
if r.startswith('aws.'):
normalized_types.append(r[4:])
else:
normalized_types.append(r)
resource_service_map = {
r: clouds['aws'].resources.get(r).resource_type.service
for r in normalized_types if r != 'account'}
# support for govcloud and china, we only utilize these regions if they
# are explicitly passed in on the cli.
partition_regions = {}
for p in ('aws-cn', 'aws-us-gov'):
for r in session.get_available_regions('s3', partition_name=p):
partition_regions[r] = p
partitions = ['aws']
for r in regions:
if r in partition_regions:
partitions.append(partition_regions[r])
service_region_map = {}
for s in set(itertools.chain(resource_service_map.values())):
for partition in partitions:
service_region_map.setdefault(s, []).extend(
session.get_available_regions(s, partition_name=partition))
return service_region_map, resource_service_map
|
the-stack_106_25889 | # Copyright (c) ByteDance, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Copy-paste from mmcv library:
https://github.com/open-mmlab/mmcv/
"""
import os.path as osp
import time
import mmcv
import torch
try:
import apex
except ImportError:
print('apex is not installed')
from tempfile import TemporaryDirectory
from torch.optim import Optimizer
from mmcv.parallel import is_module_wrapper
from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
    The checkpoint will have 4 fields: ``meta``, ``state_dict``,
    ``optimizer`` and ``amp``. By default ``meta`` will contain version
    and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
# save amp state dict in the checkpoint
checkpoint['amp'] = apex.amp.state_dict()
if filename.startswith('pavi://'):
try:
            from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
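# Illustrative usage sketch (editor's note, not original code): saving a model
# together with its optimizer state; the apex amp state is captured by the
# call above. The names and paths below are hypothetical.
#     >>> save_checkpoint(model, 'work_dirs/epoch_12.pth',
#     ...                 optimizer=optimizer, meta=dict(epoch=12, iter=4800))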
|
the-stack_106_25890 | """SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import (
date,
datetime,
time,
)
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import (
_gt14,
read_sql_query,
read_sql_table,
)
try:
import sqlalchemy
from sqlalchemy import inspect
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"read_no_parameters_with_percent": {
"sqlite": "SELECT * FROM iris WHERE Name LIKE '%'",
"mysql": "SELECT * FROM iris WHERE `Name` LIKE '%'",
"postgresql": "SELECT * FROM iris WHERE \"Name\" LIKE '%'",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
# https://docs.sqlalchemy.org/en/13/core/connections.html#engine-disposal
self.conn.dispose()
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("io", "data", "csv", "iris.csv")])
def load_iris_data(self, datapath, request):
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with open(iris_csv_file, newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
{
"A": [4, 1, 3, 6],
"B": ["asd", "gsq", "ylt", "jkl"],
"C": [1.1, 3.1, 6.9, 5.3],
"D": [False, True, True, False],
"E": ["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
}
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_types_test_data(self, data):
def _filter_to_flavor(flavor, df):
flavor_dtypes = {
"sqlite": {
"TextCol": "str",
"DateCol": "str",
"IntDateCol": "int64",
"IntDateOnlyCol": "int64",
"FloatCol": "float",
"IntCol": "int64",
"BoolCol": "int64",
"IntColWithNull": "float",
"BoolColWithNull": "float",
},
"mysql": {
"TextCol": "str",
"DateCol": "str",
"IntDateCol": "int64",
"IntDateOnlyCol": "int64",
"FloatCol": "float",
"IntCol": "int64",
"BoolCol": "bool",
"IntColWithNull": "float",
"BoolColWithNull": "float",
},
"postgresql": {
"TextCol": "str",
"DateCol": "str",
"DateColWithTz": "str",
"IntDateCol": "int64",
"IntDateOnlyCol": "int64",
"FloatCol": "float",
"IntCol": "int64",
"BoolCol": "bool",
"IntColWithNull": "float",
"BoolColWithNull": "float",
},
}
dtypes = flavor_dtypes[flavor]
return df[dtypes.keys()].astype(dtypes)
df = DataFrame(data)
self.types_test = {
flavor: _filter_to_flavor(flavor, df)
for flavor in ("sqlite", "mysql", "postgresql")
}
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
self._load_types_test_data(data)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_no_parameter_with_percent(self):
query = SQL_STRINGS["read_no_parameters_with_percent"][self.flavor]
iris_frame = self.pandasSQL.read_query(query, params=None)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_with_chunksize_no_result(self):
query = "SELECT * FROM iris_view WHERE SepalLength < 0.0"
with_batch = sql.read_sql_query(query, self.conn, chunksize=5)
without_batch = sql.read_sql_query(query, self.conn)
tm.assert_frame_equal(concat(with_batch), without_batch)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
Timestamp(2000, 1, 3, 0, 0, 0),
Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
Timestamp(2000, 1, 3, 0, 0, 0),
Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
Timestamp(1986, 12, 25, 0, 0, 0),
Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
Timestamp(1986, 12, 25, 0, 0, 0),
Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
Timestamp("2010-10-10"),
Timestamp("2010-12-12"),
]
@pytest.mark.parametrize("error", ["ignore", "raise", "coerce"])
@pytest.mark.parametrize(
"read_sql, text, mode",
[
(sql.read_sql, "SELECT * FROM types_test_data", ("sqlalchemy", "fallback")),
(sql.read_sql, "types_test_data", ("sqlalchemy")),
(
sql.read_sql_query,
"SELECT * FROM types_test_data",
("sqlalchemy", "fallback"),
),
(sql.read_sql_table, "types_test_data", ("sqlalchemy")),
],
)
def test_custom_dateparsing_error(self, read_sql, text, mode, error):
if self.mode in mode:
expected = self.types_test[self.flavor].astype(
{"DateCol": "datetime64[ns]"}
)
result = read_sql(
text,
con=self.conn,
parse_dates={
"DateCol": {"errors": error},
},
)
tm.assert_frame_equal(result, expected)
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].view("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
@pytest.mark.parametrize(
"dtype",
[
None,
int,
float,
{"A": int, "B": float},
],
)
def test_dtype_argument(self, dtype):
# GH10285 Add dtype argument to read_sql_query
df = DataFrame([[1.2, 3.4], [5.6, 7.8]], columns=["A", "B"])
df.to_sql("test_dtype_argument", self.conn)
expected = df.astype(dtype)
result = sql.read_sql_query(
"SELECT A, B FROM test_dtype_argument", con=self.conn, dtype=dtype
)
tm.assert_frame_equal(result, expected)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_with_schema(self):
# GH28486
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, schema="pypi"
)
assert "CREATE TABLE pypi." in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
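# For reference, the schema generated above has roughly this shape (illustrative
# only; exact column type names depend on the backend and mode):
#   CREATE TABLE "test" (
#     "Col1" REAL,
#     "Col2" REAL,
#     CONSTRAINT test_pk PRIMARY KEY ("Col1")
#   )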
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query all at once
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
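# chunksize turns both read_sql_query and read_sql_table into generators of
# DataFrames, e.g. (illustrative; process() is a hypothetical consumer):
#   for chunk in sql.read_sql_query("select * from test_chunksize", conn, chunksize=5):
#       process(chunk)  # each chunk holds at most 5 rows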
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["John P. Doe", "Jane Dove", "John P. Doe"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly.
Tests for `read_sql_table` are included here, as this is specific to the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test index_col argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
@pytest.mark.parametrize(
"integer, expected",
[
("int8", "SMALLINT"),
("Int8", "SMALLINT"),
("uint8", "SMALLINT"),
("UInt8", "SMALLINT"),
("int16", "SMALLINT"),
("Int16", "SMALLINT"),
("uint16", "INTEGER"),
("UInt16", "INTEGER"),
("int32", "INTEGER"),
("Int32", "INTEGER"),
("uint32", "BIGINT"),
("UInt32", "BIGINT"),
("int64", "BIGINT"),
("Int64", "BIGINT"),
(int, "BIGINT" if np.dtype(int).name == "int64" else "INTEGER"),
],
)
def test_sqlalchemy_integer_mapping(self, integer, expected):
# GH35076 Map pandas integer to optimal SQLAlchemy integer type
df = DataFrame([0, 1], columns=["a"], dtype=integer)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
result = str(table.table.c.a.type)
assert result == expected
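# The expected values above reflect choosing a signed SQL integer type wide
# enough for the full range of each pandas dtype; unsigned dtypes therefore map
# one size up (e.g. uint32 -> BIGINT), and uint64, which has no signed
# equivalent, raises in the next test.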
@pytest.mark.parametrize("integer", ["uint64", "UInt64"])
def test_sqlalchemy_integer_overload_mapping(self, integer):
# GH35076 Map pandas integer to optimal SQLAlchemy integer type
df = DataFrame([0, 1], columns=["a"], dtype=integer)
db = sql.SQLDatabase(self.conn)
with pytest.raises(
ValueError, match="Unsigned 64 bit integer datatype is not supported"
):
sql.SQLTable("test_type", db, frame=df)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using a driver that will not be installed on Travis to trigger an error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
def test_column_with_percentage(self):
# GH 37157
df = DataFrame({"A": [0, 1, 2], "%_variation": [3, 4, 5]})
df.to_sql("test_column_percentage", self.conn, index=False)
res = sql.read_sql_table("test_column_percentage", self.conn)
tm.assert_frame_equal(res, df)
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test that database IO still works if the connection is closed
# between the writing and the reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards compatibility)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('"') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip these tests if SQLAlchemy is not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
if _gt14():
insp = inspect(temp_conn)
assert insp.has_table("temp_frame")
else:
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
if _gt14():
insp = inspect(temp_conn)
assert insp.has_table("temp_frame")
else:
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
if _gt14():
assert not insp.has_table("temp_frame")
else:
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so it shouldn't be parsed, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = concat(
list(
read_sql_query("select * from types_test_data", self.conn, chunksize=1)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_out_of_bounds_datetime(self):
# GH 26761
data = DataFrame({"date": datetime(9999, 1, 1)}, index=[0])
data.to_sql("test_datetime_obb", self.conn, index=False)
result = sql.read_sql_table("test_datetime_obb", self.conn)
expected = DataFrame([pd.NaT], columns=["date"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")._with_freq(None)
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# result index will gain a name from the set_index operation; the expected
# index has no name, so skip the name check
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy import inspect
insp = inspect(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
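# As exercised above, the dtype argument of to_sql accepts either a single
# SQLAlchemy type applied to every column or a dict mapping column names to
# types; plain Python types such as str are rejected in sqlalchemy mode.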
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
if _gt14():
# https://github.com/sqlalchemy/sqlalchemy/commit/
# 00b5c10846e800304caa86549ab9da373b42fa5d#r48323973
foo_data = foo(conn)
bar(conn, foo_data)
else:
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
@pytest.mark.parametrize(
"input",
[{"foo": [np.inf]}, {"foo": [-np.inf]}, {"foo": [-np.inf], "infe0": ["bar"]}],
)
def test_to_sql_with_negative_npinf(self, input, request):
# GH 34431
df = DataFrame(input)
if self.flavor == "mysql":
# GH 36465
# The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error
# for pymysql version >= 0.10
# TODO: remove this version check after GH 36465 is fixed
import pymysql
if pymysql.VERSION[0:3] >= (0, 10, 0) and "infe0" in df.columns:
mark = pytest.mark.xfail(reason="GH 36465")
request.node.add_marker(mark)
msg = "inf cannot be used with MySQL"
with pytest.raises(ValueError, match=msg):
df.to_sql("foobar", self.conn, index=False)
else:
df.to_sql("foobar", self.conn, index=False)
res = sql.read_sql_table("foobar", self.conn)
tm.assert_equal(df, res)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so the column shouldn't be parsed
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test that no warning is raised for BIGINT (used to support int64) (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
Test the sqlalchemy backend against a MySQL database.
"""
flavor = "mysql"
port = 3306
@classmethod
def connect(cls):
return sqlalchemy.create_engine(
f"mysql+{cls.driver}://root@localhost:{cls.port}/pandas",
connect_args=cls.connect_args,
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more of an API test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = "postgresql"
port = 5432
@classmethod
def connect(cls):
return sqlalchemy.create_engine(
f"postgresql+{cls.driver}://postgres:postgres@localhost:{cls.port}/pandas"
)
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
# only test this for postgresql (schemas are not supported in
# mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schemas
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 = sql.read_sql_table("test_schema_public", self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table(
"test_schema_public_explicit", self.conn, schema="public"
)
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(df, res4)
msg = "Table test_schema_other not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("test_schema_other", self.conn, schema="public")
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql("test_schema_other", self.conn, schema="other", index=False)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="replace",
)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="append",
)
res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema="other")
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, "test_schema_other2", index=False)
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace")
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append")
res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other")
res2 = pdsql.read_table("test_schema_other2")
tm.assert_frame_equal(res1, res2)
def test_copy_from_callable_insertion_method(self):
# GH 8953
# Example in io.rst found under _io.sql.method
# not available in sqlite / mysql (no COPY support)
def psql_insert_copy(table, conn, keys, data_iter):
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ", ".join(f'"{k}"' for k in keys)
if table.schema:
table_name = f"{table.schema}.{table.name}"
else:
table_name = table.name
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)
expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
expected.to_sql(
"test_copy_insert", self.conn, index=False, method=psql_insert_copy
)
result = sql.read_sql_table("test_copy_insert", self.conn)
tm.assert_frame_equal(result, expected)
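# A to_sql insertion method is any callable taking (table, conn, keys, data_iter),
# where `table` is the pandas SQLTable, `conn` a SQLAlchemy connection, `keys` the
# list of column names and `data_iter` an iterable of row tuples -- psql_insert_copy
# above is the documented example of this hook.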
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlite3.connect(":memory:")
def setup_connect(self):
self.conn = self.connect()
def load_test_data_and_sql(self):
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
# GH 29921
self._to_sql(method="multi")
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
self.pandasSQL.to_sql(temp_frame, "drop_test_frame")
assert self.pandasSQL.has_table("drop_test_frame")
self.pandasSQL.drop_table("drop_test_frame")
assert not self.pandasSQL.has_table("drop_test_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_date", self.conn)
if self.flavor == "sqlite":
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == "mysql":
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_time", self.conn)
if self.flavor == "sqlite":
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' "
+ f"AND tbl_name = '{tbl_name}'",
self.conn,
)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute(f"PRAGMA table_info({table})")
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError(f"Table {table}, column {column} not found")
def test_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": "STRING"})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER"
assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING"
msg = r"B \(<class 'bool'>\) not a string"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": bool})
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype="STRING")
assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING"
assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING"
def test_notna_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
assert self._get_sqlite_column_type(tbl, "Bool") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Date") == "TIMESTAMP"
assert self._get_sqlite_column_type(tbl, "Int") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Float") == "REAL"
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
msg = "Empty table or column name specified"
with pytest.raises(ValueError, match=msg):
df.to_sql("", self.conn)
for ndx, weird_name in enumerate(
[
"test_weird_name]",
"test_weird_name[",
"test_weird_name`",
'test_weird_name"',
"test_weird_name'",
"_b.test_weird_name_01-30",
'"_b.test_weird_name_01-30"',
"99beginswithnumber",
"12345",
"\xe9",
]
):
df.to_sql(weird_name, self.conn)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name])
c_tbl = f"test_weird_col_name{ndx:d}"
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
def date_format(dt):
"""Returns date in YYYYMMDD format."""
return dt.strftime("%Y%m%d")
_formatters = {
datetime: "'{}'".format,
str: "'{}'".format,
np.str_: "'{}'".format,
bytes: "'{}'".format,
float: "{:.8f}".format,
int: "{:d}".format,
type(None): lambda x: "NULL",
np.float64: "{:.10f}".format,
bool: "'{!s}'".format,
}
def format_query(sql, *args):
processed_args = []
for arg in args:
if isinstance(arg, float) and isna(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
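# Illustrative usage of the helpers above (values rendered via _formatters):
#   format_query("INSERT INTO test VALUES (%s, %s, %s)", 1, 2.5, None)
#   -> "INSERT INTO test VALUES (1, 2.50000000, NULL)"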
def tquery(query, con=None, cur=None):
"""Replace removed sql.tquery function"""
res = sql.execute(query, con=con, cur=cur).fetchall()
if res is None:
return None
else:
return list(res)
@pytest.mark.single
class TestXSQLite(SQLiteMixIn):
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
self.method = request.function
self.conn = sqlite3.connect(":memory:")
# In some test cases we may close the db connection
# Re-open conn here so we can perform cleanup in teardown
yield
self.method = request.function
self.conn = sqlite3.connect(":memory:")
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, rtol=1e-3)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.iloc[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
lines = create_sql.splitlines()
for line in lines:
tokens = line.split(" ")
if len(tokens) == 2 and tokens[0] == "A":
assert tokens[1] == "DATETIME"
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test", keys=["A", "B"])
lines = create_sql.splitlines()
assert 'PRIMARY KEY ("A", "B")' in create_sql
cur = self.conn.cursor()
cur.execute(create_sql)
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(sql.DatabaseError, match="Execution failed on sql"):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with tm.external_error_raised(sqlite3.ProgrammingError):
tquery("select * from test", con=self.conn)
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.to_sql(frame, name="test_table", con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame["txt"] = ["a"] * len(frame)
frame2 = frame.copy()
new_idx = Index(np.arange(len(frame2))) + 10
frame2["Idx"] = new_idx.copy()
sql.to_sql(frame2, name="test_table2", con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn, index_col="Idx")
expected = frame.copy()
expected.index = new_idx
expected.index.name = "Idx"
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({"From": np.ones(5)})
sql.to_sql(df, con=self.conn, name="testkeywords", index=False)
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df = DataFrame([1, 2], columns=["c0"])
sql.to_sql(mono_df, con=self.conn, name="mono_df", index=False)
# computing the sum via sql
con_x = self.conn
the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df"))
# it should not fail, and gives 3 (issue #3628)
assert the_sum == 3
result = sql.read_sql("select * from mono_df", con_x)
tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
table_name = "table_if_exists"
sql_select = f"SELECT * FROM {table_name}"
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
msg = "'notvalidvalue' is not valid for if_exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="notvalidvalue",
)
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(
frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
)
msg = "Table 'table_if_exists' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
)
# test if_exists='replace'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(3, "C"), (4, "D"), (5, "E")]
clean_up(table_name)
# test if_exists='append'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="fail",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="append",
index=False,
)
assert tquery(sql_select, con=self.conn) == [
(1, "A"),
(2, "B"),
(3, "C"),
(4, "D"),
(5, "E"),
]
clean_up(table_name)
@pytest.mark.single
@pytest.mark.db
@pytest.mark.skip(
reason="gh-13611: there is no support for MySQL if SQLAlchemy is not installed"
)
class TestXMySQL(MySQLMixIn):
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
pymysql = pytest.importorskip("pymysql")
pymysql.connect(host="localhost", user="root", passwd="", db="pandas")
try:
pymysql.connect(read_default_group="pandas")
except pymysql.ProgrammingError as err:
raise RuntimeError(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
) from err
except pymysql.Error as err:
raise RuntimeError(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
) from err
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
pymysql = pytest.importorskip("pymysql")
pymysql.connect(host="localhost", user="root", passwd="", db="pandas")
try:
pymysql.connect(read_default_group="pandas")
except pymysql.ProgrammingError as err:
raise RuntimeError(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
) from err
except pymysql.Error as err:
raise RuntimeError(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
) from err
self.method = request.function
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, rtol=1e-3)
# GH#32571 result comes back rounded to 6 digits in some builds;
# no obvious pattern
def test_chunksize_read_type(self):
frame = tm.makeTimeDataFrame()
frame.index.name = "index"
drop_sql = "DROP TABLE IF EXISTS test"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.to_sql(frame, name="test", con=self.conn)
query = "select * from test"
chunksize = 5
chunk_gen = read_sql_query(
sql=query, con=self.conn, chunksize=chunksize, index_col="index"
)
chunk_df = next(chunk_gen)
tm.assert_frame_equal(frame[:chunksize], chunk_df)
def test_execute(self):
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.iloc[0].values.tolist()
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
lines = create_sql.splitlines()
for line in lines:
tokens = line.split(" ")
if len(tokens) == 2 and tokens[0] == "A":
assert tokens[1] == "DATETIME"
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test", keys=["A", "B"])
lines = create_sql.splitlines()
assert "PRIMARY KEY (`A`, `B`)" in create_sql
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
def test_execute_fail(self):
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception, match="<insert message here>"):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self, request, datapath):
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception, match="<insert message here>"):
tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setup_method(request, datapath)
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame, name="test_table", con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame["txt"] = ["a"] * len(frame)
frame2 = frame.copy()
index = Index(np.arange(len(frame2))) + 10
frame2["Idx"] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame2, name="test_table2", con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn, index_col="Idx")
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({"From": np.ones(5)})
sql.to_sql(
df, con=self.conn, name="testkeywords", if_exists="replace", index=False
)
def test_if_exists(self):
df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
table_name = "table_if_exists"
sql_select = f"SELECT * FROM {table_name}"
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
with pytest.raises(ValueError, match="<insert message here>"):
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="notvalidvalue",
)
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="fail",
index=False,
)
with pytest.raises(ValueError, match="<insert message here>"):
sql.to_sql(
frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
)
# test if_exists='replace'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(3, "C"), (4, "D"), (5, "E")]
clean_up(table_name)
# test if_exists='append'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="fail",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="append",
index=False,
)
assert tquery(sql_select, con=self.conn) == [
(1, "A"),
(2, "B"),
(3, "C"),
(4, "D"),
(5, "E"),
]
clean_up(table_name)
|
the-stack_106_25891 | ##############################################################################
#
# Copyright (c) 2012 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""``pdfInclude`` Directive.
"""
__docformat__ = "reStructuredText"
from reportlab.platypus import flowables
from z3c.rml import attr, flowable, interfaces, occurence
class StoryPlaceFlowable(flowables.Flowable):
def __init__(self, x, y, width, height, origin, flows):
flowables.Flowable.__init__(self)
self.x = x
self.y = y
self.width = width
self.height = height
self.origin = origin
self.flows = flows
def wrap(self, *args):
return (0, 0)
def draw(self):
saveState = False
x, y = self.x, self.y
self.canv.restoreState()
if self.origin == 'frame':
x += self._frame._x1
y += self._frame._y1
elif self.origin == 'local':
x += self._frame._x
y += self._frame._y
else:
# origin == 'page'
pass
width, height = self.width, self.height
y += height
for flow in self.flows.flow:
flowWidth, flowHeight = flow.wrap(width, height)
if flowWidth <= width and flowHeight <= height:
y -= flowHeight
flow.drawOn(self.canv, x, y)
height -= flowHeight
else:
raise ValueError("Not enough space")
self.canv.saveState()
class IStoryPlace(interfaces.IRMLDirectiveSignature):
"""Draws a set of flowables on the canvas within a given region."""
x = attr.Measurement(
title=u'X-Coordinate',
description=(u'The X-coordinate of the lower-left position of the '
u'place.'),
required=True)
y = attr.Measurement(
title=u'Y-Coordinate',
description=(u'The Y-coordinate of the lower-left position of the '
u'place.'),
required=True)
width = attr.Measurement(
title=u'Width',
description=u'The width of the place.',
required=False)
height = attr.Measurement(
title=u'Height',
description=u'The height of the place.',
required=False)
origin = attr.Choice(
title=u'Origin',
description=u'The origin of the coordinate system for the story.',
choices=('page', 'frame', 'local'),
        default='page',
required=False)
class StoryPlace(flowable.Flowable):
signature = IStoryPlace
def process(self):
x, y, width, height, origin = self.getAttributeValues(
select=('x', 'y', 'width', 'height', 'origin'), valuesOnly=True)
flows = flowable.Flow(self.element, self.parent)
flows.process()
self.parent.flow.append(
StoryPlaceFlowable(x, y, width, height, origin, flows))
flowable.Flow.factories['storyPlace'] = StoryPlace
flowable.IFlow.setTaggedValue(
'directives',
flowable.IFlow.getTaggedValue('directives') +
(occurence.ZeroOrMore('storyPlace', IStoryPlace),)
)
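# A hedged usage sketch (assumption, not taken from this package's docs): once the
# 'storyPlace' factory above is registered, an RML document processed by z3c.rml
# could place flowables at an absolute position roughly like this. The surrounding
# <story> and <para> tags are illustrative only; the tag name and the x/y/width/
# height/origin attributes come from IStoryPlace and the registration above.
#
#   <story>
#     <storyPlace x="1cm" y="18cm" width="10cm" height="5cm" origin="page">
#       <para>Drawn outside the normal frame flow.</para>
#     </storyPlace>
#   </story>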
|
the-stack_106_25892 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exp Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
from tensorflow.contrib.distributions.python.ops.bijectors import exp as exp_lib
from tensorflow.python.platform import test
class ExpBijectorTest(test.TestCase):
"""Tests correctness of the Y = g(X) = exp(X) transformation."""
def testBijector(self):
with self.test_session():
bijector = exp_lib.Exp(event_ndims=1)
self.assertEqual("exp", bijector.name)
x = [[[1.], [2.]]]
y = np.exp(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
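      # For Y = exp(X) with event_ndims=1, X = log(Y), so the inverse
      # log-det-Jacobian reduces to log|det dX/dY| = -sum(log y) over the
      # event dimension, which is what the next two assertions check.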
self.assertAllClose(
-np.sum(np.log(y), axis=-1),
bijector.inverse_log_det_jacobian(y).eval())
self.assertAllClose(-bijector.inverse_log_det_jacobian(np.exp(x)).eval(),
bijector.forward_log_det_jacobian(x).eval())
def testScalarCongruency(self):
with self.test_session():
bijector = exp_lib.Exp()
bijector_test_util.assert_scalar_congruency(
bijector, lower_x=-2., upper_x=1.5, rtol=0.05)
def testBijectiveAndFinite(self):
with self.test_session():
bijector = exp_lib.Exp(event_ndims=0)
x = np.linspace(-10, 10, num=10).astype(np.float32)
y = np.logspace(-10, 10, num=10).astype(np.float32)
bijector_test_util.assert_bijective_and_finite(bijector, x, y)
if __name__ == "__main__":
test.main()
|
the-stack_106_25893 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import cv2
try:
from model.config import cfg, tmp_lam
from utils.blob import prep_im_for_blob, im_list_to_blob
except:
from lib.model.config import cfg, tmp_lam
from lib.utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
# Get the input image blob, formatted for caffe
if cfg.MIX_TRAINING:
im_blob, im_scales, trans_scales = _get_mix_image_blob(roidb, random_scale_inds)
if cfg.MIX_TEST:
print("MIX-TRAINING: TEST: im_scalse {} \n\t trans_scales {}".format(im_scales, trans_scales))
blobs = {'data': im_blob}
assert len(im_scales) == 2, "MIX-TRAINING ERROR! Single batch only"
if cfg.TRAIN.USE_ALL_GT:
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_inds2 = np.where(roidb[1]['gt_classes'] != 0)[0]
if cfg.MIX_TEST:
print("MIX-TRAINING: TEST: gt_inds {} ?? {}".format(gt_inds, gt_inds2))
else:
gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_inds2 = np.where(roidb[1]['gt_classes'] != 0 & np.all(roidb[1]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
gt_boxes2 = np.empty((len(gt_inds2), 5), dtype=np.float32)
gt_boxes2[:, 0:4] = roidb[1]['boxes'][gt_inds2, :] * im_scales[1]
gt_boxes2[:, 0] *= trans_scales[1]
gt_boxes2[:, 1] *= trans_scales[0]
gt_boxes2[:, 2] *= trans_scales[1]
gt_boxes2[:, 3] *= trans_scales[0]
gt_boxes2[:, 4] = roidb[1]['gt_classes'][gt_inds2]
blobs['gt_boxes'] = gt_boxes
blobs['gt_boxes2'] = gt_boxes2
blobs['im_info'] = np.array(
[im_blob.shape[1], im_blob.shape[2], im_scales[0]],
dtype=np.float32)
return blobs
else:
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
assert len(im_scales) == 1, "Single batch only"
# assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
if cfg.TRAIN.USE_ALL_GT:
# Include all ground truth boxes
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
if cfg.MIX_TEST:
print("TEST: gt_inds {} ".format(gt_inds))
else:
# For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
if cfg.MIX_TEST:
print("TEST: gt_boxes {} ".format(gt_boxes))
print(roidb[0]['gt_classes'][gt_inds])
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['gt_boxes2'] = None
blobs['im_info'] = np.array(
[im_blob.shape[1], im_blob.shape[2], im_scales[0]],
dtype=np.float32)
return blobs
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _get_mix_image_blob(roidb, scale_inds):
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# add two image
im1 = processed_ims[0]
im2_tmp = processed_ims[1]
s1, s2 = np.array(im1.shape, dtype=np.float32), np.array(im2_tmp.shape, dtype=np.float32)
trans_scales = s1 / s2
im2 = cv2.resize(im2_tmp, None, None, fx=trans_scales[1], fy=trans_scales[0],
interpolation=cv2.INTER_LINEAR)
assert im1.shape == im2.shape, " {} ? {} tr scales {}, s1,s2 {} ? {}".format(im1.shape, im2.shape, trans_scales, s1, s2)
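    # mixup-style blending (descriptive note): the two equally-sized images are
    # combined pixel-wise with mixing coefficient lam taken from tmp_lam, i.e.
    # im = lam * im1 + (1 - lam) * im2, producing a single blended input blob.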
lam = tmp_lam
im = lam * im1 + (1-lam) * im2
processed_ims = [im]
# im_scales
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales, trans_scales |
the-stack_106_25899 | import pandas as pd, os.path as path
import numpy as np
import os
setting_folder=r'./results/'
fns=os.listdir(setting_folder)
transfer_type = '_gen_feat'
#transfer_type = '_None'
all_label = []
all_pred = []
for item in fns:
if item != 'rd_idx':
pred = np.load(setting_folder + item + '/pred' + transfer_type + '.npy', allow_pickle=True)
label = np.load(setting_folder + item + '/label' + transfer_type + '.npy', allow_pickle=True)
all_pred.append(pred)
all_label.append(label)
#print(len(all_label))
all_label = np.concatenate(all_label, axis=0)
all_pred = np.concatenate(all_pred, axis=0)
print(all_label.shape)
print(all_pred.shape)
print(np.mean(np.abs(all_pred-all_label), axis=0))
print(np.mean(np.abs(all_pred-all_label)))
|
the-stack_106_25901 | # --------------------------------------------------------------------
# Copyright (c) iEXBase. All rights reserved.
# Licensed under the MIT License.
# See License.txt in the project root for license information.
# --------------------------------------------------------------------
import binascii
import codecs
import string
from typing import (
Any,
AnyStr
)
# Type ignored for `codecs.decode()` due to lack of mypy support for 'hex' encoding
# https://github.com/python/typeshed/issues/300
from trx_utils.types import (
is_text,
is_string
)
def decode_hex(value: str) -> bytes:
if not is_text(value):
raise TypeError("Value must be an instance of str")
return codecs.decode(remove_0x_prefix(value), "hex") # type: ignore
def encode_hex(value: AnyStr) -> str:
if not is_string(value):
raise TypeError("Value must be an instance of str or unicode")
binary_hex = codecs.encode(value, "hex") # type: ignore
return add_0x_prefix(binary_hex.decode("ascii"))
def is_0x_prefixed(value: Any) -> bool:
if not is_text(value):
raise TypeError(
"is_0x_prefixed requires text typed arguments. Got: {0}".format(repr(value))
)
return value.startswith("0x") or value.startswith("0X")
def remove_0x_prefix(value: str) -> str:
if is_0x_prefixed(value):
return value[2:]
return value
def add_0x_prefix(value: str) -> str:
if is_0x_prefixed(value):
return value
return "0x" + value
def is_hex(value: Any) -> bool:
if not is_text(value):
raise TypeError(
"is_hex requires text typed arguments. Got: {0}".format(repr(value))
)
elif value.lower() == "0x":
return True
unprefixed_value = remove_0x_prefix(value)
if len(unprefixed_value) % 2 != 0:
value_to_decode = "0" + unprefixed_value
else:
value_to_decode = unprefixed_value
if any(char not in string.hexdigits for char in value_to_decode):
return False
try:
value_as_bytes = codecs.decode(value_to_decode, "hex") # type: ignore
except binascii.Error:
return False
except TypeError:
return False
else:
return bool(value_as_bytes)
|
the-stack_106_25903 | """Tests that ensure the dask-based fit matches.
https://github.com/DEAP/deap/issues/75
"""
import unittest
import nose
from sklearn.datasets import make_classification
from tpot import TPOTClassifier
try:
import dask # noqa
import dask_ml # noqa
except ImportError:
raise nose.SkipTest()
class TestDaskMatches(unittest.TestCase):
def test_dask_matches(self):
with dask.config.set(scheduler='single-threaded'):
for n_jobs in [-1]:
X, y = make_classification(random_state=42)
a = TPOTClassifier(
generations=0,
population_size=5,
cv=3,
random_state=42,
n_jobs=n_jobs,
use_dask=False,
verbosity=3
)
b = TPOTClassifier(
generations=0,
population_size=5,
cv=3,
random_state=42,
n_jobs=n_jobs,
use_dask=True,
verbosity=3
)
a.fit(X, y)
b.fit(X, y)
self.assertEqual(a.score(X, y), b.score(X, y))
self.assertEqual(a.pareto_front_fitted_pipelines_.keys(),
b.pareto_front_fitted_pipelines_.keys())
self.assertEqual(a.evaluated_individuals_,
b.evaluated_individuals_)
|
the-stack_106_25904 | from typing import Optional
import discord
class DropdownSelect(discord.ui.Select['DropdownView']):
def __init__(self, options: list[discord.SelectOption], placeholder: str):
super().__init__(
placeholder=placeholder,
min_values=1,
max_values=1,
options=options
)
async def callback(self, interaction: discord.Interaction):
self.view.answer = self.values[0]
await self.view.on_timeout()
self.view.stop()
class DropdownView(discord.ui.View):
def __init__(self, options: list[discord.SelectOption], context: discord.ApplicationContext, placeholder: str='\u200b', *, public: bool=False, timeout: int=60):
super().__init__(timeout=timeout)
self.ctx = context
self.public = public
self.options = options
self.add_item(DropdownSelect(options, placeholder))
self.answer = None
async def interaction_check(self, interaction: discord.Interaction):
if self.public == True or interaction.channel.type == discord.ChannelType.private:
return True
return interaction.user == self.ctx.author
async def on_timeout(self):
self.clear_items()
await self.ctx.edit(view=self)
|
the-stack_106_25905 | import hassapi as hass
import requests
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
"""
Get detailed Yr weather data
Arguments:
- event: Entity name when publishing event
- interval: Update interval, in minutes. Must be at least 10
- source: Yr xml source
- hours: Number of hours to forecast, at most 48
"""
disclaimer = "Weather forecast from Yr, delivered by the Norwegian Meteorological Institute and NRK"
user_agent = "HomeAssistant/Appdaemon Python/requests"
class Yr(hass.Hass):
def initialize(self):
self.url = self.args["source"]
self.entity = self.args["event"]
self.hours = self.args["hours"]
inOneMinute = datetime.now() + timedelta(minutes=1)
interval = int(self.args["interval"])
if interval < 10:
raise Exception("Update interval ({}) must be at least 10 minutes".format(interval))
        # delay first launch by one minute, then run every 'interval' minutes
self.run_every(self.updateState, inOneMinute, interval * 60)
def updateState(self, kwargs):
forecast = self.fetchForecast()
self.set_app_state(self.entity, {"state": "", "attributes": forecast})
def fetchData(self):
res = requests.get(self.url, headers={"User-Agent": user_agent})
return res.text
def fetchForecast(self):
data = self.fetchData()
root = ET.fromstring(data)
periods = root.find(".//tabular")
return {
"disclaimer": disclaimer,
"forecast": [
{
"from": x.get("from"),
"to": x.get("to"),
"weather": x.find("symbol").get("name"),
"symbol": x.find("symbol").get("var"),
"precip": x.find("precipitation").get("value"),
"windSpeed": x.find("windSpeed").get("mps"),
"windDirection": x.find("windDirection").get("deg"),
"temp": x.find("temperature").get("value"),
}
for x in periods[: self.hours]
],
}
|
the-stack_106_25907 | #!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os.path
import re
from generator import ucfirst, Generator
from models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
log = logging.getLogger('global')
_PRIMITIVE_TO_CPP_NAME_MAP = {
'boolean': 'bool',
'integer': 'int',
'number': 'double',
'string': 'String',
'object': 'JSON::Object',
'array': 'JSON::Array',
'any': 'JSON::Value'
}
class CppGenerator(Generator):
def __init__(self, *args, **kwargs):
Generator.__init__(self, *args, **kwargs)
def protocol_name(self):
return self.model().framework.setting('cpp_protocol_group', '')
def helpers_namespace(self):
return '%sHelpers' % self.protocol_name()
# Miscellaneous text manipulation routines.
@staticmethod
def cpp_getter_method_for_type(_type):
if isinstance(_type, ObjectType):
return 'getObject'
if isinstance(_type, ArrayType):
return 'getArray'
if isinstance(_type, PrimitiveType):
            if _type.raw_name() == 'integer':
                return 'getInteger'
            elif _type.raw_name() == 'number':
                return 'getDouble'
            elif _type.raw_name() == 'any':
                return 'getValue'
else:
return 'get' + ucfirst(_type.raw_name())
if isinstance(_type, AliasedType):
return CppGenerator.cpp_getter_method_for_type(_type.aliased_type)
if isinstance(_type, EnumType):
return CppGenerator.cpp_getter_method_for_type(_type.primitive_type)
@staticmethod
def cpp_setter_method_for_type(_type):
if isinstance(_type, ObjectType):
return 'setObject'
if isinstance(_type, ArrayType):
return 'setArray'
if isinstance(_type, PrimitiveType):
            if _type.raw_name() == 'integer':
                return 'setInteger'
            elif _type.raw_name() == 'number':
                return 'setDouble'
            elif _type.raw_name() == 'any':
                return 'setValue'
else:
return 'set' + ucfirst(_type.raw_name())
if isinstance(_type, AliasedType):
return CppGenerator.cpp_setter_method_for_type(_type.aliased_type)
if isinstance(_type, EnumType):
return CppGenerator.cpp_setter_method_for_type(_type.primitive_type)
# Generate type representations for various situations.
@staticmethod
def cpp_protocol_type_for_type(_type):
if isinstance(_type, ObjectType) and len(_type.members) == 0:
return 'JSON::Object'
if isinstance(_type, ArrayType):
if _type.raw_name() is None: # Otherwise, fall through and use typedef'd name.
return 'Inspector::Protocol::Array<%s>' % CppGenerator.cpp_protocol_type_for_type(_type.element_type)
if isinstance(_type, (ObjectType, AliasedType, EnumType, ArrayType)):
return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if isinstance(_type, PrimitiveType):
return CppGenerator.cpp_name_for_primitive_type(_type)
@staticmethod
def cpp_protocol_type_for_type_member(type_member, object_declaration):
if isinstance(type_member.type, EnumType) and type_member.type.is_anonymous:
return '::'.join([CppGenerator.cpp_protocol_type_for_type(object_declaration.type), ucfirst(type_member.member_name)])
else:
return CppGenerator.cpp_protocol_type_for_type(type_member.type)
@staticmethod
def cpp_type_for_unchecked_formal_in_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through to enum or primitive.
if isinstance(_type, EnumType):
_type = _type.primitive_type # Fall through to primitive.
# This handles the 'any' type and objects with defined properties.
        if isinstance(_type, ObjectType) or _type.qualified_name() == 'object':
cpp_name = 'JSON::Object'
if parameter.is_optional:
return 'const %s*' % cpp_name
else:
return 'const %s&' % cpp_name
if isinstance(_type, ArrayType):
cpp_name = 'JSON::Array'
if parameter.is_optional:
return 'const %s*' % cpp_name
else:
return 'const %s&' % cpp_name
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return 'const %s* const' % cpp_name
elif _type.raw_name() in ['string']:
return 'const %s&' % cpp_name
else:
return cpp_name
return "unknown_unchecked_formal_in_parameter_type"
@staticmethod
def cpp_type_for_checked_formal_event_parameter(parameter):
return CppGenerator.cpp_type_for_type_with_name(parameter.type, parameter.parameter_name, parameter.is_optional)
@staticmethod
def cpp_type_for_type_member(member):
return CppGenerator.cpp_type_for_type_with_name(member.type, member.member_name, False)
@staticmethod
def cpp_type_for_type_with_name(_type, type_name, is_optional):
if isinstance(_type, (ArrayType, ObjectType)):
return 'RefPtr<%s>' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, AliasedType):
builder_type = CppGenerator.cpp_protocol_type_for_type(_type)
if is_optional:
return 'const %s* const' % builder_type
elif _type.aliased_type.qualified_name() in ['integer', 'number']:
return CppGenerator.cpp_name_for_primitive_type(_type.aliased_type)
elif _type.aliased_type.qualified_name() in ['string']:
return 'const %s&' % builder_type
else:
return builder_type
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if _type.qualified_name() in ['object']:
return 'RefPtr<JSON::Object>'
elif _type.qualified_name() in ['any']:
return 'RefPtr<JSON::Value>'
elif is_optional:
return 'const %s* const' % cpp_name
elif _type.qualified_name() in ['string']:
return 'const %s&' % cpp_name
else:
return cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
enum_type_name = ucfirst(type_name)
else:
enum_type_name = 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if is_optional:
return '%s*' % enum_type_name
else:
return '%s' % enum_type_name
@staticmethod
def cpp_type_for_formal_out_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through.
if isinstance(_type, (ObjectType, ArrayType)):
return 'RefPtr<%s>&' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return "Inspector::Protocol::OptOutput<%s>*" % cpp_name
else:
return '%s*' % cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
return '%sBackendDispatcherHandler::%s*' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
else:
return 'Inspector::Protocol::%s::%s*' % (_type.type_domain().domain_name, _type.raw_name())
raise ValueError("unknown formal out parameter type.")
# FIXME: this is only slightly different from out parameters; they could be unified.
@staticmethod
def cpp_type_for_formal_async_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through.
if isinstance(_type, EnumType):
_type = _type.primitive_type # Fall through.
if isinstance(_type, (ObjectType, ArrayType)):
return 'RefPtr<%s>&&' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return "Inspector::Protocol::OptOutput<%s>*" % cpp_name
elif _type.qualified_name() in ['integer', 'number']:
return CppGenerator.cpp_name_for_primitive_type(_type)
elif _type.qualified_name() in ['string']:
return 'const %s&' % cpp_name
else:
return cpp_name
raise ValueError("Unknown formal async parameter type.")
# In-parameters don't use builder types, because they could be passed
# "open types" that are manually constructed out of InspectorObjects.
# FIXME: Only parameters that are actually open types should need non-builder parameter types.
@staticmethod
def cpp_type_for_stack_in_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through.
if isinstance(_type, EnumType):
_type = _type.primitive_type # Fall through.
if isinstance(_type, ObjectType):
return "RefPtr<JSON::Object>"
if isinstance(_type, ArrayType):
return "RefPtr<JSON::Array>"
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if _type.qualified_name() in ['any', 'object']:
return "RefPtr<%s>" % CppGenerator.cpp_name_for_primitive_type(_type)
elif parameter.is_optional and _type.qualified_name() not in ['boolean', 'string', 'integer']:
return "Inspector::Protocol::OptOutput<%s>" % cpp_name
else:
return cpp_name
@staticmethod
def cpp_type_for_stack_out_parameter(parameter):
_type = parameter.type
if isinstance(_type, (ArrayType, ObjectType)):
return 'RefPtr<%s>' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, AliasedType):
builder_type = CppGenerator.cpp_protocol_type_for_type(_type)
if parameter.is_optional:
return "Inspector::Protocol::OptOutput<%s>" % builder_type
return '%s' % builder_type
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return "Inspector::Protocol::OptOutput<%s>" % cpp_name
else:
return cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
return '%sBackendDispatcherHandler::%s' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
else:
return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
@staticmethod
def cpp_assertion_method_for_type_member(type_member, object_declaration):
def assertion_method_for_type(_type):
return 'BindingTraits<%s>::assertValueHasExpectedType' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(type_member.type, AliasedType):
return assertion_method_for_type(type_member.type.aliased_type)
if isinstance(type_member.type, EnumType) and type_member.type.is_anonymous:
return 'BindingTraits<%s>::assertValueHasExpectedType' % CppGenerator.cpp_protocol_type_for_type_member(type_member, object_declaration)
return assertion_method_for_type(type_member.type)
@staticmethod
def cpp_name_for_primitive_type(_type):
return _PRIMITIVE_TO_CPP_NAME_MAP.get(_type.raw_name())
# Decide whether certain helpers are necessary in a situation.
@staticmethod
def should_use_wrapper_for_return_type(_type):
return not isinstance(_type, (ArrayType, ObjectType))
@staticmethod
def should_use_references_for_type(_type):
return isinstance(_type, (ArrayType, ObjectType)) or (isinstance(_type, (PrimitiveType)) and _type.qualified_name() in ["any", "object"])
@staticmethod
def should_pass_by_copy_for_return_type(_type):
return isinstance(_type, (ArrayType, ObjectType)) or (isinstance(_type, (PrimitiveType)) and _type.qualified_name() == "object")
|
the-stack_106_25909 | # Tempest documentation build configuration file, created by
# sphinx-quickstart on Tue May 21 17:43:32 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import subprocess
import warnings
# Build the plugin registry
def build_plugin_registry(app):
root_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
subprocess.call(['tools/generate-tempest-plugins-list.sh'], cwd=root_dir)
def setup(app):
app.connect('builder-inited', build_plugin_registry)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'oslosphinx',
'oslo_config.sphinxconfiggen',
]
config_generator_config_file = '../../tempest/cmd/config-generator.tempest.conf'
sample_config_basename = '_static/tempest'
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tempest'
copyright = u'2013, OpenStack QA Team'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tempest.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
html_last_updated_fmt = subprocess.Popen(git_cmd,
stdout=subprocess.PIPE).\
communicate()[0]
except Exception:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tempestdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Tempest.tex', u'Tempest Documentation',
u'OpenStack QA Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tempest', u'Tempest Documentation',
[u'OpenStack QA Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Tempest', u'Tempest Documentation',
u'OpenStack QA Team', 'Tempest', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Tempest'
epub_author = u'Sean Dague'
epub_publisher = u'OpenStack QA Team'
epub_copyright = u'2013, OpenStack QA Team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
the-stack_106_25912 | from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from auction_api.models import Auction
from auction_api.serializers.auction import AuctionSerializer
from auction_api.services.email import send_new_auction_notifications
class AuctionAPIView(GenericAPIView):
permission_classes = [IsAuthenticated]
serializer_class = AuctionSerializer
queryset = Auction.objects.all()
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
auction_instance = serializer.save()
send_new_auction_notifications(auction_instance)
return Response(serializer.data, status=status.HTTP_200_OK)
@swagger_auto_schema(
manual_parameters=[
openapi.Parameter(name="all", in_="query", type=openapi.TYPE_BOOLEAN),
openapi.Parameter(
name="only_active", in_="query", type=openapi.TYPE_BOOLEAN
),
openapi.Parameter(
name="only_closed", in_="query", type=openapi.TYPE_BOOLEAN
),
]
)
def get(self, request):
if len(request.query_params) > 1:
return Response(
"Using more than one parameter is not allowed",
status=status.HTTP_400_BAD_REQUEST,
)
if request.query_params.get("only_active") == "true":
self.queryset = self.get_queryset().is_active()
elif request.query_params.get("only_closed") == "true":
self.queryset = self.get_queryset().is_closed()
return Response(
self.serializer_class(instance=self.get_queryset(), many=True).data
)
|
the-stack_106_25914 |
import numpy as np
import scipy as sp
import logging
import osr
import ogr
import gdal
from shapely.geometry import Polygon
from biopal.agb.processing_AGB import (
check_intersection,
interp2d_wrapper,
merge_agb_intermediate,
compute_processing_blocs_order,
)
# %%
def sample_and_tabulate_data(
block_extents,
pixel_axis_east,
pixel_axis_north,
sampling_polygons,
stack_in_block,
stack_info_table,
stack_info_table_column_names,
formula_observables,
observable_for_forest_class,
formula_parameters,
number_of_subsets,
):
"""
(
observable_table,
observable_names
identifier_table,
identifier_names,
parameter_position_table,
parameter_position_names,
parameter_tables,
parameter_table_columns,
sample_info_table,
sample_info_table_columns,
) = sample_and_tabulate_data(
block_extents, # extent of the current area for which the table is created
pixel_axis_east, # east north axes onto which data are interpolated
pixel_axis_north,
sampling_polygons, # additional arbitrarily shaped polygons
stack_in_block, # flags whether each stack is in current block
stack_info_table, # info table with stack properties (stack id, headings, etc., defining the acquisition parameters)
stack_info_table_column_names, # column names for the abovementioned table
formula_observables,
observable_for_forest_class,
formula_parameters,
number_of_subsets, # number of subsets to use (used to allocate columns in parameter tables)
)
Reads input data images, averages over polygons, and tabulates
INPUT:
block_extents contains the extents of the current block
pixel_axis_east, pixel_axis_north are vectors defining the grid to which the read data has to be interpolated
sampling_polygons is a list of Polygon objects over which the sampling is to be done
stack_in_block is a vector of bools with the same length as the number of stacks, indicating whether each stack exists within the current block
stack_info_table is a 2D array linking stack IDs to selected identifiers
stack_info_table_column_names is the corresponding list of column names
formula_observables is a named tuple with information about formula observables
observale_for_forest_class is the name of the observable that acts as a forest_class observable
formula_parameters is a named tuple with information about formula parameters
number_of_subsets is the number subsets (independent tests) to be run
OUTPUT:
observable_table is a 2D table with observable data averaged over the polygons
observable_names is a corresponding list of observables (column names for the table above)
identifier_table is a 2D table with identifiers matching the observable_table
identifier_names is a corresponding list of identifier names (column names for the table above)
parameter_position_table is a 2D table with columnwise unique indices for each parameter
parameter_position_names is the corresponding list of column names for the table above
parameter_tables is a list of minimal-sized 2D arrays within which the estimated parameter values for each parameter will be saved (one list element for each unknown parameter)
parameter_table_columns is a corresponding list of lists with table column names
sample_info_table is a 2D table with information about each sample
sample_info_table_columns is the corresponding list with names for the columns of the table above
"""
# derived parameters
number_of_stacks = stack_info_table.shape[0]
stack_id_vector = stack_info_table[:, 0]
number_of_samples = len(sampling_polygons)
number_of_parameters = len(formula_parameters.name)
number_of_observables = len(formula_observables.name)
pixel_mesh_east, pixel_mesh_north = np.meshgrid(pixel_axis_east, pixel_axis_north)
# defining names for identifiers (sampleID & forest class ID and then all columns from stack info table)
identifier_names = [
"sample_id",
"forest_class_id",
] + stack_info_table_column_names # + ["resolution_m"]
number_of_identifiers = len(identifier_names)
identifier_table = np.nan * np.zeros(
(number_of_samples * number_of_stacks, number_of_identifiers)
)
# filling out the first column with sample IDs
identifier_table[:, 0] = np.kron(
np.arange(number_of_samples), np.ones(number_of_stacks)
)
# filling out columns 3-8 with stack IDs and corresponding other identifications from stack_info_table
identifier_table[:, 2] = np.kron(np.ones(number_of_samples), stack_id_vector)
for id_idx in range(5):
identifier_table[:, 3 + id_idx] = sp.interpolate.interp1d(
stack_id_vector, stack_info_table[:, 1 + id_idx], kind="nearest"
)(identifier_table[:, 2])
# allocate observable table
observable_table = np.nan * np.zeros(
(number_of_samples * number_of_stacks, number_of_observables)
)
sample_info_table_columns = [
"easting",
"northing",
"area",
"east_range",
"north_range",
]
sample_info_table = np.nan * np.zeros(
(number_of_samples * number_of_stacks, len(sample_info_table_columns))
)
## get info about samples (coordinates, area, shape factor)
# calculating sample area and coordinates
sample_info_table = []
for current_map, current_stat, current_name in zip(
[
pixel_mesh_east,
pixel_mesh_north,
pixel_mesh_east * 0
+ np.abs(np.diff(pixel_axis_east)[0] * np.diff(pixel_axis_north)[0]),
pixel_mesh_east,
pixel_mesh_north,
],
["mean", "mean", "sum", "range", "range"],
sample_info_table_columns,
):
logging.info("AGB: calculating sample statistic '{}'".format(current_name))
sample_info_table.append(
np.round(
stats_on_all_samples(
current_map,
0,
pixel_mesh_east,
pixel_mesh_north,
sampling_polygons,
current_stat,
)
)
)
sample_info_table = np.column_stack(sample_info_table)
sample_info_table = np.kron(sample_info_table, np.ones((number_of_stacks, 1)))
### READING OBSERVABLE DATA
# cycle through observable sources in the stack
for observable_idx in range(number_of_observables):
# number of stacks in current observable (must be either 1 or number_of_stacks)
current_number_of_stacks = len(formula_observables.source_paths[observable_idx])
is_stacked = current_number_of_stacks == number_of_stacks
# cycle over stacks
for stack_idx in range(current_number_of_stacks):
# go ahead only if current stack is (at least partially) contained in the current parameter block:
if (is_stacked and stack_in_block[stack_idx]) or not is_stacked:
# cycle over all the equi7 tiles, interpolate over pixel grid and average
source_data_interp = np.NaN * np.zeros(
(len(pixel_axis_north), len(pixel_axis_east)), dtype="float"
)
for file_idx, file_source in enumerate(
formula_observables.source_paths[observable_idx][stack_idx]
):
if (
file_source[0] != "none"
): # np.round(file_source[2])<=current_resolution:
source_data_interp_curr = interp2d_wrapper(
file_source[0],
file_source[1] + 1,
pixel_axis_east,
pixel_axis_north,
fill_value=np.NaN,
)
source_data_interp = merge_agb_intermediate(
source_data_interp,
source_data_interp_curr,
method="nan_mean",
)
# # masking the stack:
# source_data_interp[forest_class_map_interp == 0] = np.NaN
if is_stacked:
logging.info(
"AGB: sampling and transforming data for stacked observable '{}' (stack {}/{}, file {}/{})".format(
formula_observables.name[observable_idx],
stack_idx + 1,
number_of_stacks,
file_idx + 1,
len(
formula_observables.source_paths[
observable_idx
][stack_idx]
),
)
)
else:
logging.info(
"AGB: sampling and transforming data for unstacked observable '{}' (file {}/{})".format(
formula_observables.name[observable_idx],
file_idx + 1,
len(
formula_observables.source_paths[
observable_idx
][stack_idx]
),
)
)
# calculate sample statistics
temp_transformed_sampled_data = transform_function(
stats_on_all_samples(
source_data_interp,
formula_observables.source_resolution[observable_idx],
pixel_mesh_east,
pixel_mesh_north,
sampling_polygons,
formula_observables.averaging_method[observable_idx],
),
formula_observables.limits[observable_idx],
formula_observables.transform[observable_idx],
)
# check if observable is stacked (i.e., the list length equals number of stacks)
if is_stacked:
# find rows where the stack id matches the current stack id
current_rows = (
identifier_table[:, 2] == stack_id_vector[stack_idx]
) # & (identifier_table[:,-1]==current_resolution)
# fill out the table
observable_table[
current_rows, observable_idx
] = temp_transformed_sampled_data
else:
# fill out the table by replicating the averaged data
observable_table[:, observable_idx] = np.kron(
temp_transformed_sampled_data, np.ones(number_of_stacks)
)
# break if this is a required observable and all sampled data are nan
# (speeds up the reading)
if (
np.all(np.isnan(observable_table[:, observable_idx]))
& formula_observables.is_required[observable_idx]
):
logging.info(
"AGB: no data for the first required observable, skipping the rest"
)
break
# repeating it across stacks and inserting into observable data table (note: forest class assumed constant across stacks)
# identifier_table[:, 1] = np.kron(temp_forest_class_vector, np.ones(number_of_stacks))
forest_class_position = np.where(
match_string_lists(
formula_observables.name, [observable_for_forest_class]
).flatten()
>= 0
)[0]
if len(forest_class_position) == 0:
logging.error(
"AGB: cannot find forest class observable {}.".format(
observable_for_forest_class
)
)
else:
forest_class_position = forest_class_position[0]
identifier_table[:, 1] = observable_table[:, forest_class_position]
# observable_table = np.nan*observable_table # this is just a dummy thing to check the behaviour of this function in case of lack of data
# mark rows in observable data table that have negative identifiers, nan-valued sar observables, infinite sar observables, or negative agb values
invalid_rows = (
np.any(identifier_table < 0, axis=1)
| np.any(np.isnan(observable_table[:, formula_observables.is_required]), axis=1)
| np.any(
~np.isfinite(observable_table[:, formula_observables.is_required]), axis=1
)
)
# exclude invalid rows
observable_table = observable_table[~invalid_rows, :]
identifier_table = identifier_table[~invalid_rows, :]
sample_info_table = sample_info_table[~invalid_rows, :]
# number of rows in data table
number_of_rows_in_observable_table = observable_table.shape[0]
### PREPARING PARAMETER TABLES
parameter_property_names = ["lower_limit", "upper_limit", "initial_value"] + [
"estimate_%d" % (ii) for ii in np.arange(number_of_subsets) + 1
]
parameter_position_names = [
"row_" + parameter_name for parameter_name in formula_parameters.name
]
parameter_tables = []
parameter_table_columns = []
parameter_position_table = np.nan * np.zeros(
(number_of_rows_in_observable_table, number_of_parameters)
)
# creating parameter matrices
for parameter_idx, parameter_variability in enumerate(
formula_parameters.parameter_variabilities
):
# take out only the relevant identifiers (the ones that change as per parameter variability)
# and create column names by adding four additional columns: min, max and initial value, and estimated value (later set to NaN)
parameter_table_columns.append(
np.concatenate(
(
np.array(identifier_names)[parameter_variability],
np.array(parameter_property_names),
)
)
)
# create the minimal ID table (without unnecessary columns for those dimension across which the parameter doesn't change)
temp_ids_table = identifier_table[:, np.where(parameter_variability)[0]]
# create the last four columns
temp_minmax_table = np.array(
[formula_parameters.limits[parameter_idx]]
) * np.ones((number_of_rows_in_observable_table, 1))
temp_initial_table = np.mean(temp_minmax_table, axis=1)
temp_estimated_table = np.kron(
np.ones((1, number_of_subsets)),
np.array([np.mean(temp_minmax_table, axis=1)]).transpose(),
)
# create the full table
# note: this table has initially the same shape as the observable table
temp_full_table = np.column_stack(
(
temp_ids_table,
temp_minmax_table,
temp_initial_table,
temp_estimated_table,
)
)
# take out unique rows and the inverse vector recreating the rows of the observable table
# the inverse vector is critical as it relates the positions in the observable table to positions in each parameter table
temp_full_table, temp_position_in_observable_table = np.unique(
temp_full_table, axis=0, return_inverse=True
)
# set the last colum of the full table to nan (estimated value unavailable now)
temp_full_table[:, -number_of_subsets:] = np.nan
# append the table
parameter_tables.append(temp_full_table)
# include the inverse vector in the observable data table at the correct position
parameter_position_table[:, parameter_idx] = temp_position_in_observable_table
return (
observable_table,
formula_observables.name,
identifier_table,
identifier_names,
parameter_position_table,
parameter_position_names,
parameter_tables,
parameter_table_columns,
sample_info_table,
sample_info_table_columns,
)
# %%
def fit_formula_to_random_subsets(
formula_terms,
number_of_subsets,
observable_table,
observable_names,
identifier_table,
identifier_names,
parameter_position_table,
parameter_names,
parameter_tables,
parameter_table_columns,
parameter_variabilities,
calibration_fraction,
estimation_fraction,
calibration_areas_per_test,
estimation_areas_per_test,
transfer_function_name,
):
"""
(
parameter_tables,
space_invariant_parameter_table,
space_invariant_parameter_names,
space_variant_parameter_table,
space_variant_parameter_names,
) = fit_formula_to_random_subsets(
formula_terms,
number_of_subsets,
observable_table,
observable_names,
identifier_table,
identifier_names,
parameter_position_table,
parameter_names,
parameter_tables,
parameter_table_columns,
parameter_variabilities,
calibration_fraction,
estimation_fraction,
calibration_areas_per_test,
estimation_areas_per_test,
transfer_function_name,
)
Fits space invariant and space variant parameters to the data by minimising the residuals defined in the formulas
INPUT:
formula_terms is a named tuple with formula strings and associated formula weights
number_of_subsets is the number of random subsets to be created
observable_table is a 2D numpy array with observable data
observable_names is a corresponding list of observable names
identifier_table is a 2D numpy array with identifiers
identifier_names is a corresponding list of identifier names
parameter_position_table is a 2D numpy array with indices indicating how the parameters to be estimated vary across samples
parameter_names is a corresponding list of parameter names
parameter_tables is a list of parameter tables, one per parameter, each with the minimal number of rows (one per unique parameter value) and with columns holding the relevant identifiers, the limits and initial value, and the estimate from each subset
parameter_table_columns is a list of lists with parameter column names
parameter_variabilities contains the variabilities of each parameter across the predefined dimensions
calibration_fraction is the fraction of samples with all required and non-required data to be used in each subset
estimation_fraction is the fraction of all samples with all required, but with some missing non-required data to be used in each subset
calibration_areas_per_test is the minimal number of calibration areas required for this function to be run
estimation_areas_per_test is the minimal number of estimation areas required for this function to be run
transfer_function_name is the name of the transfer function used in this function
OUTPUT:
parameter_tables is the updated list of parameter tables
space_invariant_parameter_table is a 2D numpy array with the same number of rows as observable_table, with the estimated space invariant parameter values distributed across rows according to parameter_position_table
space_invariant_parameter_names is a corresponding list of space invariant parameter names
space_variant_parameter_table is a 2D numpy array with the same number of rows as observable_table, with the estimated space variant parameter values distributed across rows according to parameter_position_table
space_variant_parameter_names is a corresponding list of space variant parameter names
"""
### minimal and maximal number of tests with different initial start values (to assess local minima and avoid occasional failures)
min_max_number_of_tests = [3, 30]
### CREATE CALIBRATION AND ESTIMATION SUBSETS
logging.info("AGB: creating {} subsets".format(number_of_subsets))
# select rows with available agb information as calibration data and those without as estimation data
columns_with_nans = np.all(np.isnan(observable_table), axis=0)
calibration_rows = np.where(
np.all(~np.isnan(observable_table[:, ~columns_with_nans]), axis=1)
)[0]
estimation_rows = np.where(
np.any(np.isnan(observable_table[:, ~columns_with_nans]), axis=1)
)[0]
calibration_sample_ids = np.unique(identifier_table[calibration_rows, 0])
estimation_sample_ids = np.unique(identifier_table[estimation_rows, 0])
# calculate subset sizes
estimation_subset_size = np.int32(
np.ceil(len(estimation_sample_ids) * estimation_fraction)
)
calibration_subset_size = np.int32(
np.ceil(len(calibration_sample_ids) * calibration_fraction)
)
# find random data subsetting vectors, making sure that every space-invariant parameter is covered
# by at least the required number of calibration and estimation areas in each subset
subset_indexing_vectors = []
number_of_accepted_subsets = 0
max_number_of_subsets_to_test = number_of_subsets * 10
tested_subset_counter = 0
while (number_of_accepted_subsets < number_of_subsets) & (
tested_subset_counter < max_number_of_subsets_to_test
):
# create a random subset of calibration and estimation samples
current_random_estimation_subset = np.sort(
np.random.permutation(estimation_sample_ids)[:estimation_subset_size]
)
current_random_calibration_subset = np.sort(
np.random.permutation(calibration_sample_ids)[:calibration_subset_size]
)
# calculate the minimal number of calibration and estimation samples for the space-invariant parameters
# (for the latter, we use the column with parameter positions in parameter tables - the same value indicates the same parameter)
current_parameter_position_columns = np.where(
~np.row_stack(parameter_variabilities)[:, 0]
)[0]
current_calibration_rows = np.isin(
identifier_table[:, 0], current_random_calibration_subset
)
min_number_of_calibration_measurements_per_space_invariant_parameter = np.inf
for column_idx in current_parameter_position_columns:
# calculate the minimal number of samples over all parameter values within this column, tracking the minimum over the columns processed so far
if np.any(current_calibration_rows):
min_number_of_calibration_measurements_per_space_invariant_parameter = np.minimum(
min_number_of_calibration_measurements_per_space_invariant_parameter,
np.min(
np.unique(
parameter_position_table[
current_calibration_rows, column_idx
],
return_counts=True,
)[1]
),
)
else:
min_number_of_calibration_measurements_per_space_invariant_parameter = 0
current_estimation_rows = np.isin(
identifier_table[:, 0], current_random_estimation_subset
)
min_number_of_estimation_measurements_per_space_invariant_parameter = np.inf
for column_idx in current_parameter_position_columns:
# calculate the minimal number of samples over all parameter values within this column, tracking the minimum over the columns processed so far
if np.any(current_estimation_rows):
min_number_of_estimation_measurements_per_space_invariant_parameter = np.minimum(
min_number_of_estimation_measurements_per_space_invariant_parameter,
np.min(
np.unique(
parameter_position_table[
current_estimation_rows, column_idx
],
return_counts=True,
)[1]
),
)
else:
min_number_of_estimation_measurements_per_space_invariant_parameter = 0
# if the minimal number of samples is at least the number specified in the xml configuration file, accept this subset
# (at the moment, we don't perform other tests, which means that subsets may be repeated)
if (
min_number_of_calibration_measurements_per_space_invariant_parameter
>= calibration_areas_per_test
) & (
min_number_of_estimation_measurements_per_space_invariant_parameter
>= estimation_areas_per_test
):
subset_indexing_vectors.append(
np.isin(
identifier_table[:, 0],
np.sort(
np.concatenate(
(
current_random_calibration_subset,
current_random_estimation_subset,
)
)
),
)
)
number_of_accepted_subsets += 1
tested_subset_counter += 1
# subset success flag
subset_success = np.zeros(number_of_subsets, dtype=bool)
if number_of_accepted_subsets == 0:
logging.info("AGB: no accepted subsets found in current block.")
space_invariant_parameter_table = []
space_invariant_parameter_names = []
space_variant_parameter_table = []
space_variant_parameter_names = []
else:
if number_of_accepted_subsets < number_of_subsets:
logging.warning(
"AGB: number of accepted subsets ({}) is less than the required number of subsets ({}).".format(
number_of_accepted_subsets, number_of_subsets
)
)
# detect observables without data (used to remove formula terms that would generate nans)
observables_without_data = [
observable_name
for is_nan, observable_name in zip(
np.all(np.isnan(observable_table), axis=0), observable_names
)
if is_nan
]
terms_with_nan_observables = np.any(
match_string_lists(formula_terms.string, observables_without_data) >= 0,
axis=1,
)
if np.any(terms_with_nan_observables):
logging.warning(
"AGB: skipping formula terms: {} in steps 1-3 due to lack of useful data for observables: {}.".format(
", ".join(
[
"%s" % (curr_name)
for curr_name in subset_iterable(
formula_terms.name, terms_with_nan_observables
)
]
),
", ".join(observables_without_data),
)
)
### FIRST, WE PERFORM THE ESTIMATION OF PARAMETERS FOR SUBSETS
logging.info("AGB: parameter estimation step 1: estimation for subsets")
#### select relevant formula and weights for this step
terms_with_zero_weight_step1 = (
np.array(formula_terms.formula_weights.step1) == 0
)
if np.any(terms_with_zero_weight_step1):
logging.warning(
"AGB: skipping formula terms: {} in step 1 due to zero weights.".format(
", ".join(
[
"%s" % (curr_name)
for curr_name in subset_iterable(
formula_terms.name, terms_with_zero_weight_step1
)
]
),
)
)
terms_to_take_step1 = ~(
terms_with_nan_observables | terms_with_zero_weight_step1
)
formula_step1 = subset_iterable(formula_terms.string, terms_to_take_step1)
formula_names_step1 = subset_iterable(formula_terms.name, terms_to_take_step1)
formula_weights_step1 = subset_iterable(
formula_terms.formula_weights.step1, terms_to_take_step1
)
# find observables and parameters in formula and create
# vectors for selecting parameters and observables that exist in formula
observables_in_formula_step1 = np.any(
match_string_lists(formula_step1, observable_names) >= 0, axis=0
)
observables_in_parameters = np.any(
match_string_lists(parameter_names, observable_names) >= 0, axis=0
)
parameters_in_formula_step1 = np.any(
match_string_lists(formula_step1, parameter_names) >= 0, axis=0
)
# find parameters that do not change between samples
space_invariant_parameters = ~np.column_stack(parameter_variabilities)[0, :]
# some more flag vectors
space_invariant_parameters_in_formula_step1 = (
space_invariant_parameters & parameters_in_formula_step1
)
# loop through calibration subsets
for subset_idx, current_subset in enumerate(subset_indexing_vectors):
logging.info(
"AGB: minimising cost function for subset {} out of {}...".format(
subset_idx + 1, number_of_subsets
)
)
# subset parameter and observable tables
current_parameter_position_table = parameter_position_table[
current_subset, :
][:, parameters_in_formula_step1]
current_parameter_names = subset_iterable(
parameter_names, parameters_in_formula_step1, False
)
current_observable_table = observable_table[current_subset, :][
:, observables_in_formula_step1
]
current_observable_names = subset_iterable(
observable_names, observables_in_formula_step1, False
)
# create min-max tables
individual_parameter_min_max_tables = subset_iterable(
parameter_tables, parameters_in_formula_step1, False
)
for parameter_idx in range(len(individual_parameter_min_max_tables)):
individual_parameter_min_max_tables[
parameter_idx
] = individual_parameter_min_max_tables[parameter_idx][
:, -number_of_subsets - 3 : -number_of_subsets - 1
]
# estimate both parameters and AGB for the subset
(current_lut_all_parameters, _, curr_cost_function_values) = fit_formula_to_table_data(
formula_step1,
formula_names_step1,
formula_weights_step1,
current_observable_table, # subset_iterable(current_observable_table.transpose(),~np.all(np.isnan(current_observable_table),axis=0),return_array=True).transpose(),
current_observable_names, # subset_iterable(current_observable_names,~np.all(np.isnan(current_observable_table),axis=0)),
current_parameter_position_table,
current_parameter_names,
individual_parameter_min_max_tables,
transfer_function_name,
min_max_number_of_tests,
)
# check if success
subset_success[subset_idx] = ~np.any(np.isnan(curr_cost_function_values))
# fill out parameter tables with estimates of space invariant parameters
for current_parameter_idx in np.where(
space_invariant_parameters_in_formula_step1
)[0]:
# identify column in the current output table
current_column_idx = np.where(
np.array(current_parameter_names)
== np.array(parameter_names[current_parameter_idx])
)[0][0]
# identify valid rows in the current output lut
current_rows = (
current_lut_all_parameters[:, 1] == current_column_idx
) & (
np.abs(
current_lut_all_parameters[:, -2]
- current_lut_all_parameters[:, -1]
)
> 1e-10 # checking if the parameter has changed from the initial value
)
# write the relevant rows of the parameter table
parameter_tables[current_parameter_idx][
np.int32(current_lut_all_parameters[current_rows, 2]),
-number_of_subsets + subset_idx,
] = current_lut_all_parameters[current_rows, -1]
### THEN, WE ESTIMATE SPACE VARIANT PARAMETERS FOR ALL SAMPLES USING SPACE INVARIANT PARAMETERS FROM SUBSETS
logging.info(
"AGB: parameter estimation step 2: estimation of AGB from subset estimated parameters"
)
#### select relevant formula and weights for this step
terms_with_zero_weight_step2 = (
np.array(formula_terms.formula_weights.step2) == 0
)
if np.any(terms_with_zero_weight_step2):
logging.warning(
"AGB: skipping formula terms: {} in step 2 due to zero weights.".format(
", ".join(
[
"%s" % (curr_name)
for curr_name in subset_iterable(
formula_terms.name, terms_with_zero_weight_step2
)
]
),
)
)
terms_to_take_step2 = ~(
terms_with_nan_observables | terms_with_zero_weight_step2
)
formula_step2 = subset_iterable(formula_terms.string, terms_to_take_step2)
formula_names_step2 = subset_iterable(formula_terms.name, terms_to_take_step2)
formula_weights_step2 = subset_iterable(
formula_terms.formula_weights.step2, terms_to_take_step2
)
observables_in_formula_step2 = np.any(
match_string_lists(formula_step2, observable_names) >= 0, axis=0
)
parameters_in_formula_step2 = np.any(
match_string_lists(formula_step2, parameter_names) >= 0, axis=0
)
space_invariant_parameters_in_formula_step2 = (
space_invariant_parameters & parameters_in_formula_step2
)
space_variant_parameters_in_formula_step2 = (
~space_invariant_parameters & parameters_in_formula_step2
)
observables_in_formula_step2_not_in_parameters = (
observables_in_formula_step2 & ~observables_in_parameters
)
# loop through calibration subsets
for subset_idx in range(number_of_subsets):
if not subset_success[subset_idx]:
logging.warning(
"AGB: skipping space-invariant parameter set {} out of {}. Proceed with caution. ".format(
subset_idx + 1, number_of_subsets
)
)
continue
logging.info(
"AGB: estimating space-variant parameters for all samples using space-invariant parameter set {} out of {}...".format(
subset_idx + 1, number_of_subsets
)
)
# create a table with all space invariant parameters
space_invariant_parameter_names = subset_iterable(
parameter_names, space_invariant_parameters_in_formula_step2, False
)
current_space_invariant_parameter_table = []
for current_parameter_idx in np.where(
space_invariant_parameters_in_formula_step2
)[0]:
# extract current parameter table
current_parameter_table = parameter_tables[current_parameter_idx]
# extract the current position vector
current_parameter_position_vector = np.int32(
parameter_position_table[:, current_parameter_idx]
)
# extract the column with the current subset estimates
current_column_in_parameter_table = -number_of_subsets + subset_idx
# add the current data
current_space_invariant_parameter_table.append(
current_parameter_table[
current_parameter_position_vector,
current_column_in_parameter_table,
]
)
current_space_invariant_parameter_table = np.column_stack(
current_space_invariant_parameter_table
)
# these parameters are now treated as observables, so
# space invariant parameter table is merged with observable table
new_observable_table = np.column_stack(
(
observable_table[:, observables_in_formula_step2_not_in_parameters],
current_space_invariant_parameter_table,
)
)
new_observable_names = (
subset_iterable(
observable_names,
observables_in_formula_step2_not_in_parameters,
False,
)
+ space_invariant_parameter_names
)
# now, only the space-variant parameters are treated as unknown parameters
# the corresponding tables and lists are now created
new_parameter_position_table = parameter_position_table[
:, space_variant_parameters_in_formula_step2
]
new_parameter_names = subset_iterable(
parameter_names, space_variant_parameters_in_formula_step2, False
)
new_individual_parameter_min_max_tables = subset_iterable(
parameter_tables, space_variant_parameters_in_formula_step2, False
)
for parameter_idx in range(len(new_individual_parameter_min_max_tables)):
new_individual_parameter_min_max_tables[
parameter_idx
] = new_individual_parameter_min_max_tables[parameter_idx][
:, -number_of_subsets - 3 : -number_of_subsets - 1
]
# estimate space variant parameters for all samples
(
current_lut_space_variant_parameters,
space_variant_parameter_table,
curr_cost_function_values
) = fit_formula_to_table_data(
formula_step2,
formula_names_step2,
formula_weights_step2,
new_observable_table,
new_observable_names,
new_parameter_position_table,
new_parameter_names,
new_individual_parameter_min_max_tables,
transfer_function_name,
min_max_number_of_tests,
)
# check if success
subset_success[subset_idx] = ~np.any(np.isnan(curr_cost_function_values))
# fill out parameter tables with estimates of space variant parameters
for current_parameter_idx in np.where(
space_variant_parameters_in_formula_step2
)[0]:
# identify column
current_column_idx = np.where(
np.array(new_parameter_names)
== np.array(parameter_names[current_parameter_idx])
)[0][0]
# identify valid rows
current_rows = (
current_lut_space_variant_parameters[:, 1] == current_column_idx
) & (
np.abs(
current_lut_space_variant_parameters[:, -2]
- current_lut_space_variant_parameters[:, -1]
)
> 1e-10
)
# save current estimates to parameter tables
parameter_tables[np.int32(current_parameter_idx)][
np.int32(current_lut_space_variant_parameters[current_rows, 2]),
-number_of_subsets + subset_idx,
] = current_lut_space_variant_parameters[current_rows, -1]
if np.all(subset_success):
logging.info("AGB: all subsets passed steps 1 and 2. Continuing with step 3.")
else:
if np.any(subset_success):
logging.warning("AGB: {} out of {} subsets passed steps 1 and 2. Continuing with step 3. Proceed with care.".format(np.sum(subset_success),len(subset_success)))
else:
logging.info("AGB: no subsets passed steps 1 and 2. Skipping step 3.")
if np.any(subset_success):
### FINALLY, WE ESTIMATE SPACE INVARIANT PARAMETERS USING THE AVERAGE SPACE VARIANT PARAMETER VALUES FROM ALL SUBSETS FOR ALL SAMPLES
logging.info(
"AGB: parameter estimation step 3: fitting space-invariant parameters using space-variant parameter estimate"
)
terms_with_zero_weight_step3 = (
np.array(formula_terms.formula_weights.step3) == 0
)
if np.any(terms_with_zero_weight_step3):
logging.warning(
"AGB: skipping formula terms: {} in step 3 due to zero weights.".format(
", ".join(
[
"%s" % (curr_name)
for curr_name in subset_iterable(
formula_terms.name, terms_with_zero_weight_step3
)
]
),
)
)
terms_to_take_step3 = ~(
terms_with_nan_observables | terms_with_zero_weight_step3
)
formula_step3 = subset_iterable(formula_terms.string, terms_to_take_step3)
formula_names_step3 = subset_iterable(formula_terms.name, terms_to_take_step3)
formula_weights_step3 = subset_iterable(
formula_terms.formula_weights.step3, terms_to_take_step3
)
observables_in_formula_step3 = np.any(
match_string_lists(formula_step3, observable_names) >= 0, axis=0
)
parameters_in_formula_step3 = np.any(
match_string_lists(formula_step3, parameter_names) >= 0, axis=0
)
space_invariant_parameters_in_formula_step3 = (
space_invariant_parameters & parameters_in_formula_step3
)
space_variant_parameters_in_formula_step3 = (
~space_invariant_parameters & parameters_in_formula_step3
)
observables_in_formula_step3_not_in_parameters = (
observables_in_formula_step3 & ~observables_in_parameters
)
# names for table columns
space_variant_parameter_names = subset_iterable(
parameter_names, space_variant_parameters_in_formula_step3, False
)
# create table with all space invariant parameters
current_space_variant_parameter_table = []
for position_in_parameter_table_list in np.where(
space_variant_parameters_in_formula_step3
)[0]:
current_parameter_table = parameter_tables[position_in_parameter_table_list]
current_parameter_position_vector = np.int32(
parameter_position_table[:, position_in_parameter_table_list]
)
current_columns_in_parameter_table = np.arange(-number_of_subsets, 0)
current_space_variant_parameter_table.append(
np.mean(
current_parameter_table[current_parameter_position_vector, :][
:, current_columns_in_parameter_table
],
axis=1,
)
)
current_space_variant_parameter_table = np.column_stack(
current_space_variant_parameter_table
)
# new observable table is the combination of observable table and space variant parameter table
new_observable_table = np.column_stack(
(
observable_table[:, observables_in_formula_step3_not_in_parameters],
current_space_variant_parameter_table,
)
)
new_observable_names = (
subset_iterable(
observable_names, observables_in_formula_step3_not_in_parameters, False
)
+ space_variant_parameter_names
)
new_parameter_position_table = parameter_position_table[
:, space_invariant_parameters_in_formula_step3
]
new_parameter_names = subset_iterable(
parameter_names, space_invariant_parameters_in_formula_step3, False
)
new_individual_parameter_min_max_tables = subset_iterable(
parameter_tables, space_invariant_parameters_in_formula_step3, False
)
for parameter_idx in range(len(new_individual_parameter_min_max_tables)):
new_individual_parameter_min_max_tables[
parameter_idx
] = new_individual_parameter_min_max_tables[parameter_idx][
:, -number_of_subsets - 3 : -number_of_subsets - 1
]
# estimate space variant parameters for all samples
(_, space_invariant_parameter_table, _,) = fit_formula_to_table_data(
formula_step3,
formula_names_step3,
formula_weights_step3,
new_observable_table,
new_observable_names,
new_parameter_position_table,
new_parameter_names,
new_individual_parameter_min_max_tables,
transfer_function_name,
min_max_number_of_tests,
)
else:
space_invariant_parameter_table = []
space_invariant_parameter_names = []
space_variant_parameter_table = []
space_variant_parameter_names = []
return (
parameter_tables,
space_invariant_parameter_table,
space_invariant_parameter_names,
space_variant_parameter_table,
space_variant_parameter_names,
)
# %% functions needed for function above
# swap variable names in formulas to slices of an array
def swap_names_and_merge_formula(
original_formulas,
observable_names,
parameter_names,
new_table_name,
use_observable_if_repeated_and_available=True,
):
"""
new_formulas = swap_names_and_merge_formula(
original_formulas, observable_names, parameter_names, new_table_name, use_observable_if_repeated_and_available=True
)
Swaps observable and parameter names in the original formulas for slices of a table. Column indices are determined by the position of each parameter/observable name in the combined list
of observable and parameter names.
INPUT:
original_formulas is a list with formula strings
observable_names is a list of observable names
parameter_names is a list of parameter names
new_table_name is the name for the new table
use_observable_if_repeated_and_available indicates whether observables should be prioritised if the same name occurs both in the parameter_names and observable_names lists
OUTPUT:
new_formulas is a list of formula strings where the observable/parameter names have been replaced with slices of the new table
"""
original_variable_names = observable_names + parameter_names
unique_variable_names, name_counts = np.unique(
np.array(original_variable_names), return_counts=True
)
new_formula = []
for current_formula in original_formulas:
for unique_variable_name, name_count in zip(unique_variable_names, name_counts):
if name_count == 1:
position_in_variable_names_vector = np.where(
np.array(original_variable_names) == unique_variable_name
)[0][0]
elif name_count == 2:
position_in_variable_names_vector = np.where(
np.array(original_variable_names) == unique_variable_name
)[0][np.int32(~use_observable_if_repeated_and_available)]
current_formula = current_formula.replace(
unique_variable_name,
new_table_name + ("[:,%d]" % (position_in_variable_names_vector)),
)
new_formula.append(current_formula)
return new_formula
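# Hedged usage sketch for swap_names_and_merge_formula (hypothetical names, not part of the
# processing chain): with observables ["agb", "sigma0"] and parameters ["l", "agb"], the repeated
# name "agb" resolves to the observable column because use_observable_if_repeated_and_available
# defaults to True. Note that the replacement is a plain string substitution, so variable names
# should not be substrings of one another or of the table name.
# >>> swap_names_and_merge_formula(
# ...     ["l * agb - sigma0"], ["agb", "sigma0"], ["l", "agb"], "tab"
# ... )
# ['tab[:,2] * tab[:,0] - tab[:,1]']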
# %%
# function for converting columnwise indices (which are repeated within the same column if they represent identical values,
# but which may be repeated across different columns without meaning that they represent identical values)
# to unique indices (which are only repeated within the same table if they are to have identical values)
def regularise_indices(columnwise_index_table):
"""
regularised_index_table = regularise_indices(columnwise_index_table)
Converts columnwise indices to global indices.
INPUT:
columnwise_index_table is a 2D numpy array with different numbers indicating different parameter values within each column
OUTPUT:
regularised_index_table is a 2D numpy array with different numbers indicating different parameter values within the entire table
"""
offset = 0
unique_index_table = [] # position in a single x-vector
columnwise_to_unique_index_lut = (
[]
) # lut for converting between parameter id and column and parameter position in x
for column_idx, parameter_column in enumerate(columnwise_index_table.transpose()):
# add -1 at the beginning to avoid vectors with one element (which will not work with interp1d)
old_indices = np.concatenate((-1 * np.ones(1), np.unique(parameter_column)))
# new indices is a simple sequence from 0 to number of parameters-1 + offset due to previous parameters
new_indices = np.arange(len(old_indices)) - 1 + offset
# convert parameter indices and add to the list
unique_index_table.append(
sp.interpolate.interp1d(old_indices, new_indices, kind="nearest")(
parameter_column
)
)
# save the lut, removing the first, unnecessary element
columnwise_to_unique_index_lut.append(
np.column_stack(
(
new_indices[old_indices > -1],
column_idx + np.zeros(len(old_indices[old_indices > -1])),
old_indices[old_indices > -1],
)
)
)
# update offset based on current parameter column
offset = np.max(new_indices) + 1
# convert the list of vectors to an array
unique_index_table = np.int32(np.column_stack(unique_index_table))
# stack all luts to one lut
columnwise_to_unique_index_lut = np.row_stack(columnwise_to_unique_index_lut)
# # length of beta vector
# length_of_p_vector = columnwise_to_unique_index_lut.shape[0]
return unique_index_table, columnwise_to_unique_index_lut
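# Hedged illustration of regularise_indices (hypothetical input): indices repeated within a
# column refer to the same parameter value, while equal indices in different columns do not,
# so each column is offset by the number of unique values found in the previous columns.
# >>> import numpy as np
# >>> unique_idx, lut = regularise_indices(np.array([[0, 0], [0, 1], [1, 1]]))
# >>> unique_idx              # position of each table entry in a single x-vector
# array([[0, 2],
#        [0, 3],
#        [1, 3]], dtype=int32)
# >>> lut                     # [position in x-vector, column index, original columnwise index]
# array([[0., 0., 0.],
#        [1., 0., 1.],
#        [2., 1., 0.],
#        [3., 1., 1.]])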
# %%
def cost_function(
x_vector,
converted_formulas,
formula_weights,
observable_table,
index_table,
name_of_table_in_converted_formula,
transfer_function,
return_one_value=True,
):
"""
cost_function_value = cost_function(
x_vector, converted_formulas, formula_weights, observable_table, index_table, name_of_table_in_converted_formula, transfer_function, return_one_value=True
)
Calculates the cost function value from a one-dimensional vector
INPUT:
x_vector is a one-dimensional vector as required by the sp.optimize.minimize function
converted_formulas is a list of formula strings converted to use columns of one single table
formula_weights is a list of formula weights
observable_table is a 2D numpy array with observables
index_table is a 2D numpy array with parameter indices in x_vector
name_of_table_in_converted_formula is a string with the name that is used in the converted formulas for the combined table
transfer_function is a string with the transfer function type
return_one_value is a bool indicating if one value should be returned (default: True), or if the function should return one value for each formula term (False)
OUTPUT:
cost_function_value is either a scalar or an array of the same length as the list of formula strings
"""
p_vector = transfer_function(x_vector)
table_in_converted_formula = np.column_stack(
(observable_table, p_vector[index_table])
)
final_expression = "0"
for converted_formula, formula_weight in zip(converted_formulas, formula_weights):
final_expression += ",%.18f*np.nanmean((%s)**2)" % (
formula_weight / np.sum(formula_weights) * len(formula_weights),
converted_formula,
)
final_expression = "np.array([%s])" % (final_expression)
total_cost = eval(
final_expression,
{name_of_table_in_converted_formula: table_in_converted_formula, "np": np},
)[1:]
if return_one_value:
return np.sqrt(np.nansum(total_cost))
else:
return total_cost
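# Hedged note on the assembled cost expression (illustrative values): for two converted formulas
# f1 and f2 with weights [1, 3], the string evaluated above is equivalent to
# np.array([0, 0.5*np.nanmean(f1**2), 1.5*np.nanmean(f2**2)]); the leading 0 is discarded by the
# [1:] slice and, with return_one_value=True, the scalar cost is np.sqrt(np.nansum(...)) of the
# remaining terms, i.e. a weighted root-mean-square of the formula residuals.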
# %%
def fit_formula_to_table_data(
original_formula,
formula_names,
formula_weights,
observable_table,
observable_table_column_names,
parameter_position_table,
parameter_position_table_column_names,
individual_parameter_min_max_tables,
transfer_function_name,
min_and_max_tests,
):
"""
estimated_parameter_lut, estimated_parameter_table, cost_function_values = fit_formula_to_table_data(
original_formula,
formula_names,
formula_weights,
observable_table,
observable_table_column_names,
parameter_position_table,
parameter_position_table_column_names,
individual_parameter_min_max_tables,
transfer_function_name,
min_and_max_tests,
)
Performs minimisation of the cost function defined by a formula and weights and returns a look-up-table with the estimated parameters, an estimated parameter table, and cost function values
INPUT:
original_formula is a list of formula strings
formula_names is a list of formula names (to be used in logging)
formula_weights is a nested list of formula weights
observable_table is a numpy array with the observables
observable_table_column_names is a list of strings with names for each of the observables in the observable table
parameter_position_table is a numpy array matching observable_table in number of rows with the positions of each unknown parameter in its unique table
parameter_position_table_column_names is a list of strings with the names for each column in parameter_position_table
individual_parameter_min_max_tables is a list of numpy arrays for each parameter, with min and max values for each parameter
transfer_function_name is a string with the transfer function to be used
min_and_max_tests is a 2-element list with the required minimum number of tests that need to be run with different start values and the maximal allowed number of tests to be run
OUTPUT:
estimated_parameters_lut is a minimal-size look-up-table with the estimated parameter values
estimated_parameter_table is a table matching parameter_position_table in size, where the parameters have been distributed across rows and columns using the estimated_parameters_lut
cost_function_values is an array with the weighted cost function value of each formula term
"""
# convert the columnwise indices in "parameter_position_table" to unique indices
unique_index_table, columnwise_to_unique_index_lut = regularise_indices(
parameter_position_table
)
# number of unique parameters to estimate
length_of_p_vector = columnwise_to_unique_index_lut.shape[0]
# create a table of min and max parameter values (requires looping through parameter tables and extracting relevant rows)
# this allows a possible flexible setting of intervals in the future
p_min_max_table = np.nan * np.zeros((length_of_p_vector, 2))
for parameter_idx, individual_parameter_min_max_table in enumerate(
individual_parameter_min_max_tables
):
current_rows_in_lut = columnwise_to_unique_index_lut[:, 1] == parameter_idx
current_positions_in_individual_parameter_table = np.int32(
columnwise_to_unique_index_lut[current_rows_in_lut, 2]
)
p_min_max_table[current_rows_in_lut, :] = individual_parameter_min_max_table[
current_positions_in_individual_parameter_table, :
]
# extract min and max values (initial values are generated later by randomisation)
p_lower = p_min_max_table[:, 0]
p_upper = p_min_max_table[:, 1]
# the name of the table that is used in the original_formula string
# (could be anything as long as the same string is used in swap_names_in_original_formula and cost_function)
table_name_in_converted_formula = "current_data_table"
# find rows for which all observable data exist
# rows_with_all_observables = np.all(~np.isnan(observable_table), axis=1)
# in this case, the formula should use the first occurence of the same quantity, if it is observed in both "observables" and "parameters"
converted_formula = swap_names_and_merge_formula(
original_formula,
observable_table_column_names,
parameter_position_table_column_names,
table_name_in_converted_formula,
use_observable_if_repeated_and_available=True,
)
cost_function_arguments = (
converted_formula,
formula_weights,
observable_table,
unique_index_table,
table_name_in_converted_formula,
lambda x: parameter_transfer_function(
x, p_lower, p_upper, False, transfer_function_name
),
True,
)
# iterate a few times with different initial values in case some initial value set fails
max_count = min_and_max_tests[1]
min_count = min_and_max_tests[0]
success_count = 0
# allocate list for estimated parameters
p_estimated = []
cost_function_values = []
# loop through tests
for counter in range(max_count):
# creating initial values by randomising
p_initial = p_lower + np.random.rand(length_of_p_vector) * (p_upper - p_lower)
# converting to x
x_initial = parameter_transfer_function(
p_initial, p_lower, p_upper, True, transfer_function_name
)
# fit the model
fitted_model = sp.optimize.minimize(
cost_function, x_initial, cost_function_arguments, method="BFGS"
)
if fitted_model.success:
# calculate parameters and cost function values
curr_p_estimated = parameter_transfer_function(
fitted_model.x, p_lower, p_upper, False, transfer_function_name
)
curr_cost_function_values = cost_function(
parameter_transfer_function(
curr_p_estimated, p_lower, p_upper, True, transfer_function_name
),
*cost_function_arguments[:-1],
False
)
if np.any(np.isnan(curr_cost_function_values)):
logging.warning(" ... cost function evaluated to NaN for the current test; continuing.")
# add current estimates to the lists
p_estimated.append(curr_p_estimated)
cost_function_values.append(curr_cost_function_values)
success_count += 1
# report and continue with next run or report and finish
if (success_count<min_count) & (counter<(max_count-1)):
logging.info(
" ... test with different initial values finished with success (%d/%d, cost function values: [%s]). Running next test..."
% (success_count,min_count,
", ".join(
[
"%s: %.2f" % (curr_name, curr_value)
for curr_value, curr_name in zip(
curr_cost_function_values, formula_names
)
]
)
)
)
continue
else:
# stack the results
p_estimated = np.row_stack(p_estimated)
cost_function_values = np.row_stack(cost_function_values)
# check if variability across estimates with different start values is not too big
parameter_consistency = np.nanmean(np.nanstd(p_estimated,axis=0))
min_pos = np.where(np.sum(cost_function_values,axis=1)==np.min(np.sum(cost_function_values,axis=1)))[0]
if len(min_pos)==0:
logging.warning(" ... could not identify the best test run from the cost function values.")
selected_position = min_pos[0]
p_estimated = p_estimated[selected_position,:]
cost_function_values = cost_function_values[selected_position,:]
if (parameter_consistency<1e-4) & (success_count>1):
if success_count<min_count:
logging.warning(
" ... test with different initial values finished with success, but the obtained number of successes was not sufficient (%d/%d, cost function values: [%s]). Finishing (mean standard deviation for parameters across tests: %f)..."
% (success_count,min_count,
", ".join(
[
"%s: %.2f" % (curr_name, curr_value)
for curr_value, curr_name in zip(
curr_cost_function_values, formula_names
)
]
),
parameter_consistency,
)
)
else:
logging.info(
" ... test with different initial values finished with success (%d/%d, cost function values: [%s]). Finishing (mean standard deviation for parameters across tests: %f)..."
% (success_count,min_count,
", ".join(
[
"%s: %.2f" % (curr_name, curr_value)
for curr_value, curr_name in zip(
curr_cost_function_values, formula_names
)
]
),
parameter_consistency,
)
)
else:
if (parameter_consistency>=1e-4) & (success_count>1):
logging.warning(
" ... test with different initial values finished with suspicion of multiple minima (%d/%d, cost function values: [%s]). Mean standard deviation for parameters across tests: %f. Continue with caution..."
% (success_count,min_count,
", ".join(
[
"%s: %.2f" % (curr_name, curr_value)
for curr_value, curr_name in zip(
curr_cost_function_values, formula_names
)
]
),
parameter_consistency,
)
)
elif success_count==1:
logging.warning(
" ... test with different initial values finished with only one success (%d/%d, cost function values: [%s]). Mean standard deviation for parameters across tests: %f. Impossible to assess if there are multiple minima. Continue with caution..."
% (success_count,min_count,
", ".join(
[
"%s: %.2f" % (curr_name, curr_value)
for curr_value, curr_name in zip(
curr_cost_function_values, formula_names
)
]
),
parameter_consistency,
)
)
break
else:
if counter < (max_count - 1):
logging.info(
" ... finished with failure (message: {}). Rerunning with different initial values...".format(
fitted_model.message
)
)
continue
else:
p_estimated = np.nan * np.zeros(length_of_p_vector)
cost_function_values = np.nan * np.ones(len(converted_formula))
logging.info(
" ... finished with failure (message: {}). Entire estimation failed (skipping this subset)...".format(
fitted_model.message
)
)
break
return (
np.column_stack((columnwise_to_unique_index_lut, p_initial, p_estimated)),
p_estimated[unique_index_table],
cost_function_values,
)
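# Hedged reading guide for the returned look-up table (column meanings inferred from the code
# above): each row of estimated_parameter_lut contains
#   [position in x-vector, parameter column index, position in the individual parameter table,
#    initial value from the last test run, estimated value],
# and estimated_parameter_table redistributes the estimated values back onto the rows of
# parameter_position_table via p_estimated[unique_index_table].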
# %%
def read_and_organise_3d_data(
current_block_extents,
block_has_data,
pixel_axis_north,
pixel_axis_east,
stack_info_table,
stack_info_table_column_names,
observable_names,
observable_sources,
observable_transforms,
observable_averaging_methods,
observable_ranges,
observable_is_required,
forest_class_sources,
forest_class_boundaries,
stack_id_vector,
forest_class_id_vector,
space_invariant_parameter_table,
space_invariant_parameter_names,
mask_out_area_outside_block=False,
):
"""
(
forest_class_3d,
observables_3d,
observable_names,
space_invariant_parameters_3d,
space_invariant_parameter_names,
identifiers_3d,
identifiers_3d_names,
) = read_and_organise_3d_data(
current_block_extents,
block_has_data,
pixel_axis_north,
pixel_axis_east,
stack_info_table,
stack_info_table_column_names,
observable_names,
observable_sources,
observable_transforms,
observable_averaging_methods,
observable_ranges,
observable_is_required,
forest_class_sources,
forest_class_boundaries,
stack_id_vector,
forest_class_id_vector,
space_invariant_parameter_table,
space_invariant_parameter_names,
mask_out_area_outside_block=False,
)
Reads forest class and observable data for the current block, resamples them onto the output pixel grid, and rasterises the space invariant parameter table onto the same grid
INPUT:
current_block_extents contains the east and north boundaries of the current block
block_has_data contains a bool for each stack indicating whether this stack has any data within the block
pixel_axis_north, pixel_axis_east are the two axes onto which the read data should be interpolated
stack_info_table is a table with identifier values for each stack, each stack is represented by a new row
stack_info_table_column_names is a list of strings with column names for the stack_info_table
observable_names is a list of strings with names for the observables (quantities to be read)
observable_sources is a nested list with source data (paths & band_ids) for each stack
observable_transforms is a list of strings with transforms to be applied to each observable
observable_averaging_methods is a list of strings with methods of averaging to be applied to each observable
observable_ranges is a list of accepted ranges for each observable (prior to transformation)
observable_is_required is a list of bools telling the algorithm if the observable is required for AGB estimation or just optional
forest_class_sources is a nested list with source data for forest class
forest_class_boundaries is a list of min and max coordinates for the forest class images
stack_id_vector is a vector of stack ids matching the space_invariant_parameter_table (below)
forest_class_id_vector is a vector with forest class ids matching the space_invariant_parameter_table (below)
space_invariant_parameter_table is a table with the estimated space invariant parameters (which will be rasterised)
space_invariant_parameter_names is a list of names for the columns of the table above
mask_out_area_outside_block is a bool indicating whether the area outside block should be set to nan for the rasterised parameter images
OUTPUT:
forest_class_3d is a numpy array with forest class
observables_3d is a list of numpy_arrays with the observables
observable_names is a corresponding list of observable names
space_invariant_parameters_3d is a list of numpy arrays with space invariant parameters (rasterised from the table)
space_invariant_parameter_names is a corresponding list of parameter names
identifiers_3d is a numpy array with the identifiers
identifiers_3d_names is a corresponding list of identifier names
"""
# create mask for current block
current_block_mask = np.zeros(
(len(pixel_axis_north), len(pixel_axis_east)), dtype="bool"
)
# set areas within block to true
current_block_mask[
np.array(
[
np.where(
(pixel_axis_north > current_block_extents[3])
& (pixel_axis_north < current_block_extents[2])
)[0]
]
).transpose(),
np.array(
[
np.where(
(pixel_axis_east > current_block_extents[0])
& (pixel_axis_east < current_block_extents[1])
)[0]
]
),
] = True
def apply_look_up_table(lut_x, lut_y, output_xs):
output_y = np.nan * np.zeros(np.prod(output_xs).shape)
for row_in_lut in range(len(lut_y)):
positions_in_output = (
np.prod(
[
lut_x[row_in_lut, column_in_lut] == output_xs[column_in_lut]
for column_in_lut in range(len(output_xs))
]
)
== 1
)
output_y[positions_in_output] = lut_y[row_in_lut]
return output_y
# derived parameters
number_of_stacks = stack_info_table.shape[0]
number_of_observables = len(observable_names)
# number_of_space_invariant_parameters = len(space_invariant_parameter_names)
### READING AND SAMPLING FOREST CLASS DATA
logging.info("AGB: reading forest class map")
forest_class_3d = np.zeros(
(len(pixel_axis_north), len(pixel_axis_east)), dtype="float"
)
for file_idx, file_source in enumerate(forest_class_sources):
forest_class_map_boundaries = forest_class_boundaries[file_idx]
forest_class_map_is_inside = check_intersection(
forest_class_map_boundaries[0],
forest_class_map_boundaries[1],
forest_class_map_boundaries[2],
forest_class_map_boundaries[3],
current_block_extents[0],
current_block_extents[1],
current_block_extents[3],
current_block_extents[2],
)
if forest_class_map_is_inside:
forest_class_3d_curr = np.round(
interp2d_wrapper(
file_source[0],
file_source[1] + 1,
pixel_axis_east,
pixel_axis_north,
fill_value=float(0),
)
)
# mean all the fnf tiles
forest_class_3d = np.ceil(
merge_agb_intermediate(
forest_class_3d, forest_class_3d_curr, method="nan_mean"
)
)
logging.info(
"AGB: reading forest class image data (file {}/{})".format(
file_idx + 1, len(forest_class_sources)
)
)
# set all unrealistic values to 0 = non-forest
forest_class_3d[(forest_class_3d <= 0) | np.isnan(forest_class_3d)] = 0
forest_class_3d = np.array([forest_class_3d]).transpose([1, 2, 0])
### READING OBSERVABLE DATA
# allocate observable tables (one table in a list for each observable)
observables_3d = []
# cycle through observable sources in the stack
for observable_idx in range(number_of_observables):
observables_3d.append(
np.nan
* np.zeros((len(pixel_axis_north), len(pixel_axis_east), number_of_stacks))
)
# number of stacks in current observable (must be either 1 or number_of_stacks)
current_number_of_stacks = len(observable_sources[observable_idx])
# check if observable is stacked (i.e., the list length equals number of stacks)
if current_number_of_stacks == number_of_stacks:
# cycle over stacks
for stack_idx in range(number_of_stacks):
# go ahead only if current stack is (at least partially) contained in the current parameter block:
if block_has_data[stack_idx]:
# cycle over all the equi7 tiles, interpolate over pixel grid and average
source_data_interp = np.NaN * np.zeros(
(len(pixel_axis_north), len(pixel_axis_east)), dtype="float"
)
for file_idx, file_source in enumerate(
observable_sources[observable_idx][stack_idx]
):
source_data_interp_curr = interp2d_wrapper(
file_source[0],
file_source[1] + 1,
pixel_axis_east,
pixel_axis_north,
fill_value=np.NaN,
)
source_data_interp = merge_agb_intermediate(
source_data_interp,
source_data_interp_curr,
method="nan_mean",
)
# masking the stack:
source_data_interp[forest_class_3d[:, :, 0] == 0] = np.NaN
logging.info(
"AGB: reading stacked image data for observable '{}' (stack {}/{}, file {}/{})".format(
observable_names[observable_idx],
stack_idx + 1,
number_of_stacks,
file_idx + 1,
len(observable_sources[observable_idx][stack_idx]),
)
)
observables_3d[observable_idx][
:, :, stack_idx
] = transform_function(
source_data_interp,
observable_ranges[observable_idx],
observable_transforms[observable_idx],
)
# otherwise, replicate across stacks
elif current_number_of_stacks == 1:
source_data_interp = np.nan * np.zeros(
(len(pixel_axis_north), len(pixel_axis_east)), dtype="float"
)
for file_idx, file_source in enumerate(
observable_sources[observable_idx][0]
):
source_data_interp_curr = interp2d_wrapper(
file_source[0],
file_source[1] + 1,
pixel_axis_east,
pixel_axis_north,
fill_value=np.NaN,
)
source_data_interp = merge_agb_intermediate(
source_data_interp, source_data_interp_curr, method="nan_mean"
)
# masking the stack:
source_data_interp[forest_class_3d[:, :, 0] == 0] = np.NaN
logging.info(
"AGB: reading unstacked image data for observable '{}' (file {}/{})".format(
observable_names[observable_idx],
file_idx + 1,
len(observable_sources[observable_idx][0]),
)
)
temporary_transf_image = transform_function(
source_data_interp,
observable_ranges[observable_idx],
observable_transforms[observable_idx],
)
for stack_idx in range(number_of_stacks):
observables_3d[observable_idx][:, :, stack_idx] = temporary_transf_image
# break if this is a required observable and all sampled data are nan
# (speeds up the reading)
if (
np.all(np.isnan(observables_3d[observable_idx]))
& observable_is_required[observable_idx]
):
logging.info(
"AGB: no data for the first required observable, skipping the rest"
)
break
identifiers_3d = []
for identifier_idx in range(stack_info_table.shape[1]):
identifiers_3d.append(np.array([[stack_info_table[:, identifier_idx]]]))
# create maps for the space invariant parameters
space_invariant_parameters_3d = []
for parameter_idx, parameter_name in enumerate(space_invariant_parameter_names):
current_lut = np.column_stack(
(
forest_class_id_vector,
stack_id_vector,
space_invariant_parameter_table[:, parameter_idx],
)
)
current_lut = np.unique(current_lut, axis=0)
current_parameter_map_3d = apply_look_up_table(
current_lut[:, :-1],
current_lut[:, -1],
(forest_class_3d, identifiers_3d[0]),
)
if mask_out_area_outside_block:
current_parameter_map_3d[~current_block_mask] = np.nan
space_invariant_parameters_3d.append(current_parameter_map_3d)
# return maps
return (
forest_class_3d,
observables_3d,
observable_names,
space_invariant_parameters_3d,
space_invariant_parameter_names,
identifiers_3d,
stack_info_table_column_names,
)
# %%
def map_space_variant_parameters(
formula,
formula_weights,
forest_class_3d,
observables_3d,
observables_3d_names,
space_invariant_parameters_3d,
space_invariant_parameters_3d_names,
identifiers_3d,
identifiers_3d_names,
space_variant_parameters_3d_names,
space_variant_parameters_3d_variabilities,
space_variant_parameters_3d_limits,
transfer_function_name,
):
"""
space_variant_parameters_3d, space_variant_parameters_3d_names = map_space_variant_parameters(
formula,
formula_weights,
forest_class_3d,
observables_3d,
observables_3d_names,
space_invariant_parameters_3d,
space_invariant_parameters_3d_names,
identifiers_3d,
identifiers_3d_names,
space_variant_parameters_3d_names,
space_variant_parameters_3d_variabilities,
space_variant_parameters_3d_limits,
transfer_function_name,
)
Uses a newton-based method to fit space variant parameters to observable data and space invariant parameters based on the formula that has to be minimised
INPUT:
formula is a list of strings with the original formulas
formula_weights is a list or array of floats representing weights for each formula
forest_class_3d is a numpy array with the forest class data
observables_3d is a list of numpy arrays with the observable data rasters
observables_3d_names is a list of strings with names for each observable
space_invariant_parameters_3d is a list of numpy arrays with the space invariant parameter rasters
space_invariant_parameters_3d_names is a corresponding list of space invariant parameter names
identifiers_3d is a list of numpy arrays with identifiers for stacks
identifiers_3d_names is a corresponding list of identifier names
space_variant_parameters_3d_names is a list of names for the space variant parameters
space_variant_parameters_3d_variabilities is a corresponding list of variabilities across the eight dimensions
space_variant_parameters_3d_limits is a corresponding list of 2-element vectors with lower and upper limits for each space variant parameter
transfer_function_name is the name of the transfer function to be used for constraining parameters to their limits
OUTPUT:
space_variant_parameters_3d is a list of numpy arrays with the estimated space variant parameter maps
space_variant_parameters_3d_names is a corresponding list of parameter names
"""
def small_change_in_intermediate_parameters_3d(
intermediate_parameter, additional_arguments, small_step
):
def cost_function_3d(intermediate_parameter, additional_arguments):
# print(np.nanmean(intermediate_parameter))
(
converted_formula,
all_observables_3d,
transfer_function,
transfer_function_name,
space_variant_parameter_limits,
data_list_name,
) = additional_arguments
space_variant_parameter = np.kron(
np.ones((1, 1, all_observables_3d[0].shape[2])),
transfer_function(
intermediate_parameter,
space_variant_parameter_limits[0],
space_variant_parameter_limits[1],
False,
transfer_function_name,
),
)
data_list = all_observables_3d + [space_variant_parameter]
return np.sqrt(
np.nanmean(
eval(converted_formula, {data_list_name: data_list, "np": np}),
axis=2,
)
)
cost_function_value = cost_function_3d(
intermediate_parameter, additional_arguments
)
cost_function_value_after = cost_function_3d(
intermediate_parameter + small_step, additional_arguments
)
cost_function_value_before = cost_function_3d(
intermediate_parameter - small_step, additional_arguments
)
small_change = (
small_step
* (cost_function_value_after - cost_function_value_before)
/ (
2
* (
cost_function_value_after
- 2 * cost_function_value
+ cost_function_value_before
)
)
)
return np.array([small_change]).transpose([1, 2, 0]), cost_function_value
if (not len(space_variant_parameters_3d_names) == 1) or (
np.any(space_variant_parameters_3d_variabilities[0][1:])
):
logging.error(
"AGB: map creation is currently not implemented for multiple space-variant parameters or space-variant parameters that change in time or with forest class.",
exc_info=False,
)
else:
data_list_name = "data_list"
all_observables_3d_names = (
observables_3d_names + space_invariant_parameters_3d_names
)
converted_formula = swap_names_and_merge_formula_3d(
formula,
formula_weights,
all_observables_3d_names,
space_variant_parameters_3d_names,
data_list_name,
use_observable_if_repeated_and_available=True,
)
all_observables_3d = observables_3d + space_invariant_parameters_3d
# intermediate_parameters_3d = np.pi/4+np.zeros((len(pixel_axis_north),len(pixel_axis_east),1))
space_variant_parameters_3d_initial = np.mean(
space_variant_parameters_3d_limits[0]
) * np.ones((observables_3d[0].shape[0], observables_3d[0].shape[1], 1))
intermediate_parameters_3d = parameter_transfer_function(
space_variant_parameters_3d_initial,
space_variant_parameters_3d_limits[0][0],
space_variant_parameters_3d_limits[0][1],
True,
transfer_function_name,
)
additional_arguments = (
converted_formula,
all_observables_3d,
parameter_transfer_function,
transfer_function_name,
space_variant_parameters_3d_limits[0],
data_list_name,
)
# this is a tricky part because the small step and max change depend on the quantity that we optimise for
if transfer_function_name == "sin2":
small_step = 0.01
maximal_change_magnitude = 0.03
else:
small_step = 0.25
maximal_change_magnitude = 1
number_of_iterations = 1000
scaling_factor = 0.8
for ii in np.arange(number_of_iterations):
(
small_change,
cost_function_value,
) = small_change_in_intermediate_parameters_3d(
intermediate_parameters_3d, additional_arguments, small_step
)
intermediate_parameters_3d = intermediate_parameters_3d - np.maximum(
-maximal_change_magnitude,
np.minimum(maximal_change_magnitude, scaling_factor * small_change),
)
space_variant_parameters_3d = parameter_transfer_function(
intermediate_parameters_3d,
space_variant_parameters_3d_limits[0][0],
space_variant_parameters_3d_limits[0][1],
False,
transfer_function_name,
)
logging.info(
"AGB: map creation successful (average cost function value: %.2f)"
% (np.nanmean(cost_function_value))
)
return (
[space_variant_parameters_3d],
space_variant_parameters_3d_names,
)
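# Hedged derivation of the update used in small_change_in_intermediate_parameters_3d above: with
# central differences f'(x) ~ (f(x+h) - f(x-h)) / (2h) and f''(x) ~ (f(x+h) - 2*f(x) + f(x-h)) / h**2,
# the Newton step f'(x)/f''(x) becomes
#     h * (f(x+h) - f(x-h)) / (2 * (f(x+h) - 2*f(x) + f(x-h))),
# which is exactly the per-pixel expression computed there; the step is then damped by
# scaling_factor and clipped to +/- maximal_change_magnitude before being subtracted.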
# %% swap variable names in formulas to elements of a list
def swap_names_and_merge_formula_3d(
original_formulas,
weights,
observable_names,
parameter_names,
new_table_name,
use_observable_if_repeated_and_available=True,
):
"""
new_formula = swap_names_and_merge_formula_3d(
original_formulas, weights, observable_names, parameter_names, new_table_name, use_observable_if_repeated_and_available=True
)
Merges individual formula strings and weights to one formula strings and replaces observable and parameter names with elements of a list with pre-defined name.
INPUT:
original_formulas is a list of strings with the original formulas
weights is a list or array of floats representing weights for each formula
observable_names is a list of names to formula terms which are to be treated as observables (i.e., known values)
parameter_names is a list of names to formula terms which are to be treated as parameters (i.e., unknown values)
new_table_name is a string with the name for the list that contains both the observables and parameters
use_observable_if_repeated_and_available is a bool indicating whether a name that is observed both in the
observable_names and parameter_names should be treated as observable; by default this is True;
however, this function is going to be deprecated
OUTPUT:
new_formula is the output formula as a single string, where each of the original formulas have been squared and summed using the weights
"""
original_variable_names = observable_names + parameter_names
unique_variable_names, name_counts = np.unique(
np.array(original_variable_names), return_counts=True
)
new_formula = "0"
for current_formula, current_weight in zip(original_formulas, weights):
for unique_variable_name, name_count in zip(unique_variable_names, name_counts):
if name_count == 1:
position_in_variable_names_vector = np.where(
np.array(original_variable_names) == unique_variable_name
)[0][0]
elif name_count == 2:
position_in_variable_names_vector = np.where(
np.array(original_variable_names) == unique_variable_name
)[0][np.int32(~use_observable_if_repeated_and_available)]
current_formula = current_formula.replace(
unique_variable_name,
new_table_name + ("[%d]" % (position_in_variable_names_vector)),
)
new_formula = new_formula + " + %f*(%s)**2" % (current_weight, current_formula)
return new_formula.replace("0 + ", "")
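# Hedged usage sketch (hypothetical names chosen to avoid substring collisions): the individual
# formulas are squared, weighted and summed into a single string that indexes elements of a list
# instead of table columns.
# >>> swap_names_and_merge_formula_3d(
# ...     ["xx - yy", "zz - ww"], [1.0, 2.0], ["xx", "yy", "zz"], ["ww"], "dlist"
# ... )
# '1.000000*(dlist[0] - dlist[1])**2 + 2.000000*(dlist[2] - dlist[3])**2'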
# %%
def match_string_lists(ref_string_list, test_string_list):
"""
match_array = match_string_lists(ref_string_list, test_string_list)
Matches a test string list to a reference string list element-by-element and returns the positions at which the test strings are found in the reference strings
INPUT:
ref_string_list is a list with reference strings
test_string_list is a list with test strings
OUTPUT:
match_array is a 2D numpy array containing the positions at which strings from test_string_list are found in ref_string_list, -1 is returned if the string is not found
"""
is_in = np.zeros((len(ref_string_list), len(test_string_list)))
for ref_idx, current_ref_string in enumerate(ref_string_list):
for test_idx, current_test_string in enumerate(test_string_list):
is_in[ref_idx, test_idx] = current_ref_string.find(current_test_string)
return is_in
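# Hedged example (hypothetical strings): the returned array holds str.find positions, so a value
# >= 0 means the test string occurs in the reference string and -1 means it does not.
# >>> match_string_lists(["l_hh * agb", "cov_vv"], ["agb", "cov"])
# array([[ 7., -1.],
#        [-1.,  0.]])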
# %% define parameter transfer functions
def parameter_transfer_function(
in_vector, p_lower, p_upper, in_vector_is_p=False, transfer_function_name="sin2"
):
"""
out_vector = parameter_transfer_function(in_vector, p_lower, p_upper, in_vector_is_p=False,transfer_function_name='sin2')
Transforms an unconstrained parameter onto a constrained parameter or vice versa.
INPUT:
in_vector is a numpy array with data to be transformed
    p_lower, p_upper are the lower and upper limits for the constrained parameter; they can be either scalars or numpy arrays of the same size as in_vector
    in_vector_is_p determines whether in_vector is the constrained or the unconstrained parameter; if False, in_vector is unconstrained and out_vector is constrained to [p_lower,p_upper],
    otherwise in_vector is constrained to [p_lower,p_upper] and this function performs the inverse transform
    transfer_function_name is a string defining the transfer function; currently the only supported transform is "sin2", any other string returns in_vector unchanged as out_vector
OUTPUT:
out_vector is the transformed version of in_vector
"""
# note: no check of x_vector, p_upper, p_lower is done here,
# it is assumed that the inputs are correct
if transfer_function_name == "sin2":
if not in_vector_is_p:
return p_lower + (p_upper - p_lower) * np.sin(in_vector) ** 2
else:
return np.arcsin(np.sqrt((in_vector - p_lower) / (p_upper - p_lower)))
else:
return in_vector
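# --- illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical round trip through the "sin2" transfer function: an
# unconstrained value is mapped into [p_lower, p_upper] and then mapped back.
# The helper name _example_parameter_transfer_function is introduced only for illustration.
def _example_parameter_transfer_function():
    import numpy as np
    unconstrained = np.array([-1.3, 0.0, 0.4, 2.0])
    p_lower, p_upper = 10.0, 50.0
    constrained = parameter_transfer_function(unconstrained, p_lower, p_upper)
    # all constrained values lie within [p_lower, p_upper]
    assert np.all((constrained >= p_lower) & (constrained <= p_upper))
    # the inverse transform recovers values that map onto the same constrained parameters
    recovered = parameter_transfer_function(constrained, p_lower, p_upper, in_vector_is_p=True)
    assert np.allclose(
        parameter_transfer_function(recovered, p_lower, p_upper), constrained
    )
    return constrained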
# %%
def save_human_readable_table(
path,
table,
column_names,
data_type_lut,
table_delimiter,
table_precision,
table_column_width,
):
"""
save_human_readable_table(
path, table, column_names, data_type_lut, table_delimiter, table_precision, table_column_width
)
    Saves a text file with the table in a nice, human-readable, fixed-width format.
INPUT:
path is a string with the table path
table is a n x m numpy array with the data
column_names is a list of length m with the names for each column
    data_type_lut is a list of length 2, where the first element contains all possible column names and the second contains the associated data types ("f" or "d")
table_delimiter is the delimiter
table_precision is the precision for data of type "f"
table_column_width is the column width
OUTPUT:
no output returned
"""
table_format, table_header = get_fmt_and_header(
column_names,
data_type_lut[0],
data_type_lut[1],
table_delimiter,
table_precision,
table_column_width,
)
np.savetxt(
path,
table,
fmt=table_format,
delimiter=table_delimiter,
header=table_header,
comments="",
)
path_npy = ".".join(path.split(".")[:-1]) + ".npy"
np.save(path_npy, table)
# %% function for creating list of format strings and a header for subsequent saving of tables into text files
def get_fmt_and_header(
column_names,
all_column_groups,
all_data_types,
delimiter="\t",
precision=2,
column_width=10,
):
"""
out_format, out_header = get_fmt_and_header(column_names, all_column_groups, all_data_types, delimiter="\t", precision=2, column_width=10)
Prepares an fmt string and a header string for saving in a nice, human-readable table of fixed column width using np.savetxt
INPUT:
column_names is a list of strings for the current table columns
all_column_groups is a list of strings for all possible table columns
all_data_types is a list of strings for each of the columns in all_column_groups (two strings are currently allowed: "d" indicating integer and "f" indicating float)
delimiter is the delimiter to be used (default is tab)
precision is the precision to be used for data with type "f" (default is 2)
column_width is the width of each column (default is 10)
OUTPUT:
out_format is the fmt string required by np.savetxt
out_header is the header required by np.savetxt
"""
out_format = []
out_header = []
for curr_column in column_names:
for curr_column_group, curr_data_type in zip(all_column_groups, all_data_types):
if curr_column in curr_column_group:
if curr_data_type == "d":
out_format.append("%s%dd" % (r"%", column_width))
elif curr_data_type == "f":
out_format.append("%s%d.%df" % (r"%", column_width, precision))
break
out_header.append("%s%ds" % (r"%", column_width) % curr_column)
return out_format, delimiter.join(out_header)
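# --- illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical call to get_fmt_and_header; the column and group names below
# are made up for illustration. "block_id" is matched against the integer group and the
# two remaining columns against the float group, producing one fmt entry per column plus
# a fixed-width header string suitable for np.savetxt.
def _example_get_fmt_and_header():
    column_names = ["block_id", "east", "north"]
    all_column_groups = ["block_id", "east north"]
    all_data_types = ["d", "f"]
    out_format, out_header = get_fmt_and_header(
        column_names, all_column_groups, all_data_types, delimiter="\t", precision=2, column_width=10
    )
    # out_format -> ['%10d', '%10.2f', '%10.2f']
    # out_header -> '  block_id\t      east\t     north'
    return out_format, out_header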
# %%
def subset_iterable(iterable_to_subset, validity_mask, return_array=False):
"""
out_iterable = subset_iterable(in_iterable,validity_mask, return_array=False)
    Applies a validity mask to an iterable, where the iterable can be a numpy vector, a list, or a tuple
    INPUT:
    in_iterable is any iterable of length N (numpy vector, list, tuple)
    validity_mask is a boolean vector of length N with True marking that the corresponding item should be kept in the output
return_array is a boolean indicating if the returned iterable should be a numpy vector
OUTPUT:
out_iterable is an iterable of length=sum(validity_mask) containing a subset of the in_iterable matching the validity_mask, if return_array=True then out_iterable is a numpy array
"""
out = [value for value, flag in zip(iterable_to_subset, validity_mask) if flag]
if return_array:
return np.array(out)
else:
return out
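# --- illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical call to subset_iterable: entries whose mask value is True
# are kept, and return_array=True converts the result to a numpy array.
def _example_subset_iterable():
    import numpy as np
    values = ["stack_a", "stack_b", "stack_c"]
    mask = [True, False, True]
    assert subset_iterable(values, mask) == ["stack_a", "stack_c"]
    assert np.array_equal(
        subset_iterable(values, mask, return_array=True), np.array(["stack_a", "stack_c"])
    )
    return subset_iterable(values, mask)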
# %%
## in the future, improve this so it can handle polygons etc
def check_block_for_data_and_cal(
block_extents, stack_boundaries, calibration_boundaries
):
"""
block_has_data, block_has_cal = check_block_for_data_and_cal(block_extents, stack_boundaries, calibration_boundaries)
Checks if the current block has any stack data and calibration data
INPUT:
block_extents is a list with min and max easting and northing coordinates
stack_boundaries is an array with min and max easting and northing coordinates for each stack
calibration_boundaries is an array with min and max easting and northing coordinates for each calibration dataset
OUTPUT:
block_has_data is a vector of bools with length equal to the number of stacks, indicating if the block contains each stack
block_has_cal is a vector of bools with length equal to the number of calibration datasets, indicating if the block contains the calibration dataset
"""
# cycle through stacks and check that there are some data within current block
block_has_data = np.zeros(stack_boundaries.shape[0], dtype="bool")
for stack_idx, stack_boundary in enumerate(stack_boundaries):
# go ahead only if current parameter block is at least partially contained in the data stack:
block_has_data[stack_idx] = check_intersection(
stack_boundary[0],
stack_boundary[1],
stack_boundary[2],
stack_boundary[3],
block_extents[0],
block_extents[1],
block_extents[3],
block_extents[2],
)
# cycle through cals and see that there are some cals within current block
block_has_cal = np.zeros(calibration_boundaries.shape[0], dtype="bool")
for calibration_idx, calibration_boundary in enumerate(calibration_boundaries):
# go ahead only if current parameter block is at least partially contained in the data stack:
block_has_cal[calibration_idx] = check_intersection(
calibration_boundary[0],
calibration_boundary[1],
calibration_boundary[2],
calibration_boundary[3],
block_extents[0],
block_extents[1],
block_extents[3],
block_extents[2],
)
return block_has_data, block_has_cal
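# --- illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical call to check_block_for_data_and_cal. It assumes the
# check_intersection helper used above is available in this module and that the
# boundary arrays follow the same [min/max easting, min/max northing] layout that
# helper expects; the coordinate values below are made up for illustration.
def _example_check_block_for_data_and_cal():
    import numpy as np
    block_extents = [0.0, 1000.0, 0.0, 1000.0]
    stack_boundaries = np.array(
        [[500.0, 1500.0, 500.0, 1500.0], [5000.0, 6000.0, 5000.0, 6000.0]]
    )
    calibration_boundaries = np.array([[100.0, 200.0, 100.0, 200.0]])
    block_has_data, block_has_cal = check_block_for_data_and_cal(
        block_extents, stack_boundaries, calibration_boundaries
    )
    return block_has_data, block_has_cal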
# %%
def compute_block_processing_order(
block_corner_coordinates_east,
block_corner_coordinates_north,
block_size_east,
block_size_north,
calibration_area_coordinates,
stack_data_coordinates,
):
"""
block_order = compute_block_processing_order(
block_corner_coordinates_east,
block_corner_coordinates_north,
block_size_east,
block_size_north,
calibration_area_coordinates,
stack_data_coordinates,
)
Calculates the order in which blocks are to be processed, based on the available calibration area coordinates and stack data coordinates
INPUT:
block_corner_coordinates_east,block_corner_coordinates_north are vectors of matching size specifying the corner coordinates of each block (in metres)
block_size_east,block_size_north are scalars specifying the block size in each direction (in metres)
    calibration_area_coordinates, stack_data_coordinates are arrays with coordinates for calibration and stack data, respectively, with one row for each calibration dataset/stack
OUTPUT:
block_order is a vector of the same length as the number of blocks containing block IDs in the order in which they should be run
"""
# in the future, this should be capable of reading polygons for both calibration areas and stack data
# now, it's just a wrapper around an old function
(current_block_index, block_order) = compute_processing_blocs_order(
calibration_area_coordinates,
block_corner_coordinates_east,
block_size_east,
block_corner_coordinates_north,
block_size_north,
)
return block_order
# %% transform functions
def transform_function(in_data, interval, kind, do_forward=True):
"""
out_data = transform_function(in_data,interval,kind,do_forward=True)
Transforms data within a certain interval using a transformation
INPUT:
in_data is a scalar or a numpy array with the data to be transformed
interval is a list/vector/tuple of length 2 with the lower and upper interval bounds (prior to transformation); outside this interval, the transformed values are nan
kind is a string with the type of transformation
do_forward is a bool determining the direction of the transformation (True means forward transformation, False means inverse transformation)
OUTPUT:
out_data is the transformed dataset with the same shape as in_data and with nans for values outside the interval
"""
out_data = np.copy(in_data)
out_data[(out_data < interval[0]) | (out_data > interval[1])] = np.nan
# note: no check of data and kind is done here,
# it is assumed that the inputs are correct
if kind == "db":
if do_forward:
out_data = 10 * np.log10(out_data)
else:
out_data = 10 ** (0.1 * in_data)
elif kind == "-db":
if do_forward:
out_data = -10 * np.log10(in_data)
else:
out_data = 10 ** (-0.1 * in_data)
elif kind == "-2db":
if do_forward:
out_data = -10 * np.log10(2 * in_data)
else:
out_data = 10 ** (-0.1 * in_data) / 2
elif kind == "cosdb":
if do_forward:
out_data = 10 * np.log10(np.cos(in_data))
else:
out_data = np.arccos(10 ** (0.1 * in_data))
elif kind == "cos":
if do_forward:
out_data = np.cos(in_data)
else:
out_data = np.arccos(in_data)
elif kind == "round":
out_data = np.round(in_data)
else:
out_data = np.copy(in_data)
return out_data
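# --- illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical round trip through the "db" transformation: values inside
# the interval are converted to decibels and back, values outside the interval become nan.
# The helper name and the example numbers are introduced only for illustration.
def _example_transform_function():
    import numpy as np
    backscatter = np.array([0.05, 0.2, 5.0])
    interval = [0.01, 1.0]
    in_db = transform_function(backscatter, interval, "db", do_forward=True)
    # the last value (5.0) falls outside the interval and is therefore nan
    assert np.isnan(in_db[-1])
    recovered = transform_function(in_db, [-np.inf, np.inf], "db", do_forward=False)
    assert np.allclose(recovered[:2], backscatter[:2])
    return in_db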
# %% function for calculating a given statistic for polygons of arbitrary shape
def stats_on_polygons(
data, pixel_axis_east, pixel_axis_north, reference_polygons, method
):
"""
stats = stats_on_polygons(data_image,pixel_axis_east,pixel_axis_north,polygons,method)
Calculates a statistic from an image over all polygons
INPUT:
data_image is a 2D array
pixel_axis_east, pixel_axis_north are two 1D arrays defining the east and north axes for the data_image
polygons is a list of N Polygon objects over which the statistic is calculated
    method is a string with the statistic to be calculated ("mean", "nan_mean", "median", "nan_median", "mode", "mode_knp", "sum", or "range")
    OUTPUT:
    stats is a 1D array with length N containing the statistic calculated for each polygon
"""
# initialize inputs:
Nx, Ny = data.shape
data_east_min = min(pixel_axis_east.flatten())
data_east_delta = (
max(pixel_axis_east.flatten()) - min(pixel_axis_east.flatten())
) / Nx
data_north_in = pixel_axis_north.flatten()[0]
data_north_delta = (
pixel_axis_north.flatten()[-1] - pixel_axis_north.flatten()[0]
) / Ny
# input data geotransform:
data_geotransform = [
data_east_min,
data_east_delta,
0,
data_north_in,
0,
data_north_delta,
]
# Setup working spatial reference
sr_wkt = 'LOCAL_CS["arbitrary"]'
sr = osr.SpatialReference(sr_wkt)
    # initialize output stats vector
polygon_means_vec = np.nan * np.zeros(len(reference_polygons))
for index, polygon in enumerate(reference_polygons):
# Create a memory raster (gdal raster) to rasterize into.
raster_support_driver = gdal.GetDriverByName("MEM").Create(
"", Ny, Nx, 1, gdal.GDT_Byte
)
raster_support_driver.SetGeoTransform(data_geotransform)
raster_support_driver.SetProjection(sr_wkt)
        # Create a memory polygon layer (ogr vector) to rasterize from.
poly_layer_support_driver = ogr.GetDriverByName("Memory").CreateDataSource(
"wrk"
)
poly_layer = poly_layer_support_driver.CreateLayer("poly", srs=sr)
# Add a polygon to the layer.
if isinstance(polygon, str):
wkt_geom = polygon
elif isinstance(polygon, Polygon):
wkt_geom = polygon.wkt
feat = ogr.Feature(poly_layer.GetLayerDefn())
feat.SetGeometryDirectly(ogr.Geometry(wkt=wkt_geom))
poly_layer.CreateFeature(feat)
# Run the rasterization of polygon over raster data driver
gdal.RasterizeLayer(raster_support_driver, [1], poly_layer, burn_values=[1])
# read the mask from the rasterized polygon over data driver
bandmask = raster_support_driver.GetRasterBand(1)
        datamask = bandmask.ReadAsArray().astype(bool)  # np.bool is deprecated; use the builtin bool
if np.any(datamask):
# Calculate statistics of zonal raster
if method == "mean":
polygon_means_vec[index] = np.mean(data[datamask])
elif method == "nan_mean":
polygon_means_vec[index] = np.nanmean(data[datamask])
elif method == "median":
polygon_means_vec[index] = np.median(data[datamask])
elif method == "nan_median":
polygon_means_vec[index] = np.nanmedian(data[datamask])
elif method == "mode":
polygon_means_vec[index] = sp.stats.mode(data[datamask])[0]
elif method == "mode_knp":
if np.any(data[datamask] <= 0):
polygon_means_vec[index] = np.nan
else:
polygon_means_vec[index] = sp.stats.mode(data[datamask])[0]
elif method == "sum":
polygon_means_vec[index] = np.sum(data[datamask])
elif method == "range":
polygon_means_vec[index] = np.max(data[datamask]) - np.min(
data[datamask]
)
raster_support_driver = None
poly_layer_support_driver = None
return polygon_means_vec
# %% function for calculating a given statistic on all samples on a grid and all polygons
def stats_on_all_samples(
data_image, data_resolution, pixel_axis_east, pixel_axis_north, polygons, method,
):
"""
stats = stats_on_all_samples(data_image,data_resolution,pixel_axis_east,pixel_axis_north,polygons,method)
Calculates a statistic from an image over all polygons with size larger or equal to the resolution
INPUT:
data_image is a 2D array
data_resolution is resolution in metres
pixel_axis_east, pixel_axis_north are two 1D arrays defining the east and north axes for the data_image
polygons is a list of N Polygon objects over which the statistic is calculated
method is a string with the statistic to be calculated (see "stats_on_polygons" to see the statistics that can be calculated)
OUTPUT:
    stats is a 1D array with length N containing the statistic calculated for each polygon (nan is returned if polygon.area < data_resolution**2)
"""
stats_polygons = stats_on_polygons(
data_image, pixel_axis_east, pixel_axis_north, polygons, method
)
def get_polygon_areas(polygons):
return np.array([polygon.area for polygon in polygons])
# kill the polygons with area smaller than the resolution cell
stats_polygons[get_polygon_areas(polygons) < data_resolution ** 2] = np.nan
return stats_polygons
|
the-stack_106_25916 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) cccs-is.
# Distributed under the terms of the Modified BSD License.
import pytest
from ipykernel.comm import Comm
from ipywidgets import Widget
class MockComm(Comm):
"""A mock Comm object.
Can be used to inspect calls to Comm's open/send/close methods.
"""
comm_id = 'a-b-c-d'
kernel = 'Truthy'
def __init__(self, *args, **kwargs):
self.log_open = []
self.log_send = []
self.log_close = []
super(MockComm, self).__init__(*args, **kwargs)
def open(self, *args, **kwargs):
self.log_open.append((args, kwargs))
def send(self, *args, **kwargs):
self.log_send.append((args, kwargs))
def close(self, *args, **kwargs):
self.log_close.append((args, kwargs))
_widget_attrs = {}
undefined = object()
@pytest.fixture
def mock_comm():
_widget_attrs['_comm_default'] = getattr(Widget, '_comm_default', undefined)
Widget._comm_default = lambda self: MockComm()
_widget_attrs['_ipython_display_'] = Widget._ipython_display_
def raise_not_implemented(*args, **kwargs):
raise NotImplementedError()
Widget._ipython_display_ = raise_not_implemented
yield MockComm()
for attr, value in _widget_attrs.items():
if value is undefined:
delattr(Widget, attr)
else:
setattr(Widget, attr, value)
|
the-stack_106_25918 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_data_service_client
logger = get_logger(__name__)
aem_extension_info = {
'Linux': {
'publisher': 'Microsoft.OSTCExtensions',
'name': 'AzureEnhancedMonitorForLinux',
'version': '3.0'
},
'Windows': {
'publisher': 'Microsoft.AzureCAT.AzureEnhancedMonitoring',
'name': 'AzureCATExtensionHandler',
'version': '2.2'
}
}
def set_aem(cmd, resource_group_name, vm_name, skip_storage_analytics=False):
aem = EnhancedMonitoring(cmd, resource_group_name, vm_name,
vm_client=get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_COMPUTE),
storage_client=get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_STORAGE),
skip_storage_analytics=skip_storage_analytics)
aem.enable()
def delete_aem(cmd, resource_group_name, vm_name):
aem = EnhancedMonitoring(cmd, resource_group_name, vm_name,
vm_client=get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_COMPUTE),
storage_client=None)
aem.delete()
def verify_aem(cmd, resource_group_name, vm_name, wait_time_in_minutes=15, skip_storage_check=False):
aem = EnhancedMonitoring(cmd, resource_group_name, vm_name,
vm_client=get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_COMPUTE),
storage_client=get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_STORAGE))
aem.verify(skip_storage_check, wait_time_in_minutes)
class EnhancedMonitoring(object):
def __init__(self, cmd, resource_group, vm_name, vm_client,
storage_client, skip_storage_analytics=None):
self._vm_client = vm_client
self._storage_client = storage_client
self._resource_group = resource_group
self._cmd = cmd
self._vm = vm_client.virtual_machines.get(resource_group, vm_name, expand='instanceView')
os_type = self._vm.storage_profile.os_disk.os_type.value.lower()
self._extension = aem_extension_info['Linux'] if (os_type == 'linux') else aem_extension_info['Windows']
self._skip_storage_analytics = skip_storage_analytics
def enable(self):
pub_cfg, pri_cfg = self._build_extension_cfgs(self._get_disk_info())
VirtualMachineExtension = self._cmd.get_models('VirtualMachineExtension',
resource_type=ResourceType.MGMT_COMPUTE,
operation_group='virtual_machine_extensions')
existing_ext = self._get_aem_extension()
extension_instance_name = existing_ext.name if existing_ext else self._extension['name']
existing_ext = VirtualMachineExtension(location=self._vm.location,
publisher=self._extension['publisher'],
virtual_machine_extension_type=self._extension['name'],
protected_settings={
'cfg': [{'key': k, 'value': pri_cfg[k]} for k in pri_cfg]
},
type_handler_version=self._extension['version'],
settings={
'cfg': [{'key': k, 'value': pub_cfg[k]} for k in pub_cfg]
},
auto_upgrade_minor_version=True)
return self._vm_client.virtual_machine_extensions.create_or_update(self._resource_group, self._vm.name,
extension_instance_name,
existing_ext)
def delete(self):
existing_ext = self._get_aem_extension()
if not existing_ext:
raise CLIError("'{}' is not installed".format(self._extension['name']))
return self._vm_client.virtual_machine_extensions.delete(self._resource_group, self._vm.name,
existing_ext.name)
def verify(self, skip_storage_check, wait_time_in_minutes):
import datetime
success = True
aem_ext = self._get_aem_extension()
result = {}
succ_word, fail_word = 'OK', 'Not OK'
if aem_ext:
logger.warning('Azure Enhanced Monitoring Extension for SAP Installation check: %s', succ_word)
else:
raise CLIError('Azure Enhanced Monitoring Extension for SAP was not installed')
disk_info = self._get_disk_info()
managed_disk = disk_info['managed_disk']
# os disk
logger.warning('Storage Metrics check...')
if not skip_storage_check:
unmanaged_disks = [] if managed_disk else [disk_info['os_disk']] + disk_info['data_disks']
for disk in unmanaged_disks:
storage_account_name = disk['account_name']
logger.warning("\tStorage Metrics check for '%s'...", storage_account_name)
if disk['is_premium']:
logger.warning("\t\tStorage Metrics not available for Premium Storage account '%s'...",
storage_account_name)
else:
logger.warning("\t\tStorage Metrics configuration check for '%s'...", storage_account_name)
storage_client = self._get_storage_client(storage_account_name, disk['key'])
service_properties = storage_client.get_blob_service_properties()
storage_cfg_ok = EnhancedMonitoring._check_storage_analytics(service_properties)
if storage_cfg_ok:
logger.warning('\t\tStorage Metrics configuration check: %s', succ_word)
else:
success = False
logger.error('\t\tMetrics configuration check: %s', fail_word)
if storage_cfg_ok:
logger.warning("\t\tStorage Metrics data check for '%s'. Wait up to %s minutes ...",
storage_account_name, wait_time_in_minutes)
filter_str = "Timestamp gt datetime'{}'".format(
(datetime.datetime.utcnow() + datetime.timedelta(minutes=-5)).isoformat())
result = self._check_table_and_content(storage_account_name, disk['key'],
'$MetricsMinutePrimaryTransactionsBlob', filter_str,
wait_time_in_minutes)
if result:
logger.warning("\t\tStorage Metrics data check '%s': %s", storage_account_name, succ_word)
else:
success = False
logger.error("\t\tStorage Metrics data check '%s': %s", storage_account_name, fail_word)
logger.warning('Azure Enhanced Monitoring Extension for SAP public configuration check...')
expected, _ = self._build_extension_cfgs(disk_info)
expected.pop('wad.isenabled')
public_cfg = {x['key']: x['value'] for x in self._vm.resources[0].settings['cfg']}
diffs = {k: [expected[k], public_cfg.get(k, None)] for k in expected if expected[k] != public_cfg.get(k, None)}
if diffs:
success = False
for err in diffs:
logger.error("\tConfiguration Error: Expected: '%s' Actual: '%s'", diffs[err][0], diffs[err][1])
else:
logger.warning('Configuration OK')
if not success:
raise CLIError('Configuration Not OK.')
def _build_extension_cfgs(self, disk_info):
vm_size = str(self._vm.hardware_profile.vm_size)
        # keep public and private settings in separate dicts so storage keys stay in protected_settings only
        pub_cfg, pri_cfg = {}, {}
vm_size_mapping = {
'ExtraSmall': 'ExtraSmall (A0)',
'Standard_A0': 'ExtraSmall (A0)',
'Basic_A0': 'ExtraSmall (A0)',
'Small': 'Small (A1)',
'Medium': 'Medium (A2)',
'Large': 'Large (A3)',
'ExtraLarge': 'ExtraLarge (A4)'
}
        vm_size = vm_size_mapping.get(vm_size, vm_size)
pub_cfg.update({
'vmsize': vm_size,
'vm.role': 'IaaS',
'vm.memory.isovercommitted': 0,
'vm.cpu.isovercommitted': 1 if vm_size == 'ExtraSmall (A0)' else 0,
'script.version': '3.0.0.0',
'verbose': '0',
'href': 'http://aka.ms/sapaem'
})
vm_sla_mappings = EnhancedMonitoring._populate_vm_sla_mappings()
vm_sla = vm_sla_mappings.get(vm_size)
if vm_sla:
pub_cfg.update({
'vm.sla.throughput': vm_sla['TP'],
'vm.sla.iops': vm_sla['IOPS']
})
managed_disk = disk_info['managed_disk']
# os disk
os_disk = disk_info['os_disk']
if os_disk['is_premium']:
sla = EnhancedMonitoring._get_disk_sla(os_disk['size'])
pub_cfg.update({
'osdisk.type': 'Premium',
'osdisk.sla.throughput': sla['TP'],
'osdisk.sla.iops': sla['IOPS'],
})
if managed_disk and not os_disk['is_premium']:
logger.warning('Standard Managed Disks are not supported. '
'Extension will be installed but no disk metrics will be available.')
else:
pub_cfg.update({
'osdisk.name': os_disk['name'],
'osdisk.caching': os_disk['caching'],
})
if not managed_disk:
pub_cfg.update({
'osdisk.account': os_disk['account_name']
})
if not os_disk['is_premium']:
pub_cfg.update({
'osdisk.type': 'Standard',
'osdisk.connminute': os_disk['account_name'] + '.minute',
'osdisk.connhour': os_disk['account_name'] + '.hour',
})
# data disks
for i, disk in enumerate(disk_info['data_disks']):
suffix = '.' + str(i + 1)
if disk['is_premium']:
sla = EnhancedMonitoring._get_disk_sla(disk['size'])
pub_cfg.update({
'disk.type' + suffix: 'Premium',
'disk.sla.throughput' + suffix: sla['TP'],
'disk.sla.iops' + suffix: sla['IOPS'],
})
if managed_disk and not disk['is_premium']:
logger.warning('Standard Managed Disks are not supported. '
'Extension will be installed but no disk metrics will be available.')
else:
pub_cfg.update({
'disk.lun' + suffix: disk['lun'],
'disk.name' + suffix: disk['name'],
'disk.caching' + suffix: disk['caching'],
})
if not managed_disk:
pub_cfg.update({
'disk.account' + suffix: disk['account_name']
})
                    if not disk['is_premium']:
pub_cfg.update({
'disk.type' + suffix: 'Standard',
'disk.connminute' + suffix: disk['account_name'] + '.minute',
'disk.connhour' + suffix: disk['account_name'] + '.hour',
})
if not managed_disk:
unmanaged_disks = [disk_info['os_disk']] + disk_info['data_disks']
for disk in unmanaged_disks:
account_name = disk['account_name']
if disk['is_premium']:
logger.info("'%s' is skipped - Storage Account Metrics are not available "
"for Premium Type Storage.", disk['name'])
pub_cfg.update({
account_name + '.hour.ispremium': 1,
account_name + '.minute.ispremium': 1,
})
else:
if not self._skip_storage_analytics:
self._enable_storage_analytics(account_name, disk['key'])
pri_cfg.update({
account_name + '.hour.key': disk['key'],
account_name + '.minute.key': disk['key'],
})
pub_cfg.update({
account_name + '.hour.uri': disk['table_endpoint'] + '$MetricsHourPrimaryTransactionsBlob',
account_name + '.minute.uri': disk['table_endpoint'] + '$MetricsMinutePrimaryTransactionsBlob',
account_name + '.hour.name': disk['account_name'],
account_name + '.minute.name': disk['account_name']
})
pub_cfg['wad.isenabled'] = 0
return pub_cfg, pri_cfg
def _get_aem_extension(self):
existing_ext = None
if self._vm.resources:
existing_ext = next((x for x in self._vm.resources
if x.virtual_machine_extension_type.lower() == self._extension['name'].lower() and
x.publisher.lower() == self._extension['publisher'].lower()), None)
return existing_ext
def _get_disk_info(self):
from msrestazure.tools import parse_resource_id # pylint: disable=import-error
disks_info = {}
disks_info['managed_disk'] = bool(getattr(self._vm.storage_profile.os_disk, 'managed_disk', None))
if disks_info['managed_disk']:
res_info = parse_resource_id(self._vm.storage_profile.os_disk.managed_disk.id)
disk = self._vm_client.disks.get(res_info['resource_group'], res_info['name'])
disks_info['os_disk'] = {
'name': disk.name,
'size': disk.disk_size_gb,
'is_premium': disk.sku.tier.lower() == 'premium',
'caching': self._vm.storage_profile.os_disk.caching.value,
}
disks_info['data_disks'] = []
for data_disk in self._vm.storage_profile.data_disks:
res_info = parse_resource_id(data_disk.managed_disk.id)
disk = self._vm_client.disks.get(res_info['resource_group'], res_info['name'])
disks_info['data_disks'].append({
'name': disk.name,
'size': disk.disk_size_gb,
'is_premium': disk.sku.tier.lower() == 'premium',
'caching': data_disk.caching.value,
'lun': data_disk.lun
})
else:
storage_accounts = list(self._storage_client.storage_accounts.list())
blob_uri = self._vm.storage_profile.os_disk.vhd.uri
parts = list(filter(None, blob_uri.split('/')))
storage_account_name = parts[1].split('.')[0]
disk_name, container_name = parts[-1], parts[-2]
storage_account = next(x for x in storage_accounts if x.name.lower() == storage_account_name.lower())
rg = parse_resource_id(storage_account.id)['resource_group']
key = self._storage_client.storage_accounts.list_keys(rg, storage_account.name).keys[0].value
disks_info['os_disk'] = {
'name': disk_name,
'account_name': storage_account_name,
'table_endpoint': storage_account.primary_endpoints.table,
'is_premium': storage_account.sku.tier.value.lower() == 'premium',
'caching': self._vm.storage_profile.os_disk.caching.value,
'key': key
}
if disks_info['os_disk']['is_premium']:
disks_info['os_disk']['size'] = self._get_blob_size(storage_account.name, container_name,
disk_name, key)
disks_info['data_disks'] = []
for data_disk in self._vm.storage_profile.data_disks:
blob_uri = data_disk.vhd.uri
parts = list(filter(None, blob_uri.split('/')))
storage_account_name = parts[1].split('.')[0]
disk_name, container_name = parts[-1], parts[-2]
storage_account = next(x for x in storage_accounts if x.name.lower() == storage_account_name.lower())
rg = parse_resource_id(storage_account.id)['resource_group']
key = self._storage_client.storage_accounts.list_keys(rg, storage_account.name).keys[0].value
is_premium = storage_account.sku.tier.value.lower() == 'premium'
disks_info['data_disks'].append({
'name': disk_name,
'account_name': storage_account_name,
'table_endpoint': storage_account.primary_endpoints.table,
'is_premium': is_premium,
                    'caching': data_disk.caching.value,
'key': key,
'lun': data_disk.lun
})
if is_premium:
disks_info['data_disks'][-1]['size'] = self._get_blob_size(storage_account.name, container_name,
disk_name, key)
return disks_info
def _get_blob_size(self, storage_account_name, container, blob, key):
storage_client = self._get_storage_client(storage_account_name, key)
# convert to GB
return int(storage_client.get_blob_properties(container, blob).properties.content_length / (1 << 30))
def _get_storage_client(self, storage_account_name, key):
BlockBlobService = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_STORAGE,
'blob.blockblobservice#BlockBlobService')
return get_data_service_client(
self._cmd.cli_ctx,
BlockBlobService,
storage_account_name,
key,
endpoint_suffix=self._cmd.cli_ctx.cloud.suffixes.storage_endpoint) # pylint: disable=no-member
def _enable_storage_analytics(self, storage_account_name, key):
storage_client = self._get_storage_client(storage_account_name, key)
service_properties = storage_client.get_blob_service_properties()
if not EnhancedMonitoring._check_storage_analytics(service_properties):
t_logging, t_retention_policy, t_metrics = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_STORAGE, 'Logging',
'RetentionPolicy', 'Metrics', mod='common.models')
retention_policy = t_retention_policy(enabled=True, days=13)
logging = t_logging(delete=True, read=True, write=True, retention_policy=retention_policy)
minute_metrics = t_metrics(enabled=True, include_apis=True, retention_policy=retention_policy)
if getattr(service_properties, 'hour_metrics', None):
service_properties.hour_metrics.retention_policy.days = 13
storage_client.set_blob_service_properties(logging, minute_metrics=minute_metrics,
hour_metrics=service_properties.hour_metrics)
@staticmethod
def _check_storage_analytics(service_properties):
return (service_properties and service_properties.logging and
service_properties.minute_metrics and service_properties.minute_metrics.include_apis and
service_properties.minute_metrics.retention_policy.days)
def _check_table_and_content(self, storage_account_name, key, table_name,
filter_string, timeout_in_minutes):
import time
sleep_period = 15
TableService = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_COSMOS_TABLE, 'table#TableService')
table_client = get_data_service_client(
self._cmd.cli_ctx,
TableService,
storage_account_name,
key,
endpoint_suffix=self._cmd.cli_ctx.cloud.suffixes.storage_endpoint)
seconds = 60 * timeout_in_minutes
waited = 0
while waited < seconds:
entities = table_client.query_entities(table_name, filter_string)
if entities.items:
return True
            logger.warning("\t\t\tWait %s seconds for table '%s' to have data propagated ...",
sleep_period, table_name)
time.sleep(sleep_period)
waited += sleep_period
return False
@staticmethod
def _get_disk_sla(disk_size):
sla = {}
if 0 < disk_size <= 32:
# P4
sla['IOPS'] = 120
sla['TP'] = 125
elif 0 < disk_size <= 64:
# P6
sla['IOPS'] = 240
sla['TP'] = 50
elif 0 < disk_size <= 128:
# P10
sla['IOPS'] = 500
sla['TP'] = 100
elif 0 < disk_size <= 512:
# P20
sla['IOPS'] = 2300
sla['TP'] = 150
elif 0 < disk_size <= 1024:
# P30
sla['IOPS'] = 5000
sla['TP'] = 200
elif 0 < disk_size <= 2048:
# P40
sla['IOPS'] = 7500
sla['TP'] = 250
elif 0 < disk_size <= 4095:
# P50
sla['IOPS'] = 7500
sla['TP'] = 250
else:
raise CLIError("unsupported disk size for Premium Storage: '{}'".format(disk_size))
return sla
@staticmethod
def _populate_vm_sla_mappings():
mapping = {}
mapping['Standard_DS1'] = {
'IOPS': 3200,
'TP': 32,
}
mapping.update(dict.fromkeys(['Standard_DS1_v2', 'Standard_D2s_v3', 'Standard_E2s_v3'], {
'IOPS': 3200,
'TP': 48,
}))
mapping['Standard_DS2'] = {
'IOPS': 6400,
'TP': 64,
}
mapping.update(dict.fromkeys(['Standard_DS2_v2', 'Standard_D4s_v3', 'Standard_E4s_v3'], {
'IOPS': 6400,
'TP': 96,
}))
mapping['Standard_DS3'] = {
'IOPS': 12800,
'TP': 128,
}
mapping.update(dict.fromkeys(['Standard_DS3_v2', 'Standard_D8s_v3', 'Standard_E8s_v3'], {
'IOPS': 12800,
'TP': 192,
}))
mapping['Standard_DS4'] = {
'IOPS': 25600,
'TP': 256,
}
mapping.update(dict.fromkeys(['Standard_DS4_v2', 'Standard_D16s_v3', 'Standard_E16s_v3'], {
'IOPS': 25600,
'TP': 384,
}))
mapping.update(dict.fromkeys(['Standard_DS5_v2', 'Standard_D32s_v3'], {
'IOPS': 51200,
'TP': 768,
}))
mapping['Standard_DS11'] = {
'IOPS': 6400,
'TP': 64,
}
mapping['Standard_DS11_v2'] = {
'IOPS': 6400,
'TP': 96,
}
mapping['Standard_DS12'] = {
'IOPS': 12800,
'TP': 128,
}
mapping['Standard_DS12_v2'] = {
'IOPS': 12800,
'TP': 192,
}
mapping['Standard_DS13'] = {
'IOPS': 25600,
'TP': 256,
}
mapping['Standard_DS13_v2'] = {
'IOPS': 25600,
'TP': 384,
}
mapping['Standard_DS14'] = {
'IOPS': 51200,
'TP': 512,
}
mapping.update(dict.fromkeys(['Standard_DS14_v2', 'Standard_E32s_v3'], {
'IOPS': 51200,
'TP': 768,
}))
mapping['Standard_DS15_v2'] = {
'IOPS': 64000,
'TP': 960,
}
mapping['Standard_GS1'] = {
'IOPS': 5000,
'TP': 125,
}
mapping['Standard_GS2'] = {
'IOPS': 10000,
'TP': 250,
}
mapping['Standard_GS3'] = {
'IOPS': 20000,
'TP': 500,
}
mapping['Standard_GS4'] = {
'IOPS': 40000,
'TP': 1000,
}
mapping['Standard_GS5'] = {
'IOPS': 80000,
'TP': 2000,
}
mapping.update(dict.fromkeys(['Standard_M64ms', 'Standard_M64s'], {
'IOPS': 40000,
'TP': 1000,
}))
mapping['Standard_M128s'] = {
'IOPS': 80000,
'TP': 2000,
}
mapping.update(dict.fromkeys(['Standard_E64s_v3', 'Standard_D64s_v3'], {
'IOPS': 80000,
'TP': 1200,
}))
return mapping
|
the-stack_106_25919 | __version__ = '4.14.0'
def setup(app):
# We can't do the import at the module scope as setup.py has to be able to
# import this file to read __version__ without hitting any syntax errors
# from both Python 2 & Python 3.
# By the time this function is called, the directives code will have been
# converted with 2to3 if appropriate
from . import directives
directives.setup(app)
return {
'version': __version__,
'parallel_read_safe': True,
'parallel_write_safe': True
}
|
the-stack_106_25922 | #!/usr/bin/env python3
# coding: utf-8
from src.lappy.models.well import Well
from src.lappy.models.point import Point
from src.lappy.models.pointPair import PointPair
from src.lappy.models.vector import Vector
from src.lappy.services import geom_oper, vect_oper, geom_numpy
from src.lappy.services import well_track_service
import numpy as np
class HorWellMaker(object):
"""
"""
class PointPairs(object):
def __init__(self):
self.pairs = []
def make_thin(self, well, nw):
"""
args:
nw - segment points count
"""
if well is None or well.track is None or nw is None:
return None, None
pts = np.empty((0, 2))
seg = np.empty((0, 2))
# forward
for k in range(len(well.track)-1):
res = geom_numpy.line(well.track[k], well.track[k+1],
nw, use_last_pt=False)
if res[0] is not None:
pts = np.vstack([pts, res[0]])
# backward
for k in range(len(well.track)-1, 0, -1):
res = geom_numpy.line(well.track[k], well.track[k-1],
nw, use_last_pt=False)
if res[0] is not None:
pts = np.vstack([pts, res[0]])
N = len(pts)
i = np.arange(N)
seg = np.stack([i, i + 1], axis=1) % N
return [[well.track[0].x, well.track[0].y], pts, seg]
def make_real(self, well: Well, nw: int, hnw: int):
"""
"""
# check well track suitable
if not well_track_service.well_track_suits(well.track):
print('well track is not suitable: sharp angles')
return None, None
tp = self.__get_line_points(well)
pts = np.empty((0, 2))
sf = geom_numpy.sector(tp[0].pl, well.track[0],
nw, well.track[1], np.pi, use_last_pt=False)
if sf[0] is not None:
pts = np.vstack([pts, sf[0]])
ltp = len(tp)
for i in range(ltp-1):
lnn = geom_numpy.line(tp[i].pr, tp[i+1].pr, hnw, use_last_pt=False)
if lnn[0] is not None:
pts = np.vstack([pts, lnn[0]])
sf = geom_numpy.sector(tp[ltp-1].pr, well.track[ltp-1],
nw, well.track[ltp-2], np.pi, use_last_pt=False)
if sf[0] is not None:
pts = np.vstack([pts, sf[0]])
for i in range(ltp-1):
lnn = geom_numpy.line(tp[ltp-1 - i].pl, tp[ltp-1 - (i+1)].pl, hnw,
use_last_pt=False)
if lnn[0] is not None:
pts = np.vstack([pts, lnn[0]])
N = len(pts)
i = np.arange(N)
seg = np.stack([i, i + 1], axis=1) % N
return [[well.track[0].x, well.track[0].y], pts, seg]
def __get_line_points(self, well: Well):
"""
"""
rw = well.radius
track = well.track
prs = [self.PointPairs() for i in range(len(well.track))]
for i in range(len(well.track)-1):
p0, p1 = track[i], track[i + 1]
pr1 = self.__get_bound_points(p0, p1, rw)
pr2 = self.__get_bound_points(p1, p0, rw)
prs[i].pairs.append(pr1)
prs[i+1].pairs.append(pr2)
# swap left and right
self.__order_left_right_points(prs, well.track)
result = self.__merge_points(prs, well.track, well.radius)
return result
def __order_left_right_points(self, pts, track):
"""
Args:
pts : list[PointPairs]
track : well track
"""
def check_swap(p1, q1, p2, q2):
res, p = geom_oper.is_segments_intersect(p1, q1, p2, q2)
return True if res else False
def do_swap(pts, k, j):
pts[k].pairs[j].pl, pts[k].pairs[j].pr = \
pts[k].pairs[j].pr, pts[k].pairs[j].pl
def intersect_track(p1, q1, track):
for k in range(len(track)-1):
p2 = track[k]
q2 = track[k+1]
res = check_swap(p1, q1, p2, q2)
if res:
return True
return False
for k, p in enumerate(pts):
for j in range(1, len(p.pairs)+1):
if k == len(pts)-1 and j == len(p.pairs):
continue
p1 = p.pairs[j-1].pr
q1 = pts[k+1].pairs[0].pr if j == len(p.pairs) \
else p.pairs[j].pr
if intersect_track(p1, q1, track):
if j == len(p.pairs):
a, b = k+1, 0
else:
a, b = k, j
do_swap(pts, a, b)
def __merge_points(self, prs, track, r):
result = []
for i, pr in enumerate(prs):
while (len(pr.pairs) != 1):
prs[i].pairs = self.__merge_inner_points(pr.pairs, track[i], r)
result.append(PointPair(pr.pairs[0].pl, pr.pairs[0].pr))
return result
def __merge_inner_points(self, prs, tp, r):
"""
"""
if len(prs) == 1:
return prs[0]
result = []
for i in range(1, len(prs)):
pl1, pr1 = prs[i-1].pl, prs[i-1].pr
pl2, pr2 = prs[i].pl, prs[i].pr
pl = self.__get_merged_inner_pair(pl1, pl2, tp, r)
pr = self.__get_merged_inner_pair(pr1, pr2, tp, r)
pp = PointPair(pl, pr)
result.append(pp)
return result
def __get_merged_inner_pair(self, p1, p2, tp, r):
"""
"""
e = Point((p1.x + p2.x) / 2.0, (p1.y + p2.y) / 2.0, -1)
ux, uy = vect_oper.normalize(e, tp)
x, y = tp.x - r * ux, tp.y - r * uy
return Point(x, y, -1)
def __get_bound_points(self, pt_main, pt2, rw):
"""
returns:
PointPair with "left" and "right" points
"""
[x0, y0] = [pt_main.x, pt_main.y]
[x1, y1] = [pt2.x, pt2.y]
[asg, bsg] = geom_oper.get_line_cf(x0, y0, x1, y1)
if asg is None: # x=const
xp0 = x0 + rw
yp0 = y0
xp1 = x0 - rw
yp1 = y0
elif abs(asg - 0.0) < 1e-6: # y = const
xp0 = x0
yp0 = y0 + rw
xp1 = x0
yp1 = y0 - rw
else:
[ap, bp] = geom_oper.ortho_line_cf(asg, bsg, x0, y0)
x2 = x0 + 1.0
y2 = ap * x2 + bp
vx, vy = x2 - x0, y2 - y0
ux, uy = vect_oper.normalize(vx, vy)
xp0 = x0 + rw * ux
yp0 = y0 + rw * uy
xp1 = x0 - rw * ux
yp1 = y0 - rw * uy
p0 = Point(xp0, yp0, -1)
p1 = Point(xp1, yp1, -1)
result = PointPair(p0, p1)
return result
|
the-stack_106_25925 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: MuadDib
import re,traceback,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import source_utils
from resources.lib.modules import tvmaze
from resources.lib.modules import log_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.genre_filter = ['animation', 'anime']
self.domains = ['gogoanimemobile.com', 'gogoanimemobile.net', 'gogoanime.io']
self.base_link = 'http://ww1.gogoanime.io'
self.search_link = '/search.html?keyword=%s'
self.episode_link = '/%s-episode-%s'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
tv_maze = tvmaze.tvMaze()
tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
tvshowtitle = tvshowtitle['name']
t = cleantitle.get(tvshowtitle)
q = urlparse.urljoin(self.base_link, self.search_link)
q = q % urllib.quote_plus(tvshowtitle)
r = client.request(q)
r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
r = client.parseDOM(r, 'li')
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
r = r[0][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
failure = traceback.format_exc()
log_utils.log('GoGoAnime - Exception: \n' + str(failure))
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
tv_maze = tvmaze.tvMaze()
num = tv_maze.episodeAbsoluteNumber(tvdb, int(season), int(episode))
url = [i for i in url.strip('/').split('/')][-1]
url = self.episode_link % (url, num)
return url
except:
failure = traceback.format_exc()
log_utils.log('GoGoAnime - Exception: \n' + str(failure))
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = client.parseDOM(r, 'iframe', ret='src')
for u in r:
try:
if not u.startswith('http') and not 'vidstreaming' in u: raise Exception()
url = client.request(u)
url = client.parseDOM(url, 'source', ret='src')
for i in url:
try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
except: pass
except:
pass
return sources
except:
failure = traceback.format_exc()
log_utils.log('GoGoAnime - Exception: \n' + str(failure))
return sources
def resolve(self, url):
return directstream.googlepass(url)
|
the-stack_106_25927 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bullets received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bulletd or Bullet-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the Bullet Core data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/BulletCore/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "BulletCore")
return os.path.expanduser("~/.bulletcore")
def read_bitcoin_config(dbdir):
"""Read the bullet.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bullet.conf"))))
return dict(config_parser.items("all"))
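# --- illustrative usage sketch (not part of the original script) ---
# A minimal, hypothetical demonstration of read_bitcoin_config: it writes a tiny
# bullet.conf into a temporary directory and reads it back as a dictionary.
# The helper name and the config values are introduced only for illustration.
def _example_read_bitcoin_config():
    import shutil, tempfile
    dbdir = tempfile.mkdtemp()
    try:
        with open(os.path.join(dbdir, "bullet.conf"), "w") as f:
            f.write("rpcuser=alice\nrpcpassword=secret\n# a comment line\ntestnet=1\n")
        config = read_bitcoin_config(dbdir)
        # config == {'rpcuser': 'alice', 'rpcpassword': 'secret', 'testnet': '1'}
        return config
    finally:
        shutil.rmtree(dbdir)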
def connect_JSON(config):
"""Connect to a Bullet Core JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19998 if testnet else 9998
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bulletd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bulletd):
info = bulletd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bulletd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bulletd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bulletd):
address_summary = dict()
address_to_account = dict()
for info in bulletd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bulletd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bulletd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bullet-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
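# --- illustrative usage sketch (not part of the original script) ---
# A minimal, hypothetical demonstration of select_coins: unspent outputs are consumed
# in order until the needed amount is covered, and the leftover change is returned.
# The txids and amounts below are made up for illustration.
def _example_select_coins():
    unspent = [
        {"txid": "aa" * 32, "vout": 0, "amount": Decimal("0.5")},
        {"txid": "bb" * 32, "vout": 1, "amount": Decimal("0.8")},
        {"txid": "cc" * 32, "vout": 0, "amount": Decimal("2.0")},
    ]
    outputs, change = select_coins(Decimal("1.0"), unspent)
    # the first two outputs (0.5 + 0.8 = 1.3) cover the 1.0 needed, leaving 0.3 change
    assert len(outputs) == 2
    assert change == Decimal("0.3")
    return outputs, change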
def create_tx(bulletd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bulletd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bulletd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bulletd.createrawtransaction(inputs, outputs)
signed_rawtx = bulletd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bulletd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bulletd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
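# --- illustrative usage sketch (not part of the original script) ---
# A minimal, hypothetical decoded-transaction fragment showing how compute_amount_out
# sums the values of all outputs; compute_amount_in needs a running bulletd, so it is
# not exercised here.
def _example_compute_amount_out():
    txinfo = {"vout": [{"value": Decimal("0.7")}, {"value": Decimal("0.299")}]}
    assert compute_amount_out(txinfo) == Decimal("0.999")
    return compute_amount_out(txinfo)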
def sanity_test_fee(bulletd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bulletd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bulletd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        fee = total_in-total_out # implicit fee of this transaction
        if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bullets from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bullets to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bullet.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bulletd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bulletd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bulletd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bulletd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bulletd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bulletd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_106_25929 | from random import random, randrange
from .config import cfg
from .vocab_pick import calc_progress
class WordIterator:
def __init__(self, words: list):
self.__words = words
self.__add_after_iter = None
def __iter__(self):
return self
def add_word(self, index: int, value):
self.__add_after_iter = (index, value)
def __next__(self):
ws = self.__words
if not any(ws[:-1]):
i, res = self.__add_after_iter
self.__add_after_iter = None
if i < len(self.__words):
return i == 1, i, res
raise StopIteration
# Get non-empty buckets, position new word bucket at nwi
nwi = cfg['new-words-index']
ws = [l for l in ws[1:nwi+1] + [ws[0]] + ws[nwi+1:] if l]
# Pick a random word, favor lower indices
l = ws[int(random() ** cfg['critical-word-weight'] * len(ws))]
img_idx = randrange(len(l))
i = next(i for i, l_ in enumerate(self.__words) if l_ is l)
if self.__add_after_iter:
index, value = self.__add_after_iter
self.__add_after_iter = None
self.__words[min(len(self.__words)-1, index)].append(value)
# Calculate progress before popping the word
return i == 0, i, calc_progress(self.__words), l.pop(img_idx)
|
the-stack_106_25930 | from sqlalchemy import *
from sqlalchemy.orm import *
engine = create_engine('sqlite:///data/db.sqlite', echo=False)
metadata = MetaData()
metadata.bind = engine
ranking = Table(
'ranking', metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('hiScore', Float),
Column('latestScore', Float)
)
class data():
"""docstring for data."""
def __init__(self):
metadata.create_all(engine)
def add(self, personId, personName, score):
if ranking.select().where(ranking.c.id == personId).execute().fetchone() != None:
#update
if ranking.select().where(ranking.c.id == personId).execute().fetchone()[2] > score:
ranking.update().where(ranking.c.id == personId).execute(latestScore=score, hiScore=score)
else:
ranking.update().where(ranking.c.id == personId).execute(latestScore=score)
else:
#create new record
ranking.insert().execute(id=personId, name=personName, latestScore=score, hiScore=score)
def rank(self, personId):
pass
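# --- illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical demonstration of the data wrapper: add() either inserts a new
# row or updates the stored scores for an existing id. It assumes the data/ directory
# used by the engine above exists so the sqlite file can be created.
def _example_usage():
    store = data()
    store.add(1, "alice", 42.0)
    store.add(1, "alice", 40.0)
    return ranking.select().where(ranking.c.id == 1).execute().fetchone()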
|
the-stack_106_25931 | # -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.module_api import ModuleApi
from tests.unittest import HomeserverTestCase
class ModuleApiTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, homeserver):
self.store = homeserver.get_datastore()
self.module_api = ModuleApi(homeserver, homeserver.get_auth_handler())
def test_can_register_user(self):
"""Tests that an external module can register a user"""
# Register a new user
user_id, access_token = self.get_success(
self.module_api.register(
"bob", displayname="Bobberino", emails=["[email protected]"]
)
)
# Check that the new user exists with all provided attributes
self.assertEqual(user_id, "@bob:test")
self.assertTrue(access_token)
self.assertTrue(self.get_success(self.store.get_user_by_id(user_id)))
# Check that the email was assigned
emails = self.get_success(self.store.user_get_threepids(user_id))
self.assertEqual(len(emails), 1)
email = emails[0]
self.assertEqual(email["medium"], "email")
self.assertEqual(email["address"], "[email protected]")
# Should these be 0?
self.assertEqual(email["validated_at"], 0)
self.assertEqual(email["added_at"], 0)
# Check that the displayname was assigned
displayname = self.get_success(self.store.get_profile_displayname("bob"))
self.assertEqual(displayname, "Bobberino")
|
the-stack_106_25935 | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Contains definitions of RevNet."""
from typing import Any, Callable, Dict, Optional
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks
# Specifications for different RevNet variants.
# Each entry specifies block configurations of the particular RevNet variant.
# Each element in the block configuration is in the following format:
# (block_fn, num_filters, block_repeats)
REVNET_SPECS = {
38: [
('residual', 32, 3),
('residual', 64, 3),
('residual', 112, 3),
],
56: [
('bottleneck', 128, 2),
('bottleneck', 256, 2),
('bottleneck', 512, 3),
('bottleneck', 832, 2),
],
104: [
('bottleneck', 128, 2),
('bottleneck', 256, 2),
('bottleneck', 512, 11),
('bottleneck', 832, 2),
],
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class RevNet(tf.keras.Model):
"""Creates a Reversible ResNet (RevNet) family model.
This implements:
Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse.
The Reversible Residual Network: Backpropagation Without Storing
Activations.
(https://arxiv.org/pdf/1707.04585.pdf)
"""
def __init__(
self,
model_id: int,
input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec(
shape=[None, None, None, 3]),
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a RevNet model.
Args:
model_id: An `int` of depth/id of ResNet backbone model.
input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
activation: A `str` name of the activation function.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A str for kernel initializer of convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._use_sync_bn = use_sync_bn
self._activation = activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
axis = -1 if tf.keras.backend.image_data_format() == 'channels_last' else 1
# Build RevNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
x = tf.keras.layers.Conv2D(
filters=REVNET_SPECS[model_id][0][1],
kernel_size=7, strides=2, use_bias=False, padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer)(inputs)
x = self._norm(
axis=axis, momentum=norm_momentum, epsilon=norm_epsilon)(x)
x = tf_utils.get_activation(activation)(x)
x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
endpoints = {}
for i, spec in enumerate(REVNET_SPECS[model_id]):
if spec[0] == 'residual':
inner_block_fn = nn_blocks.ResidualInner
elif spec[0] == 'bottleneck':
inner_block_fn = nn_blocks.BottleneckResidualInner
else:
raise ValueError('Block fn `{}` is not supported.'.format(spec[0]))
if spec[1] % 2 != 0:
raise ValueError('Number of output filters must be even to ensure '
'splitting in channel dimension for reversible blocks')
x = self._block_group(
inputs=x,
filters=spec[1],
strides=(1 if i == 0 else 2),
inner_block_fn=inner_block_fn,
block_repeats=spec[2],
batch_norm_first=(i != 0), # Only skip on first block
name='revblock_group_{}'.format(i + 2))
endpoints[str(i + 2)] = x
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(RevNet, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def _block_group(self,
inputs: tf.Tensor,
filters: int,
strides: int,
inner_block_fn: Callable[..., tf.keras.layers.Layer],
block_repeats: int,
batch_norm_first: bool,
name: str = 'revblock_group') -> tf.Tensor:
"""Creates one reversible block for RevNet model.
Args:
inputs: A `tf.Tensor` of size `[batch, channels, height, width]`.
filters: An `int` number of filters for the first convolution of the
layer.
strides: An `int` stride to use for the first convolution of the layer. If
greater than 1, this block group will downsample the input.
inner_block_fn: Either `nn_blocks.ResidualInner` or
`nn_blocks.BottleneckResidualInner`.
block_repeats: An `int` number of blocks contained in this block group.
batch_norm_first: A `bool` that specifies whether to apply
BatchNormalization and activation layer before feeding into convolution
layers.
name: A `str` name for the block.
Returns:
The output `tf.Tensor` of the block layer.
"""
x = inputs
for i in range(block_repeats):
is_first_block = i == 0
# Only first residual layer in block gets downsampled
curr_strides = strides if is_first_block else 1
f = inner_block_fn(
filters=filters // 2,
strides=curr_strides,
batch_norm_first=batch_norm_first and is_first_block,
kernel_regularizer=self._kernel_regularizer)
g = inner_block_fn(
filters=filters // 2,
strides=1,
batch_norm_first=batch_norm_first and is_first_block,
kernel_regularizer=self._kernel_regularizer)
x = nn_blocks.ReversibleLayer(f, g)(x)
return tf.identity(x, name=name)
def get_config(self) -> Dict[str, Any]:
config_dict = {
'model_id': self._model_id,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
}
return config_dict
@classmethod
def from_config(cls,
config: Dict[str, Any],
custom_objects: Optional[Any] = None) -> tf.keras.Model:
return cls(**config)
@property
  def output_specs(self) -> Dict[str, tf.TensorShape]:
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('revnet')
def build_revnet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds RevNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'revnet', (f'Inconsistent backbone type '
f'{backbone_type}')
return RevNet(
model_id=backbone_cfg.model_id,
input_specs=input_specs,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
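# --- Illustrative usage sketch (added; not part of the original file). ---
# Builds a RevNet-56 backbone directly and prints its endpoint shapes. The input
# size is an assumption; the __main__ guard keeps import behaviour unchanged.
if __name__ == '__main__':
  backbone = RevNet(
      model_id=56,
      input_specs=tf.keras.layers.InputSpec(shape=[None, 224, 224, 3]))
  for level, shape in backbone.output_specs.items():
    print(level, shape)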
|
the-stack_106_25936 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Placement Group
"""
from boto.ec2.ec2object import EC2Object
from boto.exception import BotoClientError
class PlacementGroup(EC2Object):
def __init__(self, connection=None, name=None, strategy=None, state=None):
super(PlacementGroup, self).__init__(connection)
self.name = name
self.strategy = strategy
self.state = state
def __repr__(self):
return 'PlacementGroup:%s' % self.name
def endElement(self, name, value, connection):
if name == 'groupName':
self.name = value
elif name == 'strategy':
self.strategy = value
elif name == 'state':
self.state = value
else:
setattr(self, name, value)
def delete(self, dry_run=False):
return self.connection.delete_placement_group(
self.name,
dry_run=dry_run
)
|
the-stack_106_25937 | import random
from typing import Sequence
print("Hello, what is your name?")
name = input()
secretNum = random.randint(1,20)
print("Well, " + name + ", I am thinking of a number between 1 and 20")
guess = 0
for guessesTaken in range(1,7):
print("Take a guess.")
try:
guess = int(input())
if guess < secretNum:
print("Too low")
elif guess > secretNum:
print("Too high")
else:
break
except ValueError:
print("That's not a number")
if guess == secretNum:
print("Good job " + name + "! You got it right in " + str(guessesTaken) + " guesses")
else:
print("Nupe, the number was " + str(secretNum)) |
the-stack_106_25939 | from typing import Dict
from cereal import car
from selfdrive.car import dbc_dict
from selfdrive.car.docs_definitions import CarInfo
Ecu = car.CarParams.Ecu
SPEED_FROM_RPM = 0.008587
class CarControllerParams:
ANGLE_DELTA_BP = [0., 5., 15.]
ANGLE_DELTA_V = [5., .8, .15] # windup limit
ANGLE_DELTA_VU = [5., 3.5, 0.4] # unwind limit
LKAS_MAX_TORQUE = 1 # A value of 1 is easy to overpower
STEER_THRESHOLD = 1.0
class CAR:
BODY = "COMMA BODY"
CAR_INFO: Dict[str, CarInfo] = {
CAR.BODY: CarInfo("comma body", package="All", good_torque=True),
}
FW_VERSIONS = {
CAR.BODY: {
(Ecu.engine, 0x720, None): [
b'0.0.01',
b'02/27/2022'
],
(Ecu.debug, 0x721, None): [
b'166bd860' # git hash of the firmware used
],
},
}
DBC = {
CAR.BODY: dbc_dict('comma_body', None),
}
|
the-stack_106_25943 | #!/usr/bin/env python3
import requests
import bs4
import json
import format_json
import argparse
# following values(URL,FILENAME) are dummy and sample values
URL = "https://scholar.google.co.jp/scholar?start=10&hl=ja&as_sdt=2005&sciodt=0,5&cites=3982677450424843587&scipsc="
FILENAME = "Minimal solvers for generalized pose and scale estimation from two rays and one point2"
def get_soup():
get_url_info = requests.get(URL)
bs4Obj = bs4.BeautifulSoup(get_url_info.text, "lxml")
return bs4Obj
def get_link_section(soup):
temp = soup.find_all("div", attrs={"class": "gs_ri"})
list_section = []
for e in temp:
list_section.append(e)
return list_section
def get_dict_link_data(list_dict_result):
soup = get_soup()
list_section = get_link_section(soup)
for e in list_section:
dict_temp = format_json.get_dict_data_sub(e)
if not dict_temp in list_dict_result:
list_dict_result.append(dict_temp)
else:
print("same page")
return list_dict_result
def savejson_dict(list_dict_results):
count = 1
json_data_temp = {}
for e in list_dict_results:
json_data_temp["id_" + str(count)] = e
count += 1
format_json.savejson_dict(json_data_temp, FILENAME + ".json")
# ---- ----
# main
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('URL', metavar='URL', type=str, nargs='+',
help='A Page URL in Google Sholar ')
parser.add_argument('Filename', metavar='F', type=str, nargs='+',
help='Filename for saving data')
args = parser.parse_args()
FILENAME = args.Filename[0]
URL = args.URL[0]
list_dict_result = []
result = get_dict_link_data(list_dict_result)
#print (result)
savejson_dict(result)
|
the-stack_106_25946 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
from cinder.i18n import _LE
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
TABLE_NAME = 'migrations'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table(TABLE_NAME, meta, autoload=True)
try:
table.drop()
except Exception:
LOG.error(_LE("migrations table not dropped"))
raise
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table(
TABLE_NAME, meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('source_compute', String(length=255)),
Column('dest_compute', String(length=255)),
Column('dest_host', String(length=255)),
Column('old_instance_type_id', Integer),
Column('new_instance_type_id', Integer),
Column('instance_uuid', String(length=255), nullable=True),
Column('status', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
try:
table.create()
except Exception:
LOG.error(_LE("Table |%s| not created"), repr(table))
raise
|
the-stack_106_25948 | from . import test_table
from ..src import table
from ..src import button
def test_init_seat_neg1():
t = test_table.custom_tbl(players=6)
btn = button.Button(t)
assert btn.seat == -1
def test_init_repr_neg1_str():
t = test_table.custom_tbl(players=6)
btn = button.Button(t)
assert repr(btn) == '-1'
def test_init_int_neg1():
t = test_table.custom_tbl(players=6)
btn = button.Button(t)
assert int(btn) == -1
def test_movebutton_newTable_returns0():
t = test_table.custom_tbl(players=6)
btn = button.Button(t)
btn.move()
assert btn.seat == 0
def test_movebutton_seat0removed_returns1():
t = test_table.custom_tbl(players=6)
t.pop(0)
btn = button.Button(t)
btn.move()
assert btn.seat == 1
def test_movebutton_2x_returns1():
t = test_table.custom_tbl(players=6)
btn = button.Button(t)
btn.move()
btn.move()
assert btn.seat == 1
def test_randomize_2seats_inrange0to1():
# Randomize button on table size 2, button is in range 0-1
seats = 2
t = test_table.custom_tbl(players=seats)
btn = button.Button(t)
btn.randomize()
assert btn.seat >= 0
assert btn.seat < seats
def test_randomize_6seats_validbtn():
seats = 6
t = test_table.custom_tbl(players=seats)
btn = button.Button(t)
btn.randomize()
assert btn.seat >= 0
assert btn.seat < seats
def test_randomize_9seats_inrange0to8():
# Randomize button on table size 9, button is in range 0-8
seats = 9
t = test_table.custom_tbl(players=seats)
btn = button.Button(t)
btn.randomize()
assert btn.seat >= 0
assert btn.seat < seats
def test_randomize_noplayers_raisesException():
# Randomize button on table size 9, but no players
seats = 9
t = table.Table(seats)
btn = button.Button(t)
assert btn.seat == -1
|
the-stack_106_25949 | import operator
import unittest
from metafunctions.core import FunctionMerge
from metafunctions.core import SimpleFunction
from metafunctions.tests.util import BaseTestCase
from metafunctions.operators import concat
from metafunctions import exceptions
from metafunctions.api import node, star
class TestUnit(BaseTestCase):
def test_str(self):
cmp = FunctionMerge(operator.add, (a, b))
self.assertEqual(str(cmp), '(a + b)')
self.assertEqual(repr(cmp), 'FunctionMerge({}, {})'.format(operator.add, (a, b)))
def test_call(self):
cmp = FunctionMerge(operator.add, (a, b))
self.assertEqual(cmp('_'), '_a_b')
self.assertEqual(cmp('-', '_'), '-a_b')
with self.assertRaises(exceptions.CallError):
cmp('_', '_', '_')
@SimpleFunction
def d():
return 'd'
abd = a & b & d
self.assertEqual(abd('-', '_'), ('-a', '_b', 'd'))
def test_format(self):
cmp = FunctionMerge(operator.add, (a, b), function_join_str='tacos')
self.assertEqual(str(cmp), '(a tacos b)')
def test_non_binary(self):
def my_concat(*args):
return ''.join(args)
cmp = FunctionMerge(my_concat, (a, a, a, a))
self.assertEqual(cmp('_'), '_a_a_a_a')
self.assertEqual(str(cmp), '(a {m} a {m} a {m} a)'.format(m=my_concat))
d = FunctionMerge(my_concat, (b, b), function_join_str='q')
self.assertEqual(str(d), '(b q b)')
def test_join(self):
# The first real non-binary function! Like the example above.
cmp = a & a & 'sweet as'
self.assertTupleEqual(cmp('_'), ('_a', '_a', 'sweet as'))
self.assertEqual(str(cmp), "(a & a & 'sweet as')")
self.assertEqual(repr(cmp), "FunctionMerge({}, {})".format(concat, (a, a, cmp._functions[-1])))
#__rand__ works too
a_ = 'sweet as' & a
self.assertEqual(a_('+'), ('sweet as', '+a'))
abc = (a & (b & c)) | ''.join
self.assertEqual(abc('_'), '_a_b_c')
def test_combine(self):
# Only combine FunctionMerges that have the same MergeFunc
add = a + b
also_add = b + a
div = a / b
#This combined FunctionMerge will fail if called (because addition is binary,
#operator.add only takes two args). I'm just using to combine for test purposes.
abba = FunctionMerge.combine(operator.add, add, also_add)
self.assertEqual(str(abba), '(a + b + b + a)')
self.assertEqual(repr(abba), "FunctionMerge({}, {})".format(operator.add, (a, b, b, a)))
ab_ba = FunctionMerge.combine(operator.sub, add, also_add)
self.assertEqual(str(ab_ba), '((a + b) - (b + a))')
self.assertEqual(repr(ab_ba), "FunctionMerge({}, {})".format(operator.sub, (add, also_add)))
abab = FunctionMerge.combine(operator.add, add, div)
self.assertEqual(str(abab), '(a + b + (a / b))')
self.assertEqual(repr(abab), "FunctionMerge({}, {})".format(operator.add, (a, b, div)))
def custom():
pass
abba_ = FunctionMerge.combine(custom, add, also_add, function_join_str='<>')
self.assertEqual(str(abba_), '((a + b) <> (b + a))')
self.assertEqual(repr(abba_), "FunctionMerge({}, {})".format(custom, (add, also_add)))
def my_concat(*args):
return ''.join(args)
bb = FunctionMerge(my_concat, (b, b), function_join_str='q')
aa = FunctionMerge(my_concat, (a, a), function_join_str='q')
bbaa = FunctionMerge.combine(my_concat, bb, aa, function_join_str='q')
self.assertEqual(str(bbaa), '(b q b q a q a)')
self.assertEqual(repr(bbaa), "FunctionMerge({}, {})".format(my_concat, (b, b, a, a)))
def test_len_mismatch(self):
# If len(inputs) <= len(functions), call remaining functions with no args.
@node
def f(x=None):
if x:
return x + 'f'
return 'F'
cmp = (a & b) | star(f&f&f&f)
self.assertEqual(cmp('_'), ('_af', '_bf', 'F', 'F'))
# if len(inputs) > len(functions), fail.
cmp = (a & b & c) | star(f+f)
with self.assertRaises(exceptions.CallError):
cmp('_')
@unittest.skip('TODO')
def test_binary_functions(self):
# The issue here is that f + f + f + f is not converted to a single FunctionMerge. Rather
# it becomes nested FunctionMerges: (((f + f) + f) + f). Ideally we would be able to
# handle this. One potential solution is to 'flatten' the FunctionMerge, but this doesn't
# work for functions that aren't commutative. E.g., (a / b / c) != (a / (b / c)). I'm
# leaving this test for now as a todo.
@node
def f(x=None):
if x:
return x + 'f'
return 'F'
cmp = (a & b) | star(f+f+f+f)
self.assertEqual(cmp('_'), '_af_bfFF')
@SimpleFunction
def a(x):
return x + 'a'
@SimpleFunction
def b(x):
return x + 'b'
@node
def c(x):
return x + 'c'
l = SimpleFunction(lambda x: x + 'l')
|
the-stack_106_25950 | """Test fixtures for the generic component."""
from io import BytesIO
from PIL import Image
import pytest
@pytest.fixture(scope="package")
def fakeimgbytes_png():
"""Fake image in RAM for testing."""
buf = BytesIO()
Image.new("RGB", (1, 1)).save(buf, format="PNG")
yield bytes(buf.getbuffer())
@pytest.fixture(scope="package")
def fakeimgbytes_jpg():
"""Fake image in RAM for testing."""
buf = BytesIO() # fake image in ram for testing.
Image.new("RGB", (1, 1)).save(buf, format="jpeg")
yield bytes(buf.getbuffer())
@pytest.fixture(scope="package")
def fakeimgbytes_svg():
"""Fake image in RAM for testing."""
yield bytes(
'<svg xmlns="http://www.w3.org/2000/svg"><circle r="50"/></svg>',
encoding="utf-8",
)
|
the-stack_106_25951 | # Copyright (c) 2007-2019 UShareSoft, All rights reserved
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import os.path
import ntpath
from ussclicore.utils import generics_utils, printer
from hammr.utils import constants
from uforge.objects.uforge import *
def check_bundle(bundle):
if not "name" in bundle:
printer.out("There is no attribute [name] for a [bundle]", printer.ERROR)
return
if not "version" in bundle:
printer.out("no attribute [version] for [bundle]", printer.ERROR)
return
if not "files" in bundle:
printer.out("no attribute [files] for [bundle]", printer.ERROR)
return
for file in bundle["files"]:
bundle = check_files(bundle, file, 0)
if bundle is None:
return
return bundle
def check_files(bundle, file, level):
if not "name" in file:
printer.out("There is no attribute [name] for a [file]", printer.ERROR)
return
if not "source" in file:
printer.out("There is no attribute [source] for a [file]", printer.ERROR)
return
if level > 0 and "tag" in file and (file["tag"] == "ospkg" or file["tag"] == "bootscript"):
printer.out("The file '" + file["name"] + ", with tag '" + file["tag"] + "' must be in the first level files section", printer.ERROR)
return
if ("bootOrder" in file or "bootType" in file) and (not "tag" in file or file["tag"] != "bootscript"):
printer.out("There is the attribute [bootOrder] or [bootType] for file '" + file["name"] + "' but is not tagged as 'bootscript'", printer.ERROR)
return
if ("ownerGroup" in file or "rights" in file or "symlink" in file) and "tag" in file and file["tag"] != "softwarefile":
printer.out("There is the attribute [ownerGroup], [rights] or [symlink] for file '" + file["name"] + "' but is not tagged as 'softwarefile'", printer.ERROR)
return
if "files" in file:
if len(file["files"]) > 0:
if "tag" in file and file["tag"] != "softwarefile":
printer.out("The list of files is not empty for file '" + file["name"] + "' but is not tagged as 'softwarefile'", printer.ERROR)
return
elif not os.path.isdir(file["source"]):
printer.out("The list of files is not empty for file '" + file["name"] + "' but [source] doesn't represent a folder", printer.ERROR)
return
for subFile in file["files"]:
bundle = check_files(bundle, subFile, level + 1)
return bundle
def recursively_append_to_archive(bundle, files, parent_dir, duplicate_check_list, archive_files):
#must save the filepath before changing it after archive
filePathBeforeTar = files["source"]
if not "tag" in files or ("tag" in files and files["tag"] != "ospkg"):
add_file_to_archive_and_update_file_source(bundle, files, parent_dir, duplicate_check_list, archive_files)
if "files" in files:
for subFiles in files["files"]:
duplicate_check_list,archive_files = recursively_append_to_archive(
bundle, subFiles, parent_dir + ntpath.basename(files["source"]), duplicate_check_list, archive_files)
if (not "tag" in files or files["tag"] != "ospkg") and os.path.isdir(filePathBeforeTar):
duplicate_check_list, archive_files = process_files_from_folder(
bundle, files, filePathBeforeTar, parent_dir + ntpath.basename(filePathBeforeTar), duplicate_check_list, archive_files)
return duplicate_check_list, archive_files
def process_files_from_folder(bundle, files, filePath, parentDir, duplicate_check_list, archive_files):
for subFiles in os.listdir(filePath):
subFilesDict = dict({"name" : ntpath.basename(subFiles), "source" : filePath + os.sep + ntpath.basename(subFiles), "files" : []})
#must save the filepath before changing it after archive
subFilePathBeforeTar = subFilesDict["source"]
if add_file_to_archive_and_update_file_source(
bundle, subFilesDict, parentDir, duplicate_check_list, archive_files, False):
files["files"].append(subFilesDict)
if os.path.isdir(subFilePathBeforeTar):
duplicate_check_list,archive_files = process_files_from_folder(
bundle, subFilesDict, subFilePathBeforeTar, parentDir + ntpath.basename(subFilePathBeforeTar),
duplicate_check_list, archive_files)
return duplicate_check_list, archive_files
def add_file_to_archive_and_update_file_source(bundle, file, parent_dir, duplicate_check_list, archive_files, fail_on_duplicates=True):
file_tar_path = build_file_tar_path(bundle, file, parent_dir)
if file_tar_path not in duplicate_check_list:
duplicate_check_list.append(file_tar_path)
archive_files.append([file_tar_path, file["source"]])
# changing source path to archive related source path
file["source"] = file_tar_path
return True
elif fail_on_duplicates:
raise ValueError(
"Cannot have identical files in the bundles section: " + file_tar_path + " from " + file["source"])
return False
def build_file_tar_path(bundle, file, parent_dir):
# if parentDir is a no empty path or already ending with os.sep, add os.sep at the end
if parent_dir and not parent_dir.endswith(os.sep):
parent_dir = parent_dir + os.sep
return constants.FOLDER_BUNDLES + os.sep + generics_utils.remove_URI_forbidden_char(bundle["name"]) \
+ os.sep + generics_utils.remove_URI_forbidden_char(bundle["version"]) \
+ os.sep + parent_dir \
+ generics_utils.remove_URI_forbidden_char(ntpath.basename(file["name"]))
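# Illustrative note (added; not part of the original file): assuming
# constants.FOLDER_BUNDLES is "bundles", a bundle named "mybundle" at version "1.0"
# with parent_dir "conf" and file name "app.conf" yields an archive path like
# "bundles/mybundle/1.0/conf/app.conf" (joined with os.sep).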
|
the-stack_106_25953 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains code for epoch modifiers in Keras
"""
from sparseml.keras.optim.modifier import KerasModifierYAML, ScheduledModifier
from sparseml.sparsification import EpochRangeModifier as BaseEpochRangeModifier
__all__ = ["EpochRangeModifier"]
@KerasModifierYAML()
class EpochRangeModifier(BaseEpochRangeModifier, ScheduledModifier):
"""
Simple modifier to set the range of epochs to train over for
the recalibration process.
Note, that if other modifiers exceed the range of this one for min or max epochs,
this modifier will not have an effect.
| Sample yaml:
| !EpochRangeModifier:
| start_epoch: 0
| end_epoch: 90
"""
def __init__(
self,
start_epoch: float,
end_epoch: float,
):
"""
:param start_epoch: The epoch to start the modifier at
:param end_epoch: The epoch to end the modifier at
"""
super(EpochRangeModifier, self).__init__(
start_epoch=start_epoch, end_epoch=end_epoch, end_comparator=-1
)
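# Illustrative usage sketch (added; not part of the original file): constructing the
# modifier directly in Python mirrors the YAML sample in the class docstring.
#
#   epoch_range = EpochRangeModifier(start_epoch=0, end_epoch=90)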
|
the-stack_106_25954 | import os
import numpy as np
import shutil
import logging
from oggm import cfg, utils, GlacierDirectory, tasks
from oggm.core import gcm_climate
from oggm.workflow import init_glacier_regions, execute_entity_task
from oggm.core.flowline import FileModel, run_from_climate_data
from relic.preprocessing import merge_pair_dict
log = logging.getLogger(__name__)
def run_and_store_from_disk(rgi, histalp_storage, storage):
cmip = ['CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'CanESM2',
'GFDL-CM3', 'GFDL-ESM2G', 'GISS-E2-R', 'IPSL-CM5A-LR',
'MPI-ESM-LR', 'NorESM1-M']
bp = 'https://cluster.klima.uni-bremen.de/~oggm/cmip5-ng/pr/pr_mon_{}_{}_r1i1p1_g025.nc'
bt = 'https://cluster.klima.uni-bremen.de/~oggm/cmip5-ng/tas/tas_mon_{}_{}_r1i1p1_g025.nc'
for i in np.arange(999):
# Local working directory (where OGGM will write its output)
storage_dir = os.path.join(histalp_storage, rgi, '{:02d}'.format(i),
rgi[:8], rgi[:11], rgi)
new_dir = os.path.join(cfg.PATHS['working_dir'], 'per_glacier',
rgi[:8], rgi[:11], rgi)
# make sure directory is empty:
try:
shutil.rmtree(new_dir)
except FileNotFoundError:
pass
# if path does not exist, we handled all ensemble members:
try:
shutil.copytree(storage_dir, new_dir)
except FileNotFoundError:
log.info('processed {:02d} ensemble members'.format(i))
break
gdir = GlacierDirectory(rgi)
pdict = gdir.get_climate_info()['ensemble_calibration']
cfg.PARAMS['prcp_scaling_factor'] = pdict['prcp_scaling_factor']
default_glena = 2.4e-24
cfg.PARAMS['glen_a'] = pdict['glena_factor'] * default_glena
cfg.PARAMS['inversion_glen_a'] = pdict['glena_factor'] * default_glena
mbbias = pdict['mbbias']
tmp_mod = FileModel(
gdir.get_filepath('model_run',
filesuffix='_histalp_{:02d}'.format(i)))
tmp_mod.run_until(tmp_mod.last_yr)
for cm in cmip:
for rcp in ['rcp26', 'rcp45', 'rcp60', 'rcp85']:
ft = utils.file_downloader(bt.format(cm, rcp))
fp = utils.file_downloader(bp.format(cm, rcp))
if ft is None:
log.warning('no {} for model {}'.format(rcp, cm))
continue
filesuffix = '_{}_{}'.format(cm, rcp)
# bias correct them
if '_merged' in rgi:
process_cmip_for_merged_glacier(gdir, filesuffix, ft, fp)
else:
gcm_climate.process_cmip5_data(gdir,
filesuffix=filesuffix,
fpath_temp=ft,
fpath_precip=fp)
rid = '_{}_{}'.format(cm, rcp)
rid_out = '{}_{:02d}'.format(rid, i)
run_from_climate_data(gdir,
ys=2014, ye=2100,
climate_filename='gcm_data',
climate_input_filesuffix=rid,
init_model_fls=tmp_mod.fls,
output_filesuffix=rid_out,
bias=mbbias
)
fn1 = 'model_diagnostics{}.nc'.format(rid_out)
shutil.copyfile(
gdir.get_filepath('model_diagnostics',
filesuffix=rid_out),
os.path.join(storage, fn1))
fn4 = 'model_run{}.nc'.format(rid_out)
shutil.copyfile(
gdir.get_filepath('model_run',
filesuffix=rid_out),
os.path.join(storage, fn4))
def process_cmip_for_merged_glacier(gdir, filesuffix, ft, fp):
rgi = gdir.rgi_id.split('_')[0]
rgis = merge_pair_dict(rgi)[0] + [rgi]
gdirs = init_glacier_regions(rgis, prepro_border=10, from_prepro_level=1)
execute_entity_task(tasks.process_histalp_data, gdirs)
execute_entity_task(gcm_climate.process_cmip5_data, gdirs,
filesuffix=filesuffix, fpath_temp=ft, fpath_precip=fp)
for gd in gdirs:
# copy climate files
shutil.copyfile(
gd.get_filepath('gcm_data', filesuffix=filesuffix),
gdir.get_filepath('gcm_data',
filesuffix='_{}{}'.format(gd.rgi_id, filesuffix)
))
|
the-stack_106_25956 | """
Main script for the project that runs the program from start to finish.
:author: Jacob Singleton
"""
from pathlib import Path
from random import randint
from time import sleep
from audio import Audio
from config import Config
def show_startup_banner() -> None:
"""
Displays an ASCII art startup banner explaining the program.
"""
print(
'+------------------------------------------------------------------+',
'| Water Reminder |',
'+------------------------------------------------------------------+',
'',
'This program is designed to periodically remind you to drink water.',
'Water is the source of all life and is vital to living well!',
'',
'An audio segment will play to remind you to drink water periodically.',
'',
'Head to the \'config.yml\' file to configure this program to your needs.',
'To add more sounds, simply put audio files into the audio directory!',
'',
'----------------------------------------------------------------------',
'',
sep='\n'
)
def water_reminder_loop(audio_player: Audio, config: Config) -> None:
"""
Main loop of the program that periodically plays water reminding audio files.
:param audio_player: The Audio object for playing the water reminding audio files.
:param config: The configuration variables for the program.
"""
while True:
sleep(randint(config.min_interval, config.max_interval) * 60)
audio_player.play_random_audio()
if __name__ == '__main__':
show_startup_banner()
config: Config = Config(Path('config.yml'))
audio_player = Audio(config.audio_path)
audio_player.play_random_audio()
water_reminder_loop(audio_player, config)
|
the-stack_106_25958 | import fechbase
class Records(fechbase.RecordsBase):
def __init__(self):
fechbase.RecordsBase.__init__(self)
self.fields = [
{'name': 'FORM TYPE', 'number': '1'},
{'name': 'FILER COMMITTEE ID NUMBER', 'number': '2'},
{'name': 'CHANGE OF COMMITTEE NAME', 'number': '3'},
{'name': 'COMMITTEE NAME', 'number': '4'},
{'name': 'CHANGE OF ADDRESS', 'number': '5'},
{'name': 'STREET 1', 'number': '6'},
{'name': 'STREET 2', 'number': '7'},
{'name': 'CITY', 'number': '8'},
{'name': 'STATE', 'number': '9'},
{'name': 'ZIP', 'number': '10'},
{'name': 'COMMITTEE EMAIL', 'number': '11'},
{'name': 'COMMITTEE WEB URL', 'number': '12'},
{'name': 'COMMITTEE FAX NUMBER', 'number': '13'},
{'name': 'SUBMISSION DATE', 'number': '14'},
{'name': 'SIGNATURE LAST NAME', 'number': '15'},
{'name': 'SIGNATURE FIRST NAME', 'number': '16'},
{'name': 'SIGNATURE MIDDLE NAME', 'number': '17'},
{'name': 'SIGNATURE PREFIX', 'number': '18'},
{'name': 'SIGNATURE SUFFIX', 'number': '19'},
{'name': 'DATE SIGNED', 'number': '20'},
{'name': 'COMMITTEE TYPE', 'number': '21-5.'},
{'name': 'CANDIDATE ID NUMBER', 'number': '22-5.'},
{'name': 'CANDIDATE LAST NAME', 'number': '23-5.'},
{'name': 'CANDIDATE FIRST NAME', 'number': '24-5.'},
{'name': 'CANDIDATE MIDDLE NAME', 'number': '25-5.'},
{'name': 'CANDIDATE PREFIX', 'number': '26-5.'},
{'name': 'CANDIDATE SUFFIX', 'number': '27-5.'},
{'name': 'CANDIDATE OFFICE', 'number': '28-5.'},
{'name': 'CANDIDATE STATE', 'number': '29-5.'},
{'name': 'CANDIDATE DISTRICT', 'number': '30-5.'},
{'name': 'PARTY CODE', 'number': '31-5.'},
{'name': 'PARTY TYPE', 'number': '32-5.'},
{'name': 'CUSTODIAN LAST NAME', 'number': '33-7.'},
{'name': 'CUSTODIAN FIRST NAME', 'number': '34-7.'},
{'name': 'CUSTODIAN MIDDLE NAME', 'number': '35-7.'},
{'name': 'CUSTODIAN PREFIX', 'number': '36-7.'},
{'name': 'CUSTODIAN SUFFIX', 'number': '37-7.'},
{'name': 'CUSTODIAN STREET 1', 'number': '38-7.'},
{'name': 'CUSTODIAN STREET 2', 'number': '39-7.'},
{'name': 'CUSTODIAN CITY', 'number': '40-7.'},
{'name': 'CUSTODIAN STATE', 'number': '41-7.'},
{'name': 'CUSTODIAN ZIP', 'number': '42-7.'},
{'name': 'CUSTODIAN TITLE', 'number': '43-7.'},
{'name': 'CUSTODIAN TELEPHONE', 'number': '44-7.'},
{'name': 'TREASURER LAST NAME', 'number': '45-8.'},
{'name': 'TREASURER FIRST NAME', 'number': '46-8.'},
{'name': 'TREASURER MIDDLE NAME', 'number': '47-8.'},
{'name': 'TREASURER PREFIX', 'number': '48-8.'},
{'name': 'TREASURER SUFFIX', 'number': '49-8.'},
{'name': 'TREASURER STREET 1', 'number': '50-8.'},
{'name': 'TREASURER STREET 2', 'number': '51-8.'},
{'name': 'TREASURER CITY', 'number': '52-8.'},
{'name': 'TREASURER STATE', 'number': '53-8.'},
{'name': 'TREASURER ZIP', 'number': '54-8.'},
{'name': 'TREASURER TITLE', 'number': '55-8.'},
{'name': 'TREASURER TELEPHONE', 'number': '56-8.'},
{'name': 'AFFILIATED Committee ID NUM', 'number': '57-6.'},
{'name': 'AFFILIATED Committee NAME', 'number': '58-6.'},
{'name': 'AFFILIATED STREET 1', 'number': '59-6.'},
{'name': 'AFFILIATED STREET 2', 'number': '60-6.'},
{'name': 'AFFILIATED CITY', 'number': '61-6.'},
{'name': 'AFFILIATED STATE', 'number': '62-6.'},
{'name': 'AFFILIATED ZIP', 'number': '63-6.'},
{'name': 'RELATIONSHIP', 'number': '64-6.'},
{'name': 'ORGANIZATION TYPE', 'number': '65-6.'},
{'name': 'AGENT LAST NAME', 'number': '66-8.'},
{'name': 'AGENT FIRST NAME', 'number': '67-8.'},
{'name': 'AGENT MIDDLE NAME', 'number': '68-8.'},
{'name': 'AGENT PREFIX', 'number': '69-8.'},
{'name': 'AGENT SUFFIX', 'number': '70-8.'},
{'name': 'AGENT STREET 1', 'number': '71-8.'},
{'name': 'AGENT STREET 2', 'number': '72-8.'},
{'name': 'AGENT CITY', 'number': '73-8.'},
{'name': 'AGENT STATE', 'number': '74-8.'},
{'name': 'AGENT ZIP', 'number': '75-8.'},
{'name': 'AGENT TITLE', 'number': '76-8.'},
{'name': 'AGENT TELEPHONE', 'number': '77-8.'},
{'name': 'BANK NAME', 'number': '78-9. a)'},
{'name': 'BANK STREET 1', 'number': '79-9. a)'},
{'name': 'BANK STREET 2', 'number': '80-9. a)'},
{'name': 'BANK CITY', 'number': '81-9. a)'},
{'name': 'BANK STATE', 'number': '82-9. a)'},
{'name': 'BANK ZIP', 'number': '83-9. a)'},
{'name': 'BANK NAME', 'number': '84-9. b)'},
{'name': 'BANK STREET 1', 'number': '85-9. b)'},
{'name': 'BANK STREET 2', 'number': '86-9. b)'},
{'name': 'BANK CITY', 'number': '87-9. b)'},
{'name': 'BANK STATE', 'number': '88-9. b)'},
{'name': 'BANK ZIP', 'number': '89-9. b)'},
]
self.fields_names = self.hash_names(self.fields)
|
the-stack_106_25959 | # -*- coding: utf-8 -*-
"""
Testing class for report-data-entry endpoint of the Castor EDC API Wrapper.
Link: https://data.castoredc.com/api#/report-data-entry
@author: R.C.A. van Linschoten
https://orcid.org/0000-0003-3052-596X
"""
import pytest
from httpx import HTTPStatusError
from castoredc_api import CastorException
from castoredc_api.tests.test_api_endpoints.data_models import (
study_data_point_extended_model,
)
from castoredc_api.tests.test_api_endpoints.helpers_api_endpoints import allowed_value
class TestStudyDataEntry:
model_keys = study_data_point_extended_model.keys()
test_field = {
"record_id": "000004",
"field_variable_name": "ic_versions",
"field_id": "28D1A17B-51C3-4BDC-A604-7B2F6D5D5924",
"value": "1",
"updated_on": "2019-11-04 16:47:38",
"_embedded": {
"record": {
"id": "000004",
"record_id": "000004",
"ccr_patient_id": "",
"last_opened_step": "FFF23B2C-AEE6-4304-9CC4-9C7C431D5387",
"locked": False,
"progress": 17,
"status": "open",
"archived": False,
"archived_reason": None,
"created_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25",
"created_on": {
"date": "2019-10-28 10:54:07.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"updated_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25",
"updated_on": {
"date": "2020-08-14 15:20:27.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"randomized_id": None,
"randomized_on": None,
"randomization_group": None,
"randomization_group_name": None,
"_embedded": {
"institute": {
"id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"name": "Test Institute",
"abbreviation": "TES",
"code": "TES",
"order": 0,
"deleted": False,
"country_id": 169,
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/institute/1CFF5802-0B07-471F-B97E-B5166332F2C5"
}
},
}
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/record/000004"
}
},
},
"field": {
"id": "28D1A17B-51C3-4BDC-A604-7B2F6D5D5924",
"parent_id": "FFF23B2C-AEE6-4304-9CC4-9C7C431D5387",
"field_id": "28D1A17B-51C3-4BDC-A604-7B2F6D5D5924",
"field_image": "",
"field_number": 3,
"field_label": "Consent forms (CFs) reviewed:",
"field_variable_name": "ic_versions",
"field_type": "checkbox",
"field_required": 1,
"field_hidden": 0,
"field_info": "",
"field_units": "",
"field_min": None,
"field_min_label": "",
"field_max": None,
"field_max_label": "",
"field_summary_template": "",
"field_slider_step": None,
"report_id": "",
"field_length": None,
"additional_config": "",
"exclude_on_data_export": False,
"field_enforce_decimals": None,
"option_group": None,
"metadata_points": [],
"validations": [],
"dependency_parents": [],
"dependency_children": [],
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/field/28D1A17B-51C3-4BDC-A604-7B2F6D5D5924"
}
},
},
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/record/000004/data-point/study/28D1A17B-51C3-4BDC-A604-7B2F6D5D5924"
}
},
}
def test_all_study_data_record_success(self, client):
"""Test returning all study_data_points for a record"""
study_data = client.all_study_fields_record("000001")
for field in study_data:
field_keys = field.keys()
assert len(field_keys) == len(self.model_keys)
for key in field_keys:
assert key in self.model_keys
assert type(field[key]) in study_data_point_extended_model[key]
def test_all_study_data_record_fail(self, client):
"""Test failing to return all study_data_points for a record"""
with pytest.raises(HTTPStatusError) as e:
client.all_study_fields_record("00FAKE")
assert "404 Client Error: Not Found for url" in str(e.value)
def test_single_study_data_point_record_success(self, client):
"""Tests returning a single study data point for a record"""
study_data = client.single_study_field_record(
"000004", "28D1A17B-51C3-4BDC-A604-7B2F6D5D5924"
)
assert study_data == self.test_field
def test_single_study_data_point_record_fail(self, client):
"""Tests failing to return a single study data point for a record"""
with pytest.raises(HTTPStatusError) as e:
client.single_study_field_record(
"000004", "28D1A17B-51C3-4BDC-A604-7B2F6D5DFAKE"
)
assert "500 Server Error: Internal Server Error for url:" in str(e.value)
def test_update_single_study_field_record_success(self, client):
"""Tests changing a single study field."""
record = "000010"
field = "7E7868E1-946B-41EF-A96D-E3248251C6F1"
post_value = allowed_value(client, field)
# Update the field
change_reason = "Testing API"
client.update_single_study_field_record(
record, field, change_reason, post_value
)
# Check if changing worked
new_value = client.single_study_field_record(record, field)
assert new_value["value"] == str(post_value)
def test_update_single_study_field_record_fail(self, client):
"""Tests failing to change a single study field."""
record = "000010"
field = "7E7868E1-946B-41EF-A96D-E3248251C6F1"
post_value = allowed_value(client, field)
old_value = client.single_study_field_record(record, field)
# Update the field
change_reason = "Testing API"
with pytest.raises(HTTPStatusError) as e:
client.update_single_study_field_record(
record, "FAKE" + field + "FAKE", change_reason, post_value
)
assert "500 Server Error: Internal Server Error for url:" in str(e.value)
new_value = client.single_study_field_record(record, field)
assert new_value["value"] == old_value["value"]
|
the-stack_106_25960 | #!/usr/bin/python
import os
import re
import sys
import time
from rancher_metadata import MetadataAPI
__author__ = 'Sebastien LANGOUREAUX'
BACKUP_DIR = '/backup/gluster'
class ServiceRun():
def backup_duplicity_ftp(self, backend, target_path, full_backup_frequency, nb_full_backup_keep, nb_increment_backup_chain_keep, volume_size, is_init=False):
global BACKUP_DIR
if backend is None or backend == "":
raise KeyError("You must set the target backend")
if target_path is None or target_path == "":
raise KeyError("You must set the target path")
if full_backup_frequency is None or full_backup_frequency == "":
raise KeyError("You must set the full backup frequency")
if nb_full_backup_keep is None or nb_full_backup_keep == "":
raise KeyError("You must set how many full backup you should to keep")
if nb_increment_backup_chain_keep is None or nb_increment_backup_chain_keep == "":
raise KeyError("You must set how many incremental chain with full backup you should to keep")
if volume_size is None or volume_size == "":
raise KeyError("You must set the volume size")
backend = "%s%s" % (backend, target_path)
cmd = "duplicity"
# First, we restore the last backup
if is_init is True:
print("Starting init the backup folder")
os.system("%s --no-encryption %s %s" % (cmd, backend, BACKUP_DIR))
else:
# We backup on FTP
print("Starting backup")
os.system("%s --volsize %s --no-encryption --allow-source-mismatch --full-if-older-than %s %s %s" % (cmd, volume_size, full_backup_frequency, BACKUP_DIR, backend))
# We clean old backup
print("Starting cleanup")
os.system("%s remove-all-but-n-full %s --force --allow-source-mismatch --no-encryption %s" % (cmd, nb_full_backup_keep, backend))
os.system("%s remove-all-inc-of-but-n-full %s --force --allow-source-mismatch --no-encryption %s" % (cmd, nb_increment_backup_chain_keep, backend))
os.system("%s cleanup --force --no-encryption %s" % (cmd, backend))
def detect_gluster(self):
global BACKUP_DIR
# Identity database to backup
metadata_manager = MetadataAPI()
list_services = metadata_manager.get_service_links()
list_gluster = []
for service in list_services:
service_name = list_services[service]
service_name_env = service_name.upper().replace('-', '_')
gluster = {}
gluster['host'] = service_name
gluster['name'] = service
gluster['volumes'] = os.getenv(service_name_env + '_ENV_GLUSTER_VOLUMES').split(',')
list_gluster.append(gluster)
print("Found Gluster host to backup : %s (%s)" % (service, service_name))
return list_gluster
def mount_gluster(self, list_gluster):
global BACKUP_DIR
# Loop over gluster host and volume to mount it
for gluster in list_gluster:
for volume in gluster['volumes']:
# We mount the volume to backup it
path = "%s/%s/%s" % (BACKUP_DIR, gluster['name'], volume)
os.system('mkdir -p ' + path)
cmd = "mount -t glusterfs %s:%s %s" % (gluster['host'], volume, path)
os.system(cmd)
print("Mount %s:%s in %s to backup it" % (gluster['host'], volume, path))
def umount_gluster(self, list_gluster):
global BACKUP_DIR
# Loop over gluster host and volume to mount it
for gluster in list_gluster:
for volume in gluster['volumes']:
# We mount the volume to backup it
path = "%s/%s/%s" % (BACKUP_DIR, gluster['name'], volume)
os.system("umount " + path)
print("Umount %s" % (path))
if __name__ == '__main__':
service = ServiceRun()
service.backup_duplicity_ftp(os.getenv('TARGET_BACKEND'), os.getenv('TARGET_PATH', "/backup/postgres"),os.getenv('BK_FULL_FREQ', "7D"), os.getenv('BK_KEEP_FULL', "3"), os.getenv('BK_KEEP_FULL_CHAIN', "1"), os.getenv('VOLUME_SIZE', "25"), True)
list_gluster = service.detect_gluster()
service.mount_gluster(list_gluster)
service.backup_duplicity_ftp(os.getenv('TARGET_BACKEND'), os.getenv('TARGET_PATH', "/backup/postgres"),os.getenv('BK_FULL_FREQ', "7D"), os.getenv('BK_KEEP_FULL', "3"), os.getenv('BK_KEEP_FULL_CHAIN', "1"), os.getenv('VOLUME_SIZE', "25"))
service.umount_gluster(list_gluster)
|
the-stack_106_25961 | """
Copyright (c) 2018-2019 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import subprocess
import tempfile
import collections
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
import netaddr
import ujson
BASE_URL = 'https://opnsense.emergingthreats.net'
RELATED_SIDS_FILE = '/usr/local/etc/suricata/rules/telemetry_sids.txt'
UNFILTERED_OUTPUT_FIELDS = [
'timestamp', 'flow_id', 'in_iface', 'event_type', 'vlan',
'src_port', 'dest_port', 'proto', 'alert', 'tls', 'http', 'app_proto'
]
# remove from output, either sensitive or irrelevant
CLEANUP_OUTPUT_FIELDS = [
'alert.category', 'alert.severity', 'alert.gid', 'alert.signature', 'alert.metadata',
'http.http_user_agent', 'http.url', 'http.redirect'
]
def get_config(rule_update_config):
"""
retrieve device token, since we align our telemetry data to the existing rule download feature in OPNsense
it should be safe to assume rule-updater.config contains the token that is used.
:param rule_update_config: path to OPNsense rule update configuration
:return: token id or None if not found
"""
response = collections.namedtuple('sensor', 'token')
if os.path.exists(rule_update_config):
cnf = ConfigParser()
cnf.read(rule_update_config)
if cnf.has_section('__properties__'):
if cnf.has_option('__properties__', 'et_telemetry.token'):
response.token = cnf.get('__properties__', 'et_telemetry.token')
return response
def telemetry_sids():
""" collect sids of interest, which are part of the ET-Telemetry delivery
:return: set
"""
our_sids = set()
if os.path.isfile(RELATED_SIDS_FILE):
for line in open(RELATED_SIDS_FILE, 'r'):
if line.strip().isdigit():
our_sids.add(int(line.strip()))
return our_sids
class EventCollector:
""" Event collector, responsible for extracting and anonymising from an eve.json stream
"""
def __init__(self):
self._tmp_handle = tempfile.NamedTemporaryFile()
self._local_networks = list()
self._our_sids = telemetry_sids()
self._get_local_networks()
def _is_rule_of_interest(self, record):
""" check if rule is of interest for delivery
:param record: parsed eve log record
:return: boolean
"""
if not self._our_sids:
return True
elif 'alert' in record and 'signature_id' in record['alert']:
if record['alert']['signature_id'] in self._our_sids:
return True
return False
def _get_local_networks(self):
""" collect local attached networks for anonymization purposes
:return: None
"""
if os.path.isfile('/usr/local/etc/suricata/suricata.yaml'):
# home nets are considered local
with open('/usr/local/etc/suricata/suricata.yaml') as f_in:
parts = f_in.read().split('HOME_NET:')
if len(parts) > 1:
for net in parts[1].split("\n")[0].strip('" [ ]').split(','):
try:
self._local_networks.append(netaddr.IPNetwork(net))
except netaddr.core.AddrFormatError:
pass
with tempfile.NamedTemporaryFile() as output_stream:
subprocess.call(['ifconfig', '-a'], stdout=output_stream, stderr=open(os.devnull, 'wb'))
output_stream.seek(0)
for line in output_stream:
if line.startswith(b'\tinet'):
                    # decode the raw bytes read back from the temporary file so the
                    # string comparisons below behave correctly under Python 3
                    parts = line.decode().split()
if len(parts) > 3:
if parts[0] == 'inet6' and parts[2] == 'prefixlen':
# IPv6 addresses
self._local_networks.append(
netaddr.IPNetwork("%s/%s" % (parts[1].split('%')[0], parts[3]))
)
elif parts[0] == 'inet' and len(parts) > 3 and parts[2] == 'netmask':
# IPv4 addresses
mask = int(parts[3], 16)
self._local_networks.append(
netaddr.IPNetwork("%s/%s" % (netaddr.IPAddress(parts[1]), netaddr.IPAddress(mask)))
)
def is_local_address(self, address):
""" check if provided address is local for this device
:param address: address (string)
:return: boolean
"""
addr_to_check = netaddr.IPAddress(address)
for local_network in self._local_networks:
if addr_to_check in local_network:
return True
return False
def push(self, record):
""" cleanup and write record
:param record: parsed eve log record
:return: None
"""
if self._is_rule_of_interest(record):
to_push = dict()
for address in ['src_ip', 'dest_ip']:
if address in record:
if self.is_local_address(record[address]):
if record[address].find(':') > -1:
# replace local IPv6 address
to_push[address] = 'xxxx:xxxx:%s' % ':'.join(record[address].split(':')[-2:])
else:
to_push[address] = 'xxx.xxx.xxx.%s' % record[address].split('.')[-1]
else:
# non local address
to_push[address] = record[address]
# unfiltered output fields
for attr in UNFILTERED_OUTPUT_FIELDS:
if attr in record:
to_push[attr] = record[attr]
# exclude partial fields
for attr in CLEANUP_OUTPUT_FIELDS:
to_push_ref = to_push
attr_parts = attr.split('.')
for item in attr_parts[:-1]:
if item in to_push_ref:
to_push_ref = to_push_ref[item]
else:
to_push_ref = None
continue
if to_push_ref and attr_parts[-1] in to_push_ref:
del to_push_ref[attr_parts[-1]]
self._tmp_handle.write(("%s\n" % ujson.dumps(to_push)).encode())
def get(self):
""" fetch all data from temp
:return:
"""
self._tmp_handle.seek(0)
return self._tmp_handle.read()
def __iter__(self):
""" Iterate parsed events
:return:
"""
self._tmp_handle.seek(0)
for line in self._tmp_handle:
yield line
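# --- Illustrative usage sketch (added; not part of the original file). ---
# Streams parsed eve.json records through the collector and prints the anonymised
# output; the eve.json path is an assumption and the code is left commented out so
# importing this module still only defines the class.
#
# collector = EventCollector()
# with open('/var/log/suricata/eve.json') as eve:
#     for line in eve:
#         collector.push(ujson.loads(line))
# for anonymised_record in collector:
#     print(anonymised_record)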
|
the-stack_106_25962 | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "pilpol-33525.botics.co"
site_params = {
"name": "PilPol",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
the-stack_106_25965 |
import numpy as np
import time as tm
import sys
def PrettyPrintComplexMatrix(matrix,prec=3,linewidth1=150,suppressOn=True):
np.set_printoptions(precision=prec,linewidth=linewidth1,suppress=suppressOn)
print('RE=\n', np.real(matrix))
print('Im=\n', np.imag(matrix))
def matrixMatrixMultiplication_good(A,B):
return np.dot(A,B)
def matrixMatrixMultiplication_dummy(A,B):
N = len(A)
C = np.zeros((N,N),dtype=complex)
for ii in range(0,N):
for jj in range(0,N):
for kk in range(0,N):
C[ii,jj] += A[ii,kk]*B[kk,jj]
return C
N = int(sys.argv[1])
print(N)
A = np.zeros((N,N),dtype=complex)
B = np.zeros((N,N),dtype=complex)
omega = np.exp(2.j*np.pi/N)
omega_i = 1.
omega_ij = 1.
# A is the Discrete Fourier Transform (DFT) matrix.
for ii in range(0,N):
for jj in range(0,N):
A[ii,jj] = omega_ij
B[jj,ii] = np.conj(A[ii,jj])/N
omega_ij *= omega_i
omega_i *= omega
print('\nA=\n')
PrettyPrintComplexMatrix(A)
print('\nB=\n')
PrettyPrintComplexMatrix(B)
t1 = tm.time()
C = matrixMatrixMultiplication_good(A,B)
print('\nC=\n')
PrettyPrintComplexMatrix(C)
print('\n\ntime for matrix multiplication: ', tm.time() - t1)
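# Added check (illustrative): B is the conjugate transpose of the DFT matrix divided
# by N, i.e. its inverse, so the product C should be the N x N identity matrix.
print('A @ B is close to the identity:', np.allclose(C, np.eye(N)))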
|
the-stack_106_25966 | from django.shortcuts import render, redirect
from django.utils.timezone import now
# NOTE: SandTypeForm is used below but was never imported in the original; assuming
# the conventional Django app layout, it lives in this app's forms module.
from .forms import SandTypeForm
from .models import (
    SandType,
    Topdressing,
    GreenTopdressing,
    TeeTopdressing,
    FairwayTopdressing
)
def curr_time():
return now()
def index(request):
context = {
'curr_time': curr_time(),
}
return render(request, 'topdressing/index.html', context)
def sandIndex(request):
sands = SandType.objects.all()
context = {
'curr_time': curr_time(),
'sands': sands,
}
return render(request, 'topdressing/sand_index.html', context)
def sandNew(request):
form = SandTypeForm()
context = {
'curr_time': curr_time(),
'form': form,
}
return render(request, 'topdressing/sand_new.html', context)
def sandCreate(request):
if request.method == 'POST':
form = SandTypeForm(data=request.POST)
if form.is_valid() and request.user.is_authenticated():
pending_form = form.save(commit=False)
pending_form.save()
return redirect('topd:sand_detail', pk=pending_form.pk)
def sandDetail(request, pk):
sand = SandType.objects.get(pk=pk)
context = {
'curr_time': curr_time(),
'sand': sand,
}
return render(request, 'topdressing/sand_detail.html', context)
def sandEdit(request, pk):
sand = SandType.objects.get(pk=pk)
form = SandTypeForm(instance=sand)
context = {
'curr_time': curr_time(),
'sand': sand,
'form': form,
}
    return render(request, 'topdressing/sand_edit.html', context)
def sandUpdate(request, pk):
sand = SandType.objects.get(pk=pk)
if request.method == 'POST':
form = SandTypeForm(request.POST, instance=sand)
if form.is_valid() and request.user.is_authenticated():
pending_form = form.save(commit=False)
pending_form.save()
return redirect('topd:sand_detail', pk=sand.pk)
def greensIndex(request):
topd = GreenTopdressing.objects.all().order_by(
'-topdress_date')[:20]
context = {
'curr_time': curr_time(),
'topd': topd,
}
return render(request, 'topdressing/greens_index.html', context)
|
the-stack_106_25967 | from __future__ import annotations
from typing import Union
from pathlib import Path
from dataclasses import dataclass, field
import math
import uuid
# useful geometric constants
TWO_PI = 2 * math.pi
PI_OVER_TWO = math.pi / 2
THREE_PI_OVER_TWO = 3 * math.pi / 2
def get_oriented_distance(p0: Point, p1: Point, p2: Point):
"""Calculate the oriented distance between a point and a line
Calculate the distance between a point p0 and a line formed between p1 and p2. This result is
the "oriented" distance, meaning it is signed. What this means that if you thought of the line
from p1-to-p2 as a vector that pointed up, as positive result would mean p0 was on the right
side of the vector and a negative result would mean p0 was on the left side.
"""
p21 = p2 - p1
p10 = p1 - p0
return (p21.x * p10.y - p21.y * p10.x) / abs(p21)
def get_distance(p0: Point, p1: Point, p2: Point):
"""Calculate the distance between a point and a line
Calculate the distance between a point p0 and a line formed between p1 and p2
"""
return abs(get_oriented_distance(p0, p1, p2))
@dataclass
class Point:
x: float
y: float
def __str__(self):
return f"{self.x} {self.y}"
def __add__(self, other: Point) -> Point:
return Point(self.x + other.x, self.y + other.y)
def __sub__(self, other: Point) -> Point:
return Point(self.x - other.x, self.y - other.y)
def __abs__(self) -> float:
return math.sqrt(self.x ** 2 + self.y ** 2)
def __eq__(self, other: Point) -> bool:
if not math.isclose(self.x, other.x):
return False
if not math.isclose(self.y, other.y):
return False
return True
def __ne__(self, other: Point) -> bool:
return not self.__eq__(other)
def __mul__(self, scaler: float) -> Point:
return Point(scaler * self.x, scaler * self.y)
def rotate_about(self, about: Point, angle: float):
""" Rotate a point around a reference point"""
delta = self - about
r = abs(delta)
a = math.atan2(delta.y, delta.x)
x = r * math.cos(a + angle)
y = r * math.sin(a + angle)
return Point(x, y)
def mirror_x(self):
return Point(self.x, -self.y)
def mirror_y(self):
return Point(-self.x, self.y)
def point_from_polar(radius, angle):
x = radius * math.cos(angle)
y = radius * math.sin(angle)
return Point(x, y)
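# Illustrative example (added; not part of the original file): the point (1, 0) lies
# to the right of the upward line from (0, 0) to (0, 1), so the oriented distance is
# +1.0, while get_distance returns the unsigned magnitude. Left as comments to keep
# this module free of import-time side effects.
#
# assert math.isclose(get_oriented_distance(Point(1, 0), Point(0, 0), Point(0, 1)), 1.0)
# assert math.isclose(get_distance(Point(1, 0), Point(0, 0), Point(0, 1)), 1.0)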
@dataclass
class Arc:
center: Point
radius: float
start_angle: float
end_angle: float
start: Point = field(init=False)
mid: Point = field(init=False)
end: Point = field(init=False)
bulge: float = field(init=False)
def __post_init__(self):
"""Derived parameters"""
# three-point representation
mid_angle = (self.start_angle + self.end_angle) / 2
self.start = self.center + point_from_polar(self.radius, self.start_angle)
self.mid = self.center + point_from_polar(self.radius, mid_angle)
self.end = self.center + point_from_polar(self.radius, self.end_angle)
# bulge (for dxf generation)
width = abs(self.end - self.start)
if self.start == self.end:
            self.bulge = 2
else:
sagitta = get_oriented_distance(self.mid, self.start, self.end)
self.bulge = 2 * sagitta / width
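        # Note: 2 * sagitta / chord works out to tan(included_angle / 4), i.e. the usual DXF
        # bulge convention; the sign of the sagitta encodes the arc's direction of rotation.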
def __str__(self):
return f"(arc (start {self.start}) (mid {self.mid}) (end {self.end}))"
def __add__(self, other: Point):
return Arc(self.center + other, self.radius, self.start_angle, self.end_angle)
def __mul__(self, scaler: float) -> Arc:
return Arc(
            self.center * scaler, scaler * self.radius, self.start_angle, self.end_angle
)
def rotates_clockwise(self):
return self.end_angle < self.start_angle
def rotates_counterclockwise(self):
return self.end_angle > self.start_angle
def reverse(self):
return Arc(self.center, self.radius, self.end_angle, self.start_angle)
def rotate(self, angle: float):
"""Rotate an arc about its center"""
return Arc(
self.center, self.radius, self.start_angle + angle, self.end_angle + angle
)
def rotate_about(self, about: Point, angle: float):
""" Rotate an arc around a reference point"""
# calculate the new center for the arc
center = self.center.rotate_about(about, angle)
return Arc(
center, self.radius, self.start_angle + angle, self.end_angle + angle
)
def mirror_x(self):
"""Mirror across the x axis"""
center = self.center.mirror_x()
start_angle = -self.start_angle
end_angle = -self.end_angle
return Arc(center, self.radius, start_angle, end_angle)
def mirror_y(self):
"""Mirror across the x axis"""
center = self.center.mirror_y()
start_angle = math.pi - self.start_angle
end_angle = math.pi - self.end_angle
return Arc(center, self.radius, start_angle, end_angle)
def interpolate(self, max_angle: float = math.pi / 36):
"""Create a PWL approximation of the arc with a list of points
"""
points = []
angle = self.start_angle
if self.rotates_counterclockwise():
while angle < self.end_angle:
point = Point(
self.radius * math.cos(angle), self.radius * math.sin(angle)
)
points.append(self.center + point)
angle += max_angle
else:
while angle > self.end_angle:
point = Point(
self.radius * math.cos(angle), self.radius * math.sin(angle)
)
points.append(self.center + point)
angle -= max_angle
point = Point(
self.radius * math.cos(self.end_angle),
self.radius * math.sin(self.end_angle),
)
points.append(self.center + point)
return points
def add_to_dxf_model(self, modelspace):
"""Add Arc to DXF model"""
modelspace.add_arc(
center=(self.center.x, self.center.y),
radius=self.radius,
start_angle=self.start_angle,
end_angle=self.end_angle,
)
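        # Caution (assumption): this module keeps angles in radians, while ezdxf interprets
        # ARC start_angle/end_angle in degrees, so a math.degrees() conversion may be needed
        # before handing the angles to add_arc().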
@dataclass
class Polygon:
points: [Point]
layer: str = "F.Cu"
width: float = 0
fill: str = "solid"
    tstamp: uuid.UUID = field(default_factory=uuid.uuid4)
def __add__(self, other: Point):
return Polygon(
[point + other for point in self.points], self.layer, self.width, self.fill
)
def __mul__(self, scaler: float) -> Polygon:
return Polygon(
            [point * scaler for point in self.points], self.layer, self.width, self.fill
)
def __str__(self):
points = "".join(
[
f"{point}" if isinstance(point, Arc) else f"(xy {point})"
for point in self.points
]
)
expression = f"(fp_poly(pts{points})(layer {self.layer}) (width {self.width}) (fill {self.fill}) (tstamp {self.tstamp}))"
return expression
def mirror_x(self):
"""Mirror the polygon about the x-axis"""
return Polygon(
[point.mirror_x() for point in self.points],
self.layer,
self.width,
self.fill,
)
def mirror_y(self):
"""Mirror the polygon about the x-axis"""
return Polygon(
[point.mirror_y() for point in self.points],
self.layer,
self.width,
self.fill,
)
def rotate_about(self, about: Point, angle: float):
""" Rotate a poygon around a reference point"""
return Polygon(
[point.rotate_about(about, angle) for point in self.points],
self.layer,
self.width,
self.fill,
)
def to_poly_path(self):
"""Create a true arc polypath
"""
points = []
last = None
for point in self.points:
if isinstance(point, Arc):
# if the start of this is not the end of the last arc, we need to add a point
if last is not None and point.start != last:
points.append((last.x, last.y))
points.append((point.start.x, point.start.y, 0, 0, point.bulge))
last = point.end
continue
if last is not None and point != last:
points.append((last.x, last.y))
points.append((point.x, point.y))
last = None
# Make sure to add the last point if there is one left over from an arc
if last is not None:
points.append((last.x, last.y))
return points
def to_pwl_path(self, max_angle: float = math.pi / 36):
"""Create a list of tuples representing the polypath as a PWL approximation
"""
points = []
for point in self.points:
if isinstance(point, Arc):
points.extend([(p.x, p.y) for p in point.interpolate(max_angle)])
continue
points.append((point.x, point.y))
return points
def to_wire(
self, z=0, closed=True, freecad_path: str = "C:/Program Files/FreeCAD 0.19/bin"
):
"""Convert the polygon to a FreeCAD Wire
"""
# try and import the FreeCAD python extension
try:
import sys
sys.path.append(freecad_path)
import FreeCAD as cad
except Exception:
raise ImportError("You must have FeeCAD installed")
import Part
        # first convert the polygon into a simple path of points
points = self.to_pwl_path()
verts = [cad.Vector(p[0], p[1], z) for p in points]
if closed:
if verts[0] != verts[-1]:
verts.append(verts[0])
return Part.makePolygon(verts)
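        # Usage sketch (illustrative; assumes a local FreeCAD installation at freecad_path):
        #   wire = Polygon([Point(0, 0), Point(10, 0), Point(10, 10)]).to_wire(z=1.6)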
def to_dxf(
self,
filename: Union[str, Path],
version: str = "R2000",
encoding: str = None,
fmt: str = "asc",
) -> None:
"""Export the polygon to a dxf file
Args:
filename: file name as string
version: DXF version
encoding: override default encoding as Python encoding string like ``'utf-8'``
fmt: ``'asc'`` for ASCII DXF (default) or ``'bin'`` for Binary DXF
"""
import ezdxf
doc = ezdxf.new(version)
msp = doc.modelspace()
path = self.to_poly_path()
msp.add_lwpolyline(path, close=True)
doc.saveas(filename, encoding, fmt)
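        # Usage sketch (illustrative file name):
        #   Polygon([Point(0, 0), Point(1, 0), Point(1, 1), Point(0, 1)]).to_dxf("square.dxf")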
def plot(self, ax=None, max_angle: float = math.pi / 36):
"""Create a plot preview of the polygon
"""
import matplotlib.pyplot as mp
path = self.to_pwl_path(max_angle)
x, y = zip(*path)
if ax is None:
mp.figure(figsize=(8, 8))
mp.axis("equal")
mp.fill(x, y)
else:
ax.fill(x, y)
ax.axis("equal")
if __name__ == "__main__":
p0 = Point(0, 4)
p1 = Point(4, 4)
p2 = Point(4, 0)
p3 = Point(0, 0)
arc = Arc(Point(2, 4), 1, math.pi, 0)
polygon = Polygon([p0, arc, p1, p2, p3])
# polygon.plot(max_angle=math.pi / 8)
path = polygon.to_poly_path()
print(path)
|
the-stack_106_25968 | import netCDF4 as nc
import sys
import argparse
import numpy as np
import configparser
from itertools import islice, chain, repeat
from netcdfTools import *
from mapTools import readNumpyZTile
'''
Tools for genPIDSInput.py
Author:
Sasu Karttunen
[email protected]
Institute for Atmospheric and Earth System Research (INAR) / Physics
University of Helsinki
UPDATE:
- Jani Stromberg: Added building type and surface fraction type/dimension. Fixed a bug in building
ids.
- Mona Kurppa: Added and modified chemistry and salsa variables
+ changes in the function "map". Does not return a list in Python 3.
- Jukka-Pekka Keskinen: Added CRS support.
'''
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def setPIDSGlobalAtrributes(ds, globalAttributes):
''' Set PIDS global attributes to data set file '''
strAttributeKeys = ['acronym', 'author', 'campaign', 'contact_person', 'creation_time',\
                      'comment', 'Conventions', 'data_content', 'dependencies', 'history',\
'keywords', 'license', 'location', 'origin_time', 'references', 'site',\
'source', 'title']
floatAttributeKeys = ['origin_x', 'origin_y', 'origin_z', 'origin_lat', 'origin_lon',\
'rotation_angle']
intAttributeKeys = ['version']
for key in strAttributeKeys:
try:
setattr(ds, key, globalAttributes[key])
except KeyError:
if(getattr(ds, key, "")==""):
setattr(ds, key, "")
# Mandatory
setattr(ds, 'Conventions', "CF-1.7")
for key in floatAttributeKeys:
try:
setattr(ds, key, float(globalAttributes[key]))
except KeyError:
if key=='origin_lat':
if(getattr(ds, key, 0.0)==0.0):
print("WARNING: origin_lat (latitude) not set. Using default value (55˚).")
setattr(ds, key, 55.0)
else:
if key in ['origin_lon', 'rotation_angle']:
print("WARNING: "+key+" not set. Using default value (0.0).")
if(getattr(ds, key, 0.0)==0.0):
setattr(ds, key, 0.0)
for key in intAttributeKeys:
try:
setattr(ds, key, int(globalAttributes[key]))
except KeyError:
if(getattr(ds, key, 0)==0):
setattr(ds, key, 0)
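# Example of the kind of mapping setPIDSGlobalAtrributes expects (values are illustrative):
#   globalAttributes = {'title': 'PIDS_STATIC for my domain', 'origin_lat': '60.2',
#                       'origin_lon': '24.9', 'origin_time': '2019-06-21 12:00:00 +03',
#                       'version': '1'}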
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readConfigSection(config, name):
''' Prints and returns variables found from given section '''
try:
configSection = config._sections[name]
except KeyError:
return None
for attr, val in configSection.items():
print("{}: {}".format(attr,val))
return configSection
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readConfigVariable(config, section, name):
try:
var = config.get(section, name)
except configparser.NoOptionError:
return None
return var
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def parseStringArrayInput(input_str, dtype):
# Return string type input as 2D numpy array
# Example input: "2,5,6\n3,6,7\n6,7,9"
# Example output np.array([[2,5,6],[3,6,7],[6,7,9]])
rows = input_str.split("\\n")
if(len(rows[0].split(","))==1):
rows=rows[1:]
arr = np.zeros((len(rows),len(rows[0].split(","))),dtype=dtype)
for i in range(len(rows)):
items = rows[i].split(",")
try:
arr[i,:] = np.array(list(map(dtype, items)))
except ValueError:
continue
return arr
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def parseCharacterArray(input_str, maxstrlen):
# Return string type input as 2D character array
# Example input: "abba,cd,acdc"
# Example output np.array([['a','b','b','a'],['c','d','',''],['a','c','d','c']])
items = input_str.split(",")
charr = map(list, items)
# Fill missing elements with empty char and construct a 2d array
def pad_array(charr):
return list(islice(chain(charr, repeat('')),maxstrlen))
charr = np.array(list(map(pad_array, charr)))
return charr
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# UNIVERSAL #
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createXDim(ds, nPx, dPx, dims, offset=0.0):
# Creates a new x-axis unless it already exists
if('x' not in dims):
x_dim = createCoordinateAxis(ds, nPx, dPx, 1, 'x', 'f4', 'm', True, False, verbose=False, offset=offset)
x_dim.long_name = "distance to origin in x-direction"
dims.append('x')
return x_dim
else:
x_dim = ds.variables['x']
return x_dim
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createYDim(ds, nPx, dPx, dims, offset=0.0):
# Creates a new y-axis unless it already exists
if('y' not in dims):
y_dim = createCoordinateAxis(ds, nPx, dPx, 0, 'y', 'f4', 'm', True, False, verbose=False, offset=offset)
y_dim.long_name = "distance to origin in y-direction"
dims.append('y')
return y_dim
else:
y_dim = ds.variables['y']
return y_dim
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createZDim(ds, nPx, dPx, dims, offset=0.0):
# Creates a new z-axis unless it already exists
if('z' not in dims):
z_dim = createCoordinateAxis(ds, nPx, dPx, 2, 'z', 'f4', 'm', True, False, verbose=False, offset=offset)
z_dim.long_name = "height above origin"
dims.append('z')
return z_dim
else:
z_dim = ds.variables['z']
return z_dim
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# PIDS_STATIC #
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createZLADDim(ds, nPx, dPx, dims):
# Creates a new zlad-axis unless it already exists
if('zlad' not in dims):
zlad_dim = createCoordinateAxis(ds, nPx, dPx, 2, 'zlad', 'f4', 'm', True, False, verbose=False)
zlad_dim.long_name = "height above origin"
dims.append('zlad')
return zlad_dim
else:
zlad_dim = ds.variables['zlad']
return zlad_dim
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createZnsurfaceFractionDim(ds, nPx, dPx, dims):
# Creates a new nsurface_fraction-axis unless it already exists
if('nsurface_fraction' not in dims):
znsurfaceFraction_dim = createCoordinateAxis(ds, nPx, dPx, 2, 'nsurface_fraction', 'i4', '',
True, False, verbose=False)
    znsurfaceFraction_dim.long_name = "number of surface fractions"
dims.append('nsurface_fraction')
return znsurfaceFraction_dim
else:
znsurfaceFraction_dim = ds.variables['nsurface_fraction']
return znsurfaceFraction_dim
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processOrography(fname,ds,vars,dims):
# Write orography data to given ds
oroDict = readNumpyZTile(fname,verbose=False)
oroR = oroDict['R'][::-1,:]
oroDPx = oroDict['dPx']
oroNPx = np.shape(oroR)
if('zt' in vars):
ds.variables['zt'][:]=oroR
return ds.variables['zt']
else:
# Create new dimensions or check if the orography matches old ones
x_dim = createXDim(ds, oroNPx, oroDPx, dims)
y_dim = createYDim(ds, oroNPx, oroDPx, dims)
oroNCVar = createNetcdfVariable(ds, oroR, 'zt', 0, 'm', 'f4', ('y','x'), False, False,
fill_value=-9999., verbose=False)
oroNCVar.long_name= "terrain_height"
return oroNCVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processLAD(fname,ds,vars,dims):
ladDict = readNumpyZTile(fname,verbose=False)
if( 'R' in ladDict ):
ladR = ladDict['R'][::-1,:]
elif('S' in ladDict ):
ladR = ladDict['S'][::-1,:,:] # Mirror j-direction in 3D array data (at this point)
ladDPx = ladDict['dPx']
ladNPx = np.shape(ladR)
ladLOD = len(ladNPx)-1 # 1=2D height field, 2=3D mask
ladR = np.rollaxis(ladR, 2, 0) # i.e. take axis=2 and position it _before_ axis=0
if('lad' in vars):
print(' lad is in vars ')
ds.variables['lad'][:]=ladR
return ds.variables['lad']
else:
print(' lad is NOT in vars ')
x_dim = createXDim(ds, ladNPx, ladDPx, dims)
y_dim = createYDim(ds, ladNPx, ladDPx, dims)
zlad_dim = createZLADDim(ds, ladNPx, ladDPx, dims)
ladNCVar = createNetcdfVariable(ds, ladR, 'lad', 0, 'm', 'f4', ('zlad','y','x'), False, False,
fill_value=-9999., verbose=False)
ladNCVar.long_name = "basal area density"
return ladNCVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processBuildings(fname,ds,vars,dims):
buildDict = readNumpyZTile(fname,verbose=False)
if ('R' in buildDict):
buildR = buildDict['R'][::-1,:]
elif ('S' in buildDict):
buildR = buildDict['S'][::-1,:,:] # Mirror j-direction in 3D array data (at this point)
else:
sys.exit('No R or S array present in the given building file.')
buildDPx = buildDict['dPx']
buildNPx = np.shape(buildR)
buildLOD = len(buildNPx)-1 # 1=2D height field, 2=3D mask
if(buildLOD==1):
# Save as a 2D building height array
if('buildings_2d' in vars):
ds.variables['buildings_2d'][:]=buildR
return ds.variables['buildings_2d']
else:
x_dim = createXDim(ds, buildNPx, buildDPx, dims)
y_dim = createYDim(ds, buildNPx, buildDPx, dims)
buildNCVar = createNetcdfVariable(ds, buildR, 'buildings_2d', 0, 'm', 'f4', ('y','x'), False,
False, fill_value=-9999., verbose=False)
buildNCVar.long_name = "building_height"
buildNCVar.lod = int(buildLOD)
return buildNCVar
elif(buildLOD==2):
'''
The 3d numpy array must come in in [j,i,k] order. Here it is rolled back into [k,j,i]
for NetCDF output. Thus, we roll axis=2 such that it ends up before the 0th axis.
'''
topo = np.rollaxis(buildR, 2, 0) # i.e. take axis=2 and position it _before_ axis=0
if('buildings_3d' in vars):
ds.variables['buildings_3d'][:]=topo
return ds.variables['buildings_3d']
else:
x_dim = createXDim(ds, buildNPx, buildDPx, dims)
y_dim = createYDim(ds, buildNPx, buildDPx, dims)
z_dim = createZDim(ds, buildNPx, buildDPx, dims, offset=-0.5)
buildNCVar = createNetcdfVariable(ds, topo, 'buildings_3d', 0, 'm', 'b', ('z','y','x'), False,
False, fill_value=-127, verbose=False)
buildNCVar.long_name = "building_flag"
buildNCVar.lod = int(buildLOD)
return buildNCVar
else:
raise ValueError("invalid number of dimensions in buildings array: {}".format(buildLOD+1))
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processSurfaceTemperature(fname,ds,vars,dims):
stDict = readNumpyZTile(fname,verbose=False)
if ('R' in stDict):
Rst = stDict['R'][::-1,:]
elif ('S' in stDict):
Rst = stDict['S'][::-1,:,:] # Mirror j-direction in 3D array data (at this point)
else:
sys.exit('No R or S array present in the given surface temperature file.')
dPx = stDict['dPx']
NPx = np.shape(Rst)
LOD = len(NPx)-1 # 1=2D height field, 2=3D mask
if(LOD==1):
# Save as a 2D building height array
if('theta_2d' in vars):
ds.variables['theta_2d'][:]=Rst
return ds.variables['theta_2d']
else:
x_dim = createXDim(ds, NPx, dPx, dims)
y_dim = createYDim(ds, NPx, dPx, dims)
NCVar = createNetcdfVariable(ds, Rst, 'theta_2d', 0, 'K', 'f4', ('y','x'), False,
False, fill_value=-9999., verbose=False)
NCVar.long_name = "surface_temperature"
NCVar.lod = int(LOD)
return NCVar
elif(LOD==2):
'''
The 3d numpy array must come in in [j,i,k] order. Here it is rolled back into [k,j,i]
for NetCDF output. Thus, we roll axis=2 such that it ends up before the 0th axis.
'''
T3d = np.rollaxis(Rst, 2, 0) # i.e. take axis=2 and position it _before_ axis=0
Rst = None # clear memory
if('theta_3d' in vars):
ds.variables['theta_3d'][:]=T3d
return ds.variables['theta_3d']
else:
x_dim = createXDim(ds, NPx, dPx, dims)
y_dim = createYDim(ds, NPx, dPx, dims)
z_dim = createZDim(ds, NPx, dPx, dims, offset=-0.5)
NCVar = createNetcdfVariable(ds, T3d, 'theta_3d', 0, 'K', 'f4', ('z','y','x'), False,
False, fill_value=-9999., verbose=False)
NCVar.long_name = "surface temperature"
NCVar.lod = int(LOD)
return NCVar
else:
raise ValueError("invalid number of dimensions in surface temperature array: {}".format(LOD+1))
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processBuildingIDs(fname,ds,vars,dims):
buildIDDict = readNumpyZTile(fname,verbose=False)
buildIDR = buildIDDict['R'][::-1,:]
buildIDDPx = buildIDDict['dPx']
buildIDNPx = np.shape(buildIDR)
if('building_id' in vars):
ds.variables['building_id'][:]=buildIDR
return ds.variables['building_id']
else:
x_dim = createXDim(ds, buildIDNPx, buildIDDPx, dims)
y_dim = createYDim(ds, buildIDNPx, buildIDDPx, dims)
buildIDNCVar = createNetcdfVariable(ds, buildIDR, 'building_id', 0, 'm', 'i4', ('y','x'), False,
False, fill_value=-9999, verbose=False)
buildIDNCVar.long_name = "building id numbers"
return buildIDNCVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processPavementType(fname,ds,vars,dims):
pavementTypeDict = readNumpyZTile(fname,verbose=False)
pavementTypeR = pavementTypeDict['R'][::-1,:]
pavementTypeDPx = pavementTypeDict['dPx']
pavementTypeNPx = np.shape(pavementTypeR)
if('pavement_type' in vars):
ds.variables['pavement_type'][:]=pavementTypeR
return ds.variables['pavement_type']
else:
x_dim = createXDim(ds, pavementTypeNPx, pavementTypeDPx, dims)
y_dim = createYDim(ds, pavementTypeNPx, pavementTypeDPx, dims)
pavementTypeNCVar = createNetcdfVariable(ds, pavementTypeR, 'pavement_type', 0, 'm', 'b',
('y','x'), False, False, fill_value=-127, verbose=False)
pavementTypeNCVar.long_name = "pavement type classification"
return pavementTypeNCVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processWaterType(fname,ds,vars,dims):
waterTypeDict = readNumpyZTile(fname,verbose=False)
waterTypeR = waterTypeDict['R'][::-1,:]
waterTypeDPx = waterTypeDict['dPx']
waterTypeNPx = np.shape(waterTypeR)
if('water_type' in vars):
ds.variables['water_type'][:]=waterTypeR
return ds.variables['water_type']
else:
x_dim = createXDim(ds, waterTypeNPx, waterTypeDPx, dims)
y_dim = createYDim(ds, waterTypeNPx, waterTypeDPx, dims)
waterTypeNCVar = createNetcdfVariable(ds, waterTypeR, 'water_type', 0, 'm', 'b', ('y','x'),
False, False, fill_value=-127, verbose=False)
waterTypeNCVar.long_name = "water type classification"
return waterTypeNCVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processSoilType(fname,ds,vars,dims):
soilTypeDict = readNumpyZTile(fname,verbose=False)
soilTypeR = soilTypeDict['R'][::-1,:]
soilTypeDPx = soilTypeDict['dPx']
soilTypeNPx = np.shape(soilTypeR)
if('soil_type' in vars):
ds.variables['soil_type'][:]=soilTypeR
return ds.variables['soil_type']
else:
x_dim = createXDim(ds, soilTypeNPx, soilTypeDPx, dims)
y_dim = createYDim(ds, soilTypeNPx, soilTypeDPx, dims)
soilTypeNCVar = createNetcdfVariable(ds, soilTypeR, 'soil_type', 0, 'm', 'b', ('y','x'), False,
False, fill_value=-127, verbose=False)
soilTypeNCVar.long_name = "soil type classification"
return soilTypeNCVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processStreetType(fname,ds,vars,dims):
streetTypeDict = readNumpyZTile(fname,verbose=False)
streetTypeR = streetTypeDict['R'][::-1,:]
streetTypeDPx = streetTypeDict['dPx']
streetTypeNPx = np.shape(streetTypeR)
if('street_type' in vars):
ds.variables['street_type'][:]=streetTypeR
return ds.variables['street_type']
else:
x_dim = createXDim(ds, streetTypeNPx, streetTypeDPx, dims)
y_dim = createYDim(ds, streetTypeNPx, streetTypeDPx, dims)
streetTypeNCVar = createNetcdfVariable(ds, streetTypeR, 'street_type', 0, 'm', 'b', ('y','x'),
False, False, fill_value=-127, verbose=False)
streetTypeNCVar.long_name = "street type classification"
return streetTypeNCVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processVegetationType(fname,ds,vars,dims):
vegetationTypeDict = readNumpyZTile(fname,verbose=False)
vegetationTypeR = vegetationTypeDict['R'][::-1,:]
vegetationTypeDPx = vegetationTypeDict['dPx']
vegetationTypeNPx = np.shape(vegetationTypeR)
if('vegetation_type' in vars):
ds.variables['vegetation_type'][:]=vegetationTypeR
return ds.variables['vegetation_type']
else:
x_dim = createXDim(ds, vegetationTypeNPx, vegetationTypeDPx, dims)
y_dim = createYDim(ds, vegetationTypeNPx, vegetationTypeDPx, dims)
vegetationTypeNCVar = createNetcdfVariable(ds, vegetationTypeR, 'vegetation_type', 0, 'm', 'b',
('y','x'), False, False, fill_value=-127,
verbose=False)
vegetationTypeNCVar.long_name = "vegetation type classification"
return vegetationTypeNCVar
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processBuildingType(fname, ds, vars, dims):
buildingTypeDict = readNumpyZTile(fname, verbose=False)
buildingTypeR = buildingTypeDict['R'][::-1, :]
buildingTypeDPx = buildingTypeDict['dPx']
buildingTypeNPx = np.shape(buildingTypeR)
if ('building_type' in vars):
ds.variables['building_type'][:] = buildingTypeR
return ds.variables['building_type']
else:
x_dim = createXDim(ds, buildingTypeNPx, buildingTypeDPx, dims)
y_dim = createYDim(ds, buildingTypeNPx, buildingTypeDPx, dims)
buildingTypeNCVar = createNetcdfVariable(ds, buildingTypeR, 'building_type', 0, 'm', 'b',
('y', 'x'), False, False, fill_value=-127,
verbose=False)
buildingTypeNCVar.long_name = "building type classification"
return buildingTypeNCVar
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processSurfaceFraction(fname, ds, vars, dims):
surfaceFractionDict = readNumpyZTile(fname, verbose=False)
surfaceFractionR = surfaceFractionDict['R'][::-1, :, :]
surfaceFractionDPx = surfaceFractionDict['dPx']
surfaceFractionNPx = np.shape(surfaceFractionR)
  # Reorder the (y, x, nsurface_fraction) raster into (nsurface_fraction, y, x) for NetCDF output
surfaceFractionR = np.swapaxes(surfaceFractionR, 0, 2)
surfaceFractionR = np.swapaxes(surfaceFractionR, 2, 1)
if ('surface_fraction' in vars):
ds.variables['surface_fraction'][:] = surfaceFractionR
return ds.variables['surface_fraction']
else:
x_dim = createXDim(ds, surfaceFractionNPx, surfaceFractionDPx, dims)
y_dim = createYDim(ds, surfaceFractionNPx, surfaceFractionDPx, dims)
znsurface_fraction_dim = createZnsurfaceFractionDim(ds, surfaceFractionNPx, surfaceFractionDPx,
dims)
surfaceFractionNCVar = createNetcdfVariable(ds, surfaceFractionR, 'surface_fraction', 0, 'm',
'f4', ('nsurface_fraction', 'y', 'x'), False, False,
fill_value=-9999., verbose=False)
surfaceFractionNCVar.long_name = "surface fraction"
return surfaceFractionNCVar
def processCRS(ds,cConf):
try:
crsVar = createNetcdfVariable(ds, None, 'crs', 0, cConf['units'], 'i4', [], False, False,
fill_value=None, verbose=False)
except KeyError:
print("WARNING: units for CRS are not set, using default (m)")
crsVar = createNetcdfVariable(ds, None, 'crs', 0, 'm', 'i4', [], False, False,
fill_value=None, verbose=False)
crsVar.long_name = 'coordinate reference system'
try:
crsVar.grid_mapping_name = cConf['grid_mapping_name']
except KeyError:
crsVar.grid_mapping_name = 'transverse_mercator'
print("WARNING: grid_mapping_name for CRS is not set, using default (transverse_mercator)")
try:
crsVar.semi_major_axis = float(cConf['semi_major_axis'])
crsVar.inverse_flattening = float(cConf['inverse_flattening'])
crsVar.longitude_of_prime_meridian = float(cConf['longitude_of_prime_meridian'])
crsVar.longitude_of_central_meridian = float(cConf['longitude_of_central_meridian'])
crsVar.latitude_of_projection_origin = float(cConf['latitude_of_projection_origin'])
crsVar.scale_factor_at_central_meridian = float(cConf['scale_factor_at_central_meridian'])
crsVar.false_easting = float(cConf['false_easting'])
crsVar.false_northing = float(cConf['false_northing'])
crsVar.epsg_code = cConf['epsg_code']
except KeyError:
print('\nOne or more mandatory crs keys are missing. Make sure you specify all: \n - semi_major_axis\n - inverse_flattening \n'
' - longitude_of_prime_meridian\n - longitude_of_central_meridian\n - latitude_of_projection_origin\n'
' - scale_factor_at_central_meridian\n - false_easting\n - false_northing\n - epsg_code\n')
raise KeyError("Missing CRS key(s)")
return crsVar
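# Example CRS settings (illustrative values in the style of ETRS89 / TM35FIN; the actual
# parameters must come from the user's configuration):
#   cConf = {'units': 'm', 'grid_mapping_name': 'transverse_mercator',
#            'semi_major_axis': '6378137.0', 'inverse_flattening': '298.257222101',
#            'longitude_of_prime_meridian': '0.0', 'longitude_of_central_meridian': '27.0',
#            'latitude_of_projection_origin': '0.0',
#            'scale_factor_at_central_meridian': '0.9996',
#            'false_easting': '500000.0', 'false_northing': '0.0', 'epsg_code': 'EPSG:3067'}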
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# Both PIDS_CHEM and PIDS_SALSA #
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createNcatDim(ds, ncat, dims):
# Creates a new ncat dim unless it already exists
if ('ncat' not in dims):
ncat_dim = createNetcdfVariable(ds, ncat, 'ncat', len(ncat), '', 'i4', ('ncat',),
parameter=True, verbose=False)
ncat_dim.long_name = "number of emission categories"
dims.append('ncat')
return ncat_dim
else:
return ds.variables['ncat']
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createStrlenDim(ds, strlen, dims):
# Creates a new strlen dim unless it already exists
if ('strlen' not in dims):
strlen_dim = createNetcdfVariable(ds, strlen, 'strlen', len(strlen), '', 'i4', ('strlen',),
parameter=True, verbose=False)
dims.append('strlen')
return strlen_dim
else:
return ds.variables['strlen']
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createNhoursyearDim(ds, nhoursyear, dims):
# Creates a new nhoursyear dim unless it already exists
if ('nhoursyear' not in dims):
nhoursyear_dim = createNetcdfVariable(ds, nhoursyear, 'nhoursyear', len(nhoursyear), '', 'i4',
('nhoursyear',), parameter=True, verbose=False)
dims.append('nhoursyear')
return nhoursyear_dim
else:
return ds.variables['nhoursyear']
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createNmonthdayhourDim(ds, nmonthdayhour, dims):
# Creates a new nmonthdayhour dim unless it already exists
if ('nmonthdayhour' not in dims):
nmonthdayhour_dim = createNetcdfVariable(ds, nmonthdayhour, 'nmonthdayhour', len(nmonthdayhour),
'', 'i4', ('nmonthdayhour',), parameter=True,
verbose=False)
dims.append('nmonthdayhour')
return nmonthdayhour_dim
else:
return ds.variables['nmonthdayhour']
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createTimeDim(ds, time, dims):
# Creates a new ncat dim unless it already exists
if ('time' not in dims):
time = list(map(float, time.split(",")))
time_dim = createNetcdfVariable(ds, time, 'time', len(time), 's', 'f4', ('time',),
parameter=True, verbose=False)
time_dim.long_name = "seconds since the beginning of the simulation"
dims.append('time')
return time_dim
else:
return ds.variables['time']
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processEmissionTimestamp(ds, originTime, vars, dims):
# Creates a new ncat dim unless it already exists
from datetime import datetime, timedelta
if ('field_len' not in dims):
field_len = 64 # some default value, I don't know why (Mona)
ds.createDimension('field_len', field_len)
if ('timestamp' not in vars):
time = ds.variables['time'][:]
dtOriginTime = datetime.strptime(originTime+"00", "%Y-%m-%d %H:%M:%S %z")
timestamp = []
emptyStr = " " * field_len
for t in range(len(time)):
dt = timedelta(seconds = int(time[t]))
dtStr = dtOriginTime + dt
timestampStr = dtStr.strftime("%Y-%m-%d %H:%M:%S %z")[:-2].ljust(field_len)
timestamp.append(timestampStr)
timestampVar = ds.createVariable('timestamp', 'S1', ('time','field_len',))
timestampVar[:] = list(map(lambda x : list(x), timestamp))
timestampVar.long_name = "timestamps since the beginning of the simulation"
vars.append('timestamp')
return timestampVar
else:
return ds.variables['timestamp']
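# Note (assumption): originTime is expected as "YYYY-MM-DD HH:MM:SS +ZZ", for example
# "2019-06-21 12:00:00 +03"; the appended "00" extends the offset to "+0300" so that
# strptime's %z directive can parse it.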
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processEmissionCategoryIndices(emiCatInds, ds, vars, dims):
# In my opinion ncat is completely redundant parameter, emission_category_index should
# be a coordinate variable with a length of ncat instead. Current setup in PALM doesn't
# make any sense (Sasu)
try:
emiCatInds = list(map(int, emiCatInds.split(",")))
except TypeError:
print("Error: invalid value for emission_category_index in configuration file, expected a"+
" comma-delimited list")
exit(1)
if ('emission_category_index' in vars):
ds.variables['emission_category_index'][:] = emiCatInds
return ds.variables['emission_category_index']
else:
ncat_dim = createNcatDim(ds, np.arange(1, len(emiCatInds)+1, 1), dims)
emiCatIndsVar = createNetcdfVariable(ds, emiCatInds, 'emission_category_index', 0, '', 'i1',
('ncat',), parameter=False, verbose=False)
emiCatIndsVar.long_name = "emission category index"
emiCatIndsVar.standard_name = 'emission_cat_index'
return emiCatIndsVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processEmissionCategoryNames(emiCatName, ds, vars, dims):
# Creates emission_category_name unless it already exists
try:
    # Max string length is chosen quite arbitrarily here
maxstrlen = 25
emiCatName = parseCharacterArray(emiCatName, maxstrlen)
except TypeError:
print("Error: invalid value for emission_category_name in configuration file, expected a " +
"comma delimited array")
exit(1)
if ('emission_category_name' in vars):
ds.variables['emission_category_name'][:] = emiCatName
return ds.variables['emission_category_name']
else:
ncat_dim = createNcatDim(ds, np.arange(1, np.shape(emiCatName)[0]+1, 1), dims)
strlen_dim = createStrlenDim(ds, np.arange(1, maxstrlen+1, 1), dims)
emiCatNameVar = createNetcdfVariable(ds, np.array(emiCatName), 'emission_category_name', 0, '',
'S1', ('ncat','strlen',), parameter=False, verbose=False)
emiCatNameVar.long_name = 'emission category name'
return emiCatNameVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processEmissionTimeFactors(fname, lod, ds, vars, dims):
# Create emission_time_factors unless it already exists
# Try to read 'emission_time_factors' array from a given input file
constant_factor = False
try:
emiTimeFactors = np.genfromtxt(fname, delimiter=",")
ncat = np.shape(emiTimeFactors)[0]
ncat_dim = createNcatDim(ds, np.arange(1, np.shape(emiTimeFactors)[0]+1, 1), dims)
except:
print("Cannot read emission_time_factors data from file \"{}\"".format(fname) +
". Use a constant factor emission_time_factors=1.0 and ncat=1.")
factor = 1.0
constant_factor = True
if ('ncat' in dims):
ncat = len(ds.variables['ncat'][:])
else:
ncat = 1
ncat_dim = createNcatDim(ds, [ncat], dims)
# Check the level of detail
if (lod is None or lod=='2'):
# Using a weighting factors given separately for each hour of the year
nhoursyear = np.arange(1, 8760+1, 1) # 24*365
if constant_factor:
emiTimeFactors = np.zeros([ncat, len(nhoursyear)], dtype=float) + factor
if (np.shape(emiTimeFactors)[-1]!=8760):
raise ValueError("emission_time_factors data must contain exactly 8760 datapoints for "+
"every emission category when emission_time_factors_lod = 2")
if ('emission_time_factors' in vars):
if (np.shape(ds.variables['emission_time_factors'])[-1]!=8760):
raise ValueError("The dimensions of the existing emission_time_factors does not match "+
"with the new emission_time_factors data")
ds.variables['emission_time_factors'][:] = emiTimeFactors
return ds.variables['emission_time_factors']
else:
nhoursyear_dim = createNhoursyearDim(ds, nhoursyear, dims)
emiTimeFactorsVar = createNetcdfVariable(ds, emiTimeFactors, 'emission_time_factors', 0, '',
'f4', ('ncat','nhoursyear',), parameter=False,
verbose=False)
emiTimeFactorsVar.long_name = "emission time scaling factors"
emiTimeFactorsVar.lod = 2
return emiTimeFactorsVar
elif (lod=='1'):
# Using a weighting factors based on the month, day of week and hour
nmonthdayhour = np.arange(1, 91+1, 1) # see the documentation
if constant_factor:
      emiTimeFactors = np.zeros([ncat, len(nmonthdayhour)], dtype=float) + factor
if (np.shape(emiTimeFactors)[-1]!=91):
raise ValueError("emission_time_factors data must contain exactly 90 datapoints for "+
"every emission category when emission_time_factors_lod = 1")
if ('emission_time_factors' in vars):
      if (np.shape(ds.variables['emission_time_factors'])[-1]!=91):
raise ValueError("The dimensions of the existing emission_time_factors does not match "+
"with the new emission_time_factors data")
ds.variables['emission_time_factors'][:] = emiTimeFactors
return ds.variables['emission_time_factors']
else:
nhoursyear_dim = createNmonthdayhourDim(ds, nmonthdayhour, dims)
emiTimeFactorsVar = createNetcdfVariable(ds, emiTimeFactors, 'emission_time_factors', 0, '',
'f4', ('ncat','nmonthdayhour',), parameter=False,
verbose=False)
emiTimeFactorsVar.long_name = "emission time scaling factors"
emiTimeFactorsVar.lod = 1
return emiTimeFactorsVar
else:
raise ValueError("invalid value for emission_time_factors_lod: {}".format(lod))
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# PIDS_CHEM #
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createZChemDim(ds, z, dims):
# Creates a new z dim unless it already exists
if ('z' not in dims):
zchem_dim = createNetcdfVariable(ds, z, 'z', len(z), '', 'i4', ('z',), parameter=True,
verbose=False)
zchem_dim.long_name = "distance to origin in z-direction"
dims.append('z')
return zchem_dim
else:
return ds.variables['z']
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createNspeciesDim(ds, nspecies, dims):
# Creates a new nspecies dim unless it already exists
if ('nspecies' not in dims):
nspecies_dim = createNetcdfVariable(ds, nspecies, 'nspecies', len(nspecies), '', 'i4',
('nspecies',), parameter=True, verbose=False)
nspecies_dim.long_name = "number of emission species"
dims.append('nspecies')
return nspecies_dim
else:
return ds.variables['nspecies']
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processEmissionIndices(emiInds, ds, vars, dims):
# Creates emission_index unless it already exists. Again, nspecies is redundant (Sasu)
try:
emiInds = list(map(int, emiInds.split(",")))
except TypeError:
print("Error: invalid value for emission_index in configuration file, expected a comma " +
"delimited list")
exit(1)
if ('emission_index' in vars):
ds.variables['emission_index'][:] = emiInds
return ds.variables['emission_index']
else:
nspecies_dim = createNspeciesDim(ds, np.arange(1, len(emiInds)+1, 1), dims)
emiIndsVar = createNetcdfVariable(ds, emiInds, 'emission_index', 0, '', 'u2', ('nspecies',),
parameter=False, verbose=False)
emiIndsVar.long_name = "emission species index"
return emiIndsVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processEmissionNames(emiName, ds, vars, dims):
# Creates emission_name unless it already exists
try:
    # Max string length is chosen quite arbitrarily here
maxstrlen = 25
emiName = parseCharacterArray(emiName, maxstrlen)
except TypeError:
print("Error: invalid value for emission_name in configuration file, expected a "+
"comma delimited array")
exit(1)
if ('emission_name' in vars):
ds.variables['emission_name'][:] = emiName
return ds.variables['emission_name']
else:
strlen_dim = createStrlenDim(ds, np.arange(1, maxstrlen+1, 1), dims)
emiNameVar = createNetcdfVariable(ds, np.array(emiName), 'emission_name', 0, '', 'S1',
('nspecies','strlen',), parameter=False, verbose=False)
emiNameVar.long_name = "emission species name"
return emiNameVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processEmissionValues(emiStr, fnameSource, unit, lod, ds, vars, dims):
# Creates aerosol_emission_values unless it already exists
# Read in a source map: contains emission category (ncat) values
sourceDict = readNumpyZTile(fnameSource, verbose=False)
sourceR = sourceDict['R'][::-1,:,:]
sourceDPx = sourceDict['dPx']
sourceNPx = np.shape(sourceR)
try:
emiVals = parseStringArrayInput(emiStr, float) # emission value (per time and) per category
except TypeError:
print("Error: invalid value for emission_values in configuration file, expected a " +
"comma-delimited list")
exit(1)
nspecies = len(ds.variables['nspecies'][:]) # number of chemical species
ncat = len(ds.variables['ncat'][:]) # number of emission categories
if (not 'emission_values' in vars):
x_dim = createXDim(ds, sourceNPx, sourceDPx, dims)
y_dim = createYDim(ds, sourceNPx, sourceDPx, dims)
z_dim = createZChemDim(ds, np.array([1]), dims)
if (lod == '1'): # emission_values(z,y,x,nspecies,ncat) where z=1
emission_values = np.zeros([1, sourceNPx[0], sourceNPx[1], nspecies, ncat],
dtype=float) - 9999.
emiVals = np.squeeze(emiVals)
for ispec in range(nspecies):
for n in range(ncat):
emission_values[0,sourceR[:,:,n]==1,ispec,n] = emiVals[ispec,n]
if ('emission_values' in vars):
ds.variables['emission_values'][:] = emission_values
return ds.variables['emission_values']
else:
emiValVar = createNetcdfVariable(ds, emission_values, 'emission_values', 0, unit, 'f4',
('z','y','x','nspecies','ncat',), False, False,
fill_value=-9999., verbose=False)
emiValVar.long_name= 'emission values'
emiValVar.lod = 1
return emiValVar
elif (lod is None or lod == '2'): # emission_values(time,z,y,x,nspecies) where z=1
times = ds.variables['time']
emission_values = np.zeros([len(times), 1, sourceNPx[0], sourceNPx[1], nspecies], dtype=float)
for n in range(ncat):
for t in range(len(times)):
for ispec in range(nspecies):
emission_values[t,0,sourceR[:,:,n]==1,ispec] += emiVals[ispec,t]
emission_values[emission_values < 1e-30] = -9999. # fill value
if ('emission_values' in vars):
ds.variables['emission_values'][:] = emission_values
return ds.variables['emission_values']
else:
emiValVar = createNetcdfVariable(ds, emission_values, 'emission_values', 0, unit, 'f4',
('time','z','y','x','nspecies',), False, False,
fill_value=-9999., verbose=False)
emiValVar.long_name= 'emission values'
emiValVar.lod = 2
return emiValVar
else:
raise ValueError("invalid value for emission_lod: {}".format(lod))
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# PIDS_SALSA #
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def createCompositionIndexDim(ds, composition_index, dims):
# Creates a new composition_index dim unless it already exists
if ('composition_index' not in dims):
composition_index_dim = createNetcdfVariable(ds, composition_index, 'composition_index',
len(composition_index), '', 'i4',
('composition_index',), parameter=True, verbose=False)
composition_index_dim.long_name = "aerosol composition index"
dims.append('composition_index')
return composition_index_dim
else:
return ds.variables['composition_index']
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processCompositionNames(compName, ds, vars, dims):
# Creates a new composition_name variable unless it already exists
try:
    # Max string length is chosen quite arbitrarily here
maxstrlen = 25
compName = parseCharacterArray(compName, maxstrlen)
except TypeError:
print("Error: invalid value for composition_name in configuration file, expected a "+
"comma delimited array")
exit(1)
if ('composition_name' in vars):
ds.variables['composition_name'][:] = compName
return ds.variables['composition_name']
else:
composition_index = np.arange(1, np.shape(np.array(compName))[0]+1, 1)
composition_index_dim = createCompositionIndexDim(ds, composition_index, dims)
compNameVar = ds.createVariable('composition_name', 'S1', ('composition_index','strlen',))
compNameVar[:] = list(map(lambda x : list(x), compName))
compNameVar.long_name = 'aerosol composition name'
return compNameVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processAerosolEmissionValues(emiStr, fnameSource, unit, lod, ds, vars, dims):
# Creates aerosol_emission_values unless it already exists
# Read in a source map: contains emission category (ncat) values
sourceDict = readNumpyZTile(fnameSource, verbose=False)
sourceR = sourceDict['R'][::-1,:,:]
sourceDPx = sourceDict['dPx']
sourceNPx = np.shape(sourceR)
try:
emiVals = parseStringArrayInput(emiStr, float) # emission value (per time and) per category
except TypeError:
print("Error: invalid value for aerosol_emission_values in configuration file, expected a " +
"comma-delimited list")
exit(1)
if ('aerosol_emission_values' not in vars):
x_dim = createXDim(ds, sourceNPx, sourceDPx, dims)
y_dim = createYDim(ds, sourceNPx, sourceDPx, dims)
ncat = np.shape(emiVals)[0]
ncat_dim = createNcatDim(ds, np.arange(1, ncat+1, 1), dims)
if (lod == '1'):
aerosol_emission_values = np.zeros([sourceNPx[0], sourceNPx[1], ncat], dtype=float) - 9999.
emiVals = np.squeeze(emiVals)
for n in range(ncat):
aerosol_emission_values[sourceR[:,:,n]==1,n] = emiVals[n]
if ('aerosol_emission_values' in vars):
ds.variables['aerosol_emission_values'][:] = aerosol_emission_values
return ds.variables['aerosol_emission_values']
else:
emiValVar = createNetcdfVariable(ds, aerosol_emission_values, 'aerosol_emission_values', 0,
unit, 'f4', ('y','x','ncat',), False, False, fill_value=-9999.,
verbose=False)
emiValVar.long_name= 'aerosol emission values'
emiValVar.lod = 1
return emiValVar
elif (lod is None or lod == '2'):
times = ds.variables['time']
aerosol_emission_values = np.zeros([len(times), sourceNPx[0], sourceNPx[1], ncat],
dtype=float) - 9999.
for t in range(len(times)):
for n in range(ncat):
aerosol_emission_values[t,sourceR[:,:,n]==1,n] = emiVals[n,t]
if ('aerosol_emission_values' in vars):
ds.variables['aerosol_emission_values'][:] = aerosol_emission_values
return ds.variables['aerosol_emission_values']
else:
emiValVar = createNetcdfVariable(ds, aerosol_emission_values, 'aerosol_emission_values', 0,
unit, 'f4', ('time','y','x','ncat',), False, False,
fill_value=-9999., verbose=False)
emiValVar.long_name= 'aerosol emission values'
emiValVar.lod = 2
return emiValVar
else:
raise ValueError("invalid value for aerosol_emission_lod: {}".format(lod))
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processEmissionMassFracs(emiMassFracsD, ds, vars, dims):
# Creates emission_mass_fracs unless it already exists
ncat = len(ds.variables['ncat'])
try:
emiMassFracs = parseStringArrayInput(emiMassFracsD, float)
except TypeError:
print("Error: invalid value for emission_mass_fracs in configuration file, expected a " +
"newline and comma delimited matrix")
exit(1)
if (np.shape(emiMassFracs)[0] != ncat):
raise ValueError("Not correct dimensions of emission_mass_fracs(ncat,composition_index)")
if (np.shape(emiMassFracs)[1] != len(ds.variables['composition_index'])):
raise ValueError("Not correct dimensions of emission_mass_fracs(ncat,composition_index)")
if ('emission_mass_fracs' in vars):
ds.variables['emission_mass_fracs'][:] = emiMassFracs
return ds.variables['emission_mass_fracs']
else:
emiMassFracsVar = createNetcdfVariable(ds, emiMassFracs, 'emission_mass_fracs', 0, '', 'f4',
('ncat','composition_index',), False, False,
fill_value=-9999., verbose=False)
emiMassFracsVar.long_name = "mass fractions of chemical components in aerosol emissions"
emiMassFracsVar.units = ""
return emiMassFracsVar
#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def processEmissionNumberFracs(emiNumberFracsD, emiDmid, ds, vars, dims):
# Creates emission_numbs_fracs unless it already exists
ncat = len(ds.variables['ncat'])
if ('Dmid' in dims):
emiDmids = ds.variables['Dmid']
else:
try:
emiDmids = list(map(float, emiDmid.split(",")))
dmid_Dim = createNetcdfVariable(ds, emiDmids, 'Dmid', len(emiDmids), 'm', 'f4', ('Dmid',),
parameter=True, verbose=False)
dims.append('Dmid')
except TypeError:
print("Error: invalid value for aerosol_emission_dmid in configuration file, expected a " +
"comma-delimited list")
exit(1)
nbins = len(emiDmids)
try:
emiNumberFracs = parseStringArrayInput(emiNumberFracsD, float)
except TypeError:
print("Error: invalid value for composition_aerosol in configuration file, expected a " +
"newline and comma delimited matrix")
exit(1)
if (np.shape(emiNumberFracs)[0] != ncat):
raise ValueError("Incorrect 0 dimension in emission_number_fracs(ncat,Dmid)")
if (np.shape(emiNumberFracs)[1] != nbins):
raise ValueError("Incorrect 1 dimension in emission_number_fracs(ncat,Dmid)")
if ('emission_number_fracs' in vars):
ds.variables['emission_number_fracs'][:] = emiNumberFracs
return ds.variables['emission_number_fracs']
else:
emiNumberFracsVar = createNetcdfVariable(ds, emiNumberFracs, 'emission_number_fracs', 0, '',
'f4', ('ncat','Dmid',), False, False,
fill_value=-9999., verbose=False)
emiNumberFracsVar.long_name = "number fractions of aerosol size bins in aerosol emissions"
emiNumberFracsVar.units = ""
return emiNumberFracsVar
|
the-stack_106_25970 | import os
import sys
import time
from contextlib import contextmanager
from dagster import check
if sys.version_info >= (3, 3):
time_fn = time.perf_counter
elif os.name == 'nt':
time_fn = time.clock
else:
time_fn = time.time
class TimerResult(object):
def __init__(self):
self.start_time = time_fn()
self.end_time = None
@property
def seconds(self):
check.invariant(self.end_time is not None, 'end time is not set')
return self.end_time - self.start_time
@property
def millis(self):
return self.seconds * 1000
@contextmanager
def time_execution_scope():
'''Usage:
from solid_util.timing import time_execution_scope
with time_execution_scope() as timer_result:
do_some_operation()
print(
'do_some_operation took {timer_result.millis} milliseconds'.format(
timer_result=timer_result
)
)
'''
timer_result = TimerResult()
yield timer_result
timer_result.end_time = time_fn()
|
the-stack_106_25972 | #!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
import threading
import sys
import os
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import (QTextEdit, QVBoxLayout, QLabel, QGridLayout, QHBoxLayout,
QRadioButton, QCheckBox, QLineEdit)
from electrum_redd.gui.qt.util import (read_QIcon, WindowModalDialog, WaitingDialog, OkButton,
CancelButton, Buttons, icon_path, WWLabel, CloseButton)
from electrum_redd.gui.qt.qrcodewidget import QRCodeWidget
from electrum_redd.gui.qt.amountedit import AmountEdit
from electrum_redd.gui.qt.main_window import StatusBarButton
from electrum_redd.gui.qt.installwizard import InstallWizard
from electrum_redd.i18n import _
from electrum_redd.plugin import hook
from electrum_redd.util import is_valid_email
from electrum_redd.logging import Logger
from electrum_redd.base_wizard import GoBack
from .trustedcoin import TrustedCoinPlugin, server
class TOS(QTextEdit):
tos_signal = pyqtSignal()
error_signal = pyqtSignal(object)
class HandlerTwoFactor(QObject, Logger):
def __init__(self, plugin, window):
QObject.__init__(self)
self.plugin = plugin
self.window = window
Logger.__init__(self)
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.plugin.wallet_class):
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
self.logger.info("twofactor: xpub3 not needed")
return
window = self.window.top_level_window()
auth_code = self.plugin.auth_dialog(window)
WaitingDialog(parent=window,
message=_('Waiting for TrustedCoin server to sign transaction...'),
task=lambda: wallet.on_otp(tx, auth_code),
on_success=lambda *args: on_success(tx),
on_error=on_failure)
class Plugin(TrustedCoinPlugin):
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
@hook
def on_new_window(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
wallet.handler_2fa = HandlerTwoFactor(self, window)
if wallet.can_sign_without_server():
msg = ' '.join([
_('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(read_QIcon("trustedcoin-status.png"),
_("TrustedCoin"), action)
window.statusBar().addPermanentWidget(button)
self.start_request_thread(window.wallet)
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
msg = _('If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure)
def waiting_dialog_for_billing_info(self, window, *, on_finished=None):
def task():
return self.request_billing_info(window.wallet, suppress_connection_error=False)
def on_error(exc_info):
e = exc_info[1]
window.show_error("{header}\n{exc}\n\n{tor}"
.format(header=_('Error getting TrustedCoin account info.'),
exc=repr(e),
tor=_('If you keep experiencing network problems, try using a Tor proxy.')))
return WaitingDialog(parent=window,
message=_('Requesting account info from TrustedCoin server...'),
task=task,
on_success=on_finished,
on_error=on_error)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.waiting_dialog_for_billing_info(window)
return True
return False
def settings_dialog(self, window):
self.waiting_dialog_for_billing_info(window,
on_finished=partial(self.show_settings_dialog, window))
def show_settings_dialog(self, window, success):
if not success:
window.show_message(_('Server not reachable.'))
return
wallet = window.wallet
d = WindowModalDialog(window, _("TrustedCoin Information"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(icon_path("trustedcoin-status.png")))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
+ _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(1)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
price_per_tx = wallet.price_per_tx
n_prepay = wallet.num_prepay()
i = 0
for k, v in sorted(price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0)
grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1)
b = QRadioButton()
b.setChecked(k == n_prepay)
b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
grid.addWidget(b, i, 2)
i += 1
n = wallet.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def go_online_dialog(self, wizard: InstallWizard):
msg = [
_("Your wallet file is: {}.").format(os.path.abspath(wizard.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.reset_stack()
try:
wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('accept_terms_of_use'))
except GoBack:
# user clicked 'Cancel' and decided to move wallet file manually
storage, db = wizard.create_storage(wizard.path)
raise
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = TOS()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
tos_received = False
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
next_button = window.next_button
prior_button_text = next_button.text()
next_button.setText(_('Accept'))
def request_TOS():
try:
tos = server.get_terms_of_service()
except Exception as e:
self.logger.exception('Could not retrieve Terms of Service')
tos_e.error_signal.emit(_('Could not retrieve Terms of Service:')
+ '\n' + repr(e))
return
self.TOS = tos
tos_e.tos_signal.emit()
def on_result():
tos_e.setText(self.TOS)
nonlocal tos_received
tos_received = True
set_enabled()
def on_error(msg):
window.show_error(str(msg))
window.terminate()
def set_enabled():
next_button.setEnabled(tos_received and is_valid_email(email_e.text()))
tos_e.tos_signal.connect(on_result)
tos_e.error_signal.connect(on_error)
t = threading.Thread(target=request_TOS)
t.setDaemon(True)
t.start()
email_e.textChanged.connect(set_enabled)
email_e.setFocus(True)
window.exec_layout(vbox, next_enabled=False)
next_button.setText(prior_button_text)
email = str(email_e.text())
self.create_remote_key(email, window)
def request_otp_dialog(self, window, short_id, otp_secret, xpub3):
vbox = QVBoxLayout()
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel(
"This wallet is already registered with TrustedCoin. "
"To finalize wallet creation, please enter your Google Authenticator Code. "
)
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
vbox.addWidget(cb_lost)
cb_lost.setVisible(otp_secret is None)
def set_enabled():
b = True if cb_lost.isChecked() else len(pw.text()) == 6
window.next_button.setEnabled(b)
pw.textChanged.connect(set_enabled)
cb_lost.toggled.connect(set_enabled)
window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False)
self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())
|
the-stack_106_25973 | import logging
import os
import sys
import httpx
from coverage_comment import badge, comment_file
from coverage_comment import coverage as coverage_module
from coverage_comment import (
github,
github_client,
log,
settings,
subprocess,
template,
wiki,
)
def main():
logging.basicConfig(level="INFO")
log.info("Starting action")
config = settings.Config.from_environ(environ=os.environ)
if config.VERBOSE:
logging.getLogger().setLevel("DEBUG")
log.debug(f"Settings: {config}")
github_session = httpx.Client(
base_url="https://api.github.com",
follow_redirects=True,
headers={"Authorization": f"token {config.GITHUB_TOKEN}"},
)
#http_session = httpx.Client()
git = subprocess.Git()
exit_code = action(
config=config,
github_session=github_session,
#http_session=http_session,
git=git,
)
log.info("Ending action")
sys.exit(exit_code)
def action(
config: settings.Config,
github_session: httpx.Client,
#http_session: httpx.Client,
git: subprocess.Git,
):
log.debug(f"Operating on {config.GITHUB_REF}")
event_name = config.GITHUB_EVENT_NAME
if event_name not in {"pull_request", "push", "workflow_run"}:
log.error(
            'This action has only been designed to work for "pull_request", "push" '
            f'or "workflow_run" events, not "{event_name}", because there are security '
            "implications. If you have a different use case, please open an issue; "
            "we'll be glad to add compatibility."
)
return 1
if event_name in {"pull_request", "push"}:
coverage = coverage_module.get_coverage_info(merge=config.MERGE_COVERAGE_FILES)
log.info("coverage_percent = %s" % coverage.info.percent_covered)
if event_name == "pull_request":
return generate_comment(
config=config,
coverage=coverage,
github_session=github_session,
#http_session=http_session,
git=git,
)
else:
# event_name == "push"
return save_badge(
config=config,
coverage=coverage,
github_session=github_session,
git=git,
)
else:
# event_name == "workflow_run"
return post_comment(
config=config,
github_session=github_session,
)
def generate_comment(
config: settings.Config,
coverage: coverage_module.Coverage,
github_session: httpx.Client,
#http_session: httpx.Client,
git: subprocess.Git,
):
log.info("Generating comment for PR")
diff_coverage = coverage_module.get_diff_coverage_info(
base_ref=config.GITHUB_BASE_REF
)
previous_coverage_data_file = wiki.get_file_contents(
github_token=config.GITHUB_TOKEN,
repository=config.GITHUB_REPOSITORY,
filename=config.BADGE_FILENAME,
git=git,
)
previous_coverage = None
if previous_coverage_data_file:
previous_coverage = badge.parse_badge(contents=previous_coverage_data_file)
comment = template.get_markdown_comment(
coverage=coverage,
diff_coverage=diff_coverage,
previous_coverage_rate=previous_coverage,
template=template.read_template_file(),
)
gh = github_client.GitHub(session=github_session)
try:
github.post_comment(
github=gh,
me=github.get_my_login(github=gh),
repository=config.GITHUB_REPOSITORY,
pr_number=config.GITHUB_PR_NUMBER,
contents=comment,
marker=template.MARKER,
)
except github.CannotPostComment:
log.debug("Exception when posting comment", exc_info=True)
log.info(
"Cannot post comment. This is probably because this is an external PR, so "
"it's expected. Ensure you have an additional `workflow_run` step "
"configured as explained in the documentation (or alternatively, give up "
"on PR comments for external PRs)."
)
comment_file.store_file(
filename=config.COMMENT_FILENAME,
content=comment,
)
github.set_output(COMMENT_FILE_WRITTEN=True)
log.debug("Comment stored locally on disk")
else:
github.set_output(COMMENT_FILE_WRITTEN=False)
log.debug("Comment not generated")
return 0
def post_comment(config: settings.Config, github_session: httpx.Client):
log.info("Posting comment to PR")
if not config.GITHUB_PR_RUN_ID:
log.error("Missing input GITHUB_PR_RUN_ID. Please consult the documentation.")
return 1
gh = github_client.GitHub(session=github_session)
me = github.get_my_login(github=gh)
log.info(f"Search for PR associated with run id {config.GITHUB_PR_RUN_ID}")
try:
pr_number = github.get_pr_number_from_workflow_run(
github=gh,
run_id=config.GITHUB_PR_RUN_ID,
repository=config.GITHUB_REPOSITORY,
)
except github.CannotDeterminePR:
log.error(
"The PR cannot be found. That's strange. Please open an "
"issue at https://github.com/ewjoachim/python-coverage-comment-action",
exc_info=True,
)
return 1
log.info(f"PR number: {pr_number}")
log.info("Download associated artifacts")
try:
comment = github.download_artifact(
github=gh,
repository=config.GITHUB_REPOSITORY,
artifact_name=config.COMMENT_ARTIFACT_NAME,
run_id=config.GITHUB_PR_RUN_ID,
filename=config.COMMENT_FILENAME,
)
except github.NoArtifact:
log.info(
"Artifact was not found, which is probably because it was probably "
"already posted by a previous step.",
exc_info=True,
)
return 0
log.info("Comment file found in artifact, posting to PR")
github.post_comment(
github=gh,
me=me,
repository=config.GITHUB_REPOSITORY,
pr_number=pr_number,
contents=comment,
marker=template.MARKER,
)
log.info("Comment posted in PR")
return 0
def save_badge(
config: settings.Config,
coverage: coverage_module.Coverage,
github_session: httpx.Client,
git: subprocess.Git,
):
gh = github_client.GitHub(session=github_session)
is_default_branch = github.is_default_branch(
github=gh,
repository=config.GITHUB_REPOSITORY,
branch=config.GITHUB_REF,
)
log.debug(f"On default branch: {is_default_branch}")
if not is_default_branch:
log.info("Skipping badge save as we're not on the default branch")
return 0
log.info("Saving Badge into the repo wiki")
badge_info = badge.compute_badge(
line_rate=coverage.info.percent_covered,
minimum_green=config.MINIMUM_GREEN,
minimum_orange=config.MINIMUM_ORANGE,
)
log.info("badge_info: %s" % badge_info)
wiki.upload_file(
github_token=config.GITHUB_TOKEN,
repository=config.GITHUB_REPOSITORY,
filename=config.BADGE_FILENAME,
contents=badge_info,
git=git,
)
url = wiki.get_wiki_file_url(
repository=config.GITHUB_REPOSITORY,
filename=config.BADGE_FILENAME
)
badge_url = badge.get_badge_shield_url(json_url=url)
log.info(f"Badge JSON stored at {url}")
log.info(f"Badge URL: {badge_url}")
return 0
|
the-stack_106_25976 | import os
import cv2
curdir = os.path.dirname(__file__)
files = os.listdir(curdir)
for file in files:
    _, ext = os.path.splitext(file)
    if ext == '.py':
        continue
    # Join with the script directory so the path is valid regardless of the cwd.
    img = cv2.imread(os.path.join(curdir, file))
    if img is None:
        # Skip files OpenCV cannot decode (e.g. non-image files).
        continue
    h, w = img.shape[:2]
    print('{} {}x{}'.format(file, w, h))
the-stack_106_25981 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes_bi,
p2p_port,
wait_until,
)
from test_framework.mininode import P2PInterface
from test_framework.messages import CAddress, msg_addr, NODE_NETWORK, NODE_WITNESS
class NetTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001000"],["-minrelaytxfee=0.00000500"]]
def run_test(self):
self._test_connection_count()
self._test_getnettotals()
self._test_getnetworkinginfo()
self._test_getaddednodeinfo()
self._test_getpeerinfo()
self._test_getnodeaddresses()
def _test_connection_count(self):
# connect_nodes_bi connects each node to the other
assert_equal(self.nodes[0].getconnectioncount(), 2)
def _test_getnettotals(self):
# getnettotals totalbytesrecv and totalbytessent should be
# consistent with getpeerinfo. Since the RPC calls are not atomic,
# and messages might have been recvd or sent between RPC calls, call
# getnettotals before and after and verify that the returned values
# from getpeerinfo are bounded by those values.
net_totals_before = self.nodes[0].getnettotals()
peer_info = self.nodes[0].getpeerinfo()
net_totals_after = self.nodes[0].getnettotals()
assert_equal(len(peer_info), 2)
peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
peers_sent = sum([peer['bytessent'] for peer in peer_info])
assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
# test getnettotals and getpeerinfo by doing a ping
# the bytes sent/received should change
# note ping and pong are 32 bytes each
self.nodes[0].ping()
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
peer_info_after_ping = self.nodes[0].getpeerinfo()
for before, after in zip(peer_info, peer_info_after_ping):
assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32)
assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32)
def _test_getnetworkinginfo(self):
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
self.nodes[0].setnetworkactive(state=False)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
# Wait a bit for all sockets to close
wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
self.nodes[0].setnetworkactive(state=True)
connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
def _test_getaddednodeinfo(self):
assert_equal(self.nodes[0].getaddednodeinfo(), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
self.nodes[0].addnode(node=ip_port, command='add')
# check that the node has indeed been added
added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existent node returns an error
assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')
def _test_getpeerinfo(self):
peer_info = [x.getpeerinfo() for x in self.nodes]
# check both sides of bidirectional connection between nodes
# the address bound to on one side will be the source address for the other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
assert_equal(peer_info[0][0]['minfeefilter'], Decimal("0.00000500"))
assert_equal(peer_info[1][0]['minfeefilter'], Decimal("0.00001000"))
def _test_getnodeaddresses(self):
self.nodes[0].add_p2p_connection(P2PInterface())
# send some addresses to the node via the p2p message addr
msg = msg_addr()
imported_addrs = []
for i in range(256):
a = "123.123.123.{}".format(i)
imported_addrs.append(a)
addr = CAddress()
addr.time = 100000000
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = a
addr.port = 9666
msg.addrs.append(addr)
self.nodes[0].p2p.send_and_ping(msg)
# obtain addresses via rpc call and check they were ones sent in before
REQUEST_COUNT = 10
node_addresses = self.nodes[0].getnodeaddresses(REQUEST_COUNT)
assert_equal(len(node_addresses), REQUEST_COUNT)
for a in node_addresses:
assert_greater_than(a["time"], 1527811200) # 1st June 2018
assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS)
assert a["address"] in imported_addrs
assert_equal(a["port"], 9666)
assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1)
# addrman's size cannot be known reliably after insertion, as hash collisions may occur
# so only test that requesting a large number of addresses returns less than that
LARGE_REQUEST_COUNT = 10000
node_addresses = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
assert_greater_than(LARGE_REQUEST_COUNT, len(node_addresses))
if __name__ == '__main__':
NetTest().main()
|
the-stack_106_25983 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/12/17 13:19
# @Author : glacier
# @Site :
# @File : db_test.py
# @Software: PyCharm Edu
import pymysql
def db_deal(title_url, title, count):
    # Open the database connection
    db = pymysql.connect(
        "localhost",
        "root",
        "123456",
        "python",
        charset='utf8'
    )
    # Create a cursor object with the cursor() method
    cursor = db.cursor()
    # SQL INSERT statement (values ordered to match the column list)
    sql = "INSERT INTO p_zhihu_topic(title,title_url, \
           vote_count) \
           VALUES ('%s', '%s', '%s')" % \
          (title, title_url, count)
    try:
        # Execute the SQL statement
        cursor.execute(sql)
        print('Insert succeeded')
        # Commit the transaction
        db.commit()
    except Exception:
        print("An error occurred!")
        # Roll back on error
        db.rollback()
if __name__ == '__main__':
title_url ="/question/22425541"
title ="一个人旅行应该怎样自拍"
count ="24K"
db_deal(title_url,title,count)
|
the-stack_106_25984 | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from torch.nn.utils import weight_norm
# Basic ResNet model
def init_layer(L):
# Initialization using fan-in
if isinstance(L, nn.Conv2d):
n = L.kernel_size[0]*L.kernel_size[1]*L.out_channels
L.weight.data.normal_(0,math.sqrt(2.0/float(n)))
elif isinstance(L, nn.BatchNorm2d):
L.weight.data.fill_(1)
L.bias.data.fill_(0)
class distLinear(nn.Module):
def __init__(self, indim, outdim):
super(distLinear, self).__init__()
self.L = weight_norm(nn.Linear(indim, outdim, bias=False), name='weight', dim=0)
self.relu = nn.ReLU()
def forward(self, x):
x_norm = torch.norm(x, p=2, dim =1).unsqueeze(1).expand_as(x)
x_normalized = x.div(x_norm + 0.00001)
L_norm = torch.norm(self.L.weight.data, p=2, dim =1).unsqueeze(1).expand_as(self.L.weight.data)
self.L.weight.data = self.L.weight.data.div(L_norm + 0.00001)
cos_dist = self.L(x_normalized) #matrix product by forward function
scores = 10 * cos_dist #a fixed scale factor to scale the output of cos value into a reasonably large input for softmax
return scores
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
class Linear_fw(nn.Linear): #used in MAML to forward input with fast weight
def __init__(self, in_features, out_features):
super(Linear_fw, self).__init__(in_features, out_features)
self.weight.fast = None #Lazy hack to add fast weight link
self.bias.fast = None
def forward(self, x):
if self.weight.fast is not None and self.bias.fast is not None:
out = F.linear(x, self.weight.fast, self.bias.fast)
else:
out = super(Linear_fw, self).forward(x)
return out
class Conv2d_fw(nn.Conv2d): #used in MAML to forward input with fast weight
def __init__(self, in_channels, out_channels, kernel_size, stride=1,padding=0, bias = True):
super(Conv2d_fw, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)
self.weight.fast = None
        if self.bias is not None:
self.bias.fast = None
def forward(self, x):
if self.bias is None:
if self.weight.fast is not None:
out = F.conv2d(x, self.weight.fast, None, stride= self.stride, padding=self.padding)
else:
out = super(Conv2d_fw, self).forward(x)
else:
if self.weight.fast is not None and self.bias.fast is not None:
out = F.conv2d(x, self.weight.fast, self.bias.fast, stride= self.stride, padding=self.padding)
else:
out = super(Conv2d_fw, self).forward(x)
return out
class BatchNorm2d_fw(nn.BatchNorm2d): #used in MAML to forward input with fast weight
def __init__(self, num_features):
super(BatchNorm2d_fw, self).__init__(num_features)
self.weight.fast = None
self.bias.fast = None
def forward(self, x):
running_mean = torch.zeros(x.data.size()[1]).cuda()
running_var = torch.ones(x.data.size()[1]).cuda()
if self.weight.fast is not None and self.bias.fast is not None:
out = F.batch_norm(x, running_mean, running_var, self.weight.fast, self.bias.fast, training = True, momentum = 1)
#batch_norm momentum hack: follow hack of Kate Rakelly in pytorch-maml/src/layers.py
else:
out = F.batch_norm(x, running_mean, running_var, self.weight, self.bias, training = True, momentum = 1)
return out
# Simple Conv Block
class ConvBlock(nn.Module):
maml = False #Default
def __init__(self, indim, outdim, pool = True, padding = 1):
super(ConvBlock, self).__init__()
self.indim = indim
self.outdim = outdim
if self.maml:
self.C = Conv2d_fw(indim, outdim, 3, padding = padding)
self.BN = BatchNorm2d_fw(outdim)
else:
self.C = nn.Conv2d(indim, outdim, 3, padding= padding)
self.BN = nn.BatchNorm2d(outdim)
self.relu = nn.ReLU(inplace=True)
self.parametrized_layers = [self.C, self.BN, self.relu]
if pool:
self.pool = nn.MaxPool2d(2)
self.parametrized_layers.append(self.pool)
for layer in self.parametrized_layers:
init_layer(layer)
self.trunk = nn.Sequential(*self.parametrized_layers)
def forward(self,x):
out = self.trunk(x)
return out
# Simple ResNet Block
class SimpleBlock(nn.Module):
maml = False #Default
def __init__(self, indim, outdim, half_res):
super(SimpleBlock, self).__init__()
self.indim = indim
self.outdim = outdim
if self.maml:
self.C1 = Conv2d_fw(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = BatchNorm2d_fw(outdim)
self.C2 = Conv2d_fw(outdim, outdim,kernel_size=3, padding=1,bias=False)
self.BN2 = BatchNorm2d_fw(outdim)
else:
self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = nn.BatchNorm2d(outdim)
self.C2 = nn.Conv2d(outdim, outdim,kernel_size=3, padding=1,bias=False)
self.BN2 = nn.BatchNorm2d(outdim)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]
self.half_res = half_res
# if the input number of channels is not equal to the output, then need a 1x1 convolution
if indim!=outdim:
if self.maml:
self.shortcut = Conv2d_fw(indim, outdim, 1, 2 if half_res else 1, bias=False)
self.BNshortcut = BatchNorm2d_fw(outdim)
else:
self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)
self.BNshortcut = nn.BatchNorm2d(outdim)
self.parametrized_layers.append(self.shortcut)
self.parametrized_layers.append(self.BNshortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
out = self.C1(x)
out = self.BN1(out)
out = self.relu1(out)
out = self.C2(out)
out = self.BN2(out)
short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))
out = out + short_out
out = self.relu2(out)
return out
# Bottleneck block
class BottleneckBlock(nn.Module):
maml = False #Default
def __init__(self, indim, outdim, half_res):
super(BottleneckBlock, self).__init__()
bottleneckdim = int(outdim/4)
self.indim = indim
self.outdim = outdim
if self.maml:
self.C1 = Conv2d_fw(indim, bottleneckdim, kernel_size=1, bias=False)
self.BN1 = BatchNorm2d_fw(bottleneckdim)
self.C2 = Conv2d_fw(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)
self.BN2 = BatchNorm2d_fw(bottleneckdim)
self.C3 = Conv2d_fw(bottleneckdim, outdim, kernel_size=1, bias=False)
self.BN3 = BatchNorm2d_fw(outdim)
else:
self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)
self.BN1 = nn.BatchNorm2d(bottleneckdim)
self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)
self.BN2 = nn.BatchNorm2d(bottleneckdim)
self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)
self.BN3 = nn.BatchNorm2d(outdim)
self.relu = nn.ReLU()
self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2, self.C3, self.BN3]
self.half_res = half_res
# if the input number of channels is not equal to the output, then need a 1x1 convolution
if indim!=outdim:
if self.maml:
self.shortcut = Conv2d_fw(indim, outdim, 1, stride=2 if half_res else 1, bias=False)
else:
self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if half_res else 1, bias=False)
self.parametrized_layers.append(self.shortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
short_out = x if self.shortcut_type == 'identity' else self.shortcut(x)
out = self.C1(x)
out = self.BN1(out)
out = self.relu(out)
out = self.C2(out)
out = self.BN2(out)
out = self.relu(out)
out = self.C3(out)
out = self.BN3(out)
out = out + short_out
out = self.relu(out)
return out
class ConvNet(nn.Module):
def __init__(self, depth, flatten = True):
super(ConvNet,self).__init__()
self.grads = []
self.fmaps = []
trunk = []
for i in range(depth):
indim = 3 if i == 0 else 64
outdim = 64
            B = ConvBlock(indim, outdim, pool=(i < 4))  # only pooling for first 4 layers
trunk.append(B)
if flatten:
trunk.append(Flatten())
self.trunk = nn.Sequential(*trunk)
if flatten:
self.final_feat_dim = 1600
else:
self.final_feat_dim = [64, 5, 5]
def forward(self,x):
out = self.trunk(x)
return out
class ResNet(nn.Module):
maml = True #Default
def __init__(self, block, list_of_num_layers, list_of_out_dims, flatten=True):
# list_of_num_layers specifies number of layers in each stage
# list_of_out_dims specifies number of output channel for each stage
super(ResNet, self).__init__()
self.grads = []
self.fmaps = []
assert len(list_of_num_layers)==4, 'Can have only four stages'
# initial layers
if self.maml:
conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = BatchNorm2d_fw(64)
else:
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = nn.BatchNorm2d(64)
relu = nn.ReLU(inplace=True)
pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
init_layer(conv1)
init_layer(bn1)
# residual blocks
trunk = [conv1, bn1, relu, pool1]
indim = 64
for i in range(4):
for j in range(list_of_num_layers[i]):
half_res = (i>=1) and (j==0)
B = block(indim, list_of_out_dims[i], half_res)
trunk.append(B)
indim = list_of_out_dims[i]
# final pooling
if flatten:
avgpool = nn.AvgPool2d(7)
trunk.append(avgpool)
trunk.append(Flatten())
self.final_feat_dim = indim
else:
self.final_feat_dim = [ indim, 7, 7]
self.trunk = nn.Sequential(*trunk)
def forward(self,x):
out = self.trunk(x)
return out
def Conv4(flatten=True):
return ConvNet(4, flatten)
def Conv6(flatten=True):
return ConvNet(6, flatten)
def ResNet10(flatten=True):
return ResNet(SimpleBlock, [1,1,1,1],[64,128,256,512], flatten)
def ResNet18(flatten=True):
return ResNet(SimpleBlock, [2,2,2,2],[64,128,256,512], flatten)
def ResNet34(flatten=True):
return ResNet(SimpleBlock, [3,4,6,3],[64,128,256,512], flatten)
def ResNet50(flatten=True):
return ResNet(BottleneckBlock, [3,4,6,3], [256,512,1024,2048], flatten)
def ResNet101(flatten=True):
return ResNet(BottleneckBlock, [3,4,23,3],[256,512,1024,2048], flatten)
model_dict = dict(Conv4 = Conv4,
Conv6 = Conv6,
ResNet10 = ResNet10,
ResNet18 = ResNet18,
ResNet34 = ResNet34,
ResNet50 = ResNet50,
ResNet101 = ResNet101)
|
the-stack_106_25985 | from unittest.mock import patch
from django.shortcuts import reverse
from api.tests.helpers import (
BaseFeedAPITestCase, create_entry_objects, FEED_DETAIL_FIELDS
)
from feeds.models import Feed
class FeedListTest(BaseFeedAPITestCase):
"""Tests GET request on `feed-list` endpoint.
This API call should return a JSON payload that includes
a paginated list of Feed objects.
"""
def setUp(self):
self.create_and_authenticate_user('testadmin')
self.endpoint_url = reverse('feed-list')
def test_successful_get_request(self):
response = self.client.get(self.endpoint_url)
payload = response.json()
self.assert_http_status(response)
self.assertIsInstance(payload['results'], list)
self.assertEqual(payload['count'], self.n_items)
def test_has_no_feeds_to_fetch(self):
# delete all entries currently in db
Feed.objects.all().delete()
response = self.client.get(self.endpoint_url)
payload = response.json()
self.assert_http_status(response)
self.assertIsInstance(payload['results'], list)
self.assertEqual(payload['count'], 0)
class FeedDetailTest(BaseFeedAPITestCase):
"""Tests GET request on `feed-detail` endpoint.
This API call should retrieve a single Feed object.
"""
def setUp(self):
self.create_and_authenticate_user('testadmin')
def test_successful_retrieval(self):
url = reverse('feed-detail', kwargs={'pk': self.pk})
response = self.client.get(url)
payload = response.json()
self.assert_http_status(response)
self.assertIsInstance(payload, dict)
self.assertCountEqual(payload.keys(), FEED_DETAIL_FIELDS)
def test_retrieving_nonexistent_feed(self):
url = reverse('feed-detail', kwargs={'pk': 100})
response = self.client.get(url)
self.assert_http_status(response, 404)
@patch('feeds.api.views.Feed.fetch_and_set_feed_details')
class CreateFeedTest(BaseFeedAPITestCase):
"""Tests POST request on `feed-list` endpoint.
This API call should create a new Feed object.
"""
def setUp(self):
self.create_and_authenticate_user('testadmin')
self.endpoint_url = reverse('feed-list')
def test_successful_feed_creation(self, mock_fetch):
valid_payload = {
'link': 'https://www.feeds-me-up.com/',
'title': 'Title 1',
'description': 'Description',
'version': 'rss20'
}
response = self.client.post(
self.endpoint_url,
data=valid_payload,
format='json'
)
self.assert_http_status(response, 201)
self.assertTrue(mock_fetch.called)
def test_feed_creation_with_link_only(self, mock_fetch):
valid_payload = {'link': 'https://www.feeds-me-up-x.com/'}
response = self.client.post(
self.endpoint_url,
data=valid_payload,
format='json'
)
self.assert_http_status(response, 201)
self.assertTrue(mock_fetch.called)
def test_invalid_feed_creation_request(self, mock_fetch):
invalid_payload = {'link': 'invalid'}
response = self.client.post(
self.endpoint_url,
data=invalid_payload,
format='json'
)
self.assert_http_status(response, 400)
self.assertFalse(mock_fetch.called)
@patch('feeds.api.views.Feed.fetch_and_set_feed_details')
class UpdateFeedTest(BaseFeedAPITestCase):
"""Tests PUT requests on `feed-detail` endpoint.
This API call should update a given Feed object.
"""
def setUp(self):
self.create_and_authenticate_user('testadmin')
self.feed = Feed.objects.get(pk=self.pk)
self.valid_payload = {'link': self.feed.link}
self.endpoint_url = reverse('feed-detail', kwargs={'pk': self.pk})
def test_valid_update(self, mock_fetch):
self.valid_payload['title'] = 'New Title'
self.valid_payload['description'] = 'New description'
response = self.client.put(
self.endpoint_url,
data=self.valid_payload,
format='json'
)
self.assert_http_status(response)
def test_invalid_update(self, mock_fetch):
invalid_payload = {'notvalid': 'notvalid'}
response = self.client.put(
self.endpoint_url,
data=invalid_payload,
format='json'
)
self.assert_http_status(response, 400)
class DeleteFeedTest(BaseFeedAPITestCase):
"""Tests DELETE requests on `feed-detail` endpoint.
This API call should delete a given Feed object.
"""
def setUp(self):
self.create_and_authenticate_user('testadmin')
self.feed = Feed.objects.get(pk=self.pk)
self.endpoint_url = reverse('feed-detail', kwargs={'pk': self.pk})
def test_successful_delete(self):
response = self.client.delete(self.endpoint_url)
self.assert_http_status(response, 204)
with self.assertRaises(Feed.DoesNotExist):
Feed.objects.get(pk=self.pk)
class FeedEntriesListTest(BaseFeedAPITestCase):
"""Tests GET requests on `feed-entries` endpoint.
This API call should return a JSON payload that includes a list of
Entry objects associated with a given Feed object.
"""
def setUp(self):
self.create_and_authenticate_user('testadmin')
self.endpoint_url = reverse('feed-entries', kwargs={'pk': self.pk})
self.feed = Feed.objects.get(pk=self.pk)
def test_valid_feed_entries_retrieval(self):
# Create some Entry objects and associate with a Feed
n_entries = 120
feed_url = self.feed.link
entries = create_entry_objects(n_entries, feed_url)
for entry in entries:
self.feed.entries.add(entry)
# Fetch this feed's entries via API endpoint
response = self.client.get(self.endpoint_url)
payload = response.json()
self.assert_http_status(response)
self.assertIsInstance(payload['results'], list)
self.assertEqual(payload['count'], self.feed.entries.count())
|
the-stack_106_25987 | import array
import logging
from . import machine
from spec_exceptions import ChannelError
log = logging.getLogger('puka')
class ChannelCollection(object):
channel_max = 65535
def __init__(self):
self.channels = {}
self.free_channels = []
# Channel 0 is a special case.
self.free_channel_numbers = [0]
zero_channel = self.new()
self.free_channels.append( zero_channel )
def tune_channel_max(self, new_channel_max):
new_channel_max = new_channel_max if new_channel_max != 0 else 65535
self.channel_max = min(self.channel_max, new_channel_max)
self.free_channel_numbers = array.array('H',
xrange(self.channel_max, 0, -1))
return self.channel_max
def new(self):
try:
number = self.free_channel_numbers.pop()
except IndexError:
raise ChannelError('No free channels')
channel = Channel(number)
self.channels[number] = channel
return channel
def allocate(self, promise, on_channel):
if self.free_channels:
channel = self.free_channels.pop()
channel.promise = promise
promise.channel = channel
promise.after_machine_callback = on_channel
else:
channel = self.new()
channel.promise = promise
promise.channel = channel
machine.channel_open(promise, on_channel)
return channel
def deallocate(self, channel):
channel.promise.channel = channel.promise = None
if channel.alive:
self.free_channels.append( channel )
else:
del self.channels[channel.number]
self.free_channel_numbers.append( channel.number )
class Channel(object):
alive = False
def __init__(self, number):
self.number = number
self.promise = None
self._clear_inbound_state()
def _clear_inbound_state(self):
self.method_frame = self.props = None
self.body_chunks = []
self.body_len = self.body_size = 0
def inbound_method(self, frame):
if frame.has_content:
self.method_frame = frame
else:
self._handle_inbound(frame)
def inbound_props(self, body_size, props):
self.body_size = body_size
self.props = props
if self.body_size == 0: # don't expect body frame
self.inbound_body('')
def inbound_body(self, body_chunk):
self.body_chunks.append( body_chunk )
self.body_len += len(body_chunk)
if self.body_len == self.body_size:
result = self.method_frame
props = self.props
result['body'] = ''.join(self.body_chunks)
result['headers'] = props.get('headers', {})
            # Avoid a reference loop.
if 'headers' in props:
del props['headers']
result['headers'].update( props )
self._clear_inbound_state()
return self._handle_inbound(result)
def _handle_inbound(self, result):
self.promise.recv_method(result)
|
the-stack_106_25988 | """Test figures.tasks Daily task functions
# Overview of daily pipeline
Figures daily pipeline collects and aggregates data from Open edX (edx-platform)
into Figures models.
The Figures task functions are Celery tasks. However currently only the top
level task function is called asynchronously.
# Daily pipeline execution
The daily pipeline executes at the following levels
1. Top level initiates the daily metrics collection job. It iterates over each
site to be processed (currently all sites)
2. Per site execution. It iterates over each course in the site. After all the
courses have been processed, Figures `EnrollmentData` are updated
3. Per course execution. It iterates over each enrollment in the course
4. Per enrollment execution. It processes a single enrollment (learner + course pair)
These are the functions called, the purpose and details
1. 'populate_daily_metrics'
* Gets list (queryset) of Site objects
* Iterates over each site and calls 'populate_daily_metrics_for_site' for each
site
2. 'populate_daily_metrics_for_site'
* Gets list (queryset) of course id strings for the site
* Iterates over each course id and calls 'populate_single_cdm' for each course
* After all the CourseDailyMetrics records have been collected, calls
'populate_single_sdm'
3. 'populate_single_sdm'
* Fills the SiteDailyMetrics record for the site + date pair
4. 'populate_single_cdm'
* Fills the CourseDailyMetrics record for the specified course
5. 'update_enrollment_data'
* Updates Figures 'EnrollmentData' records. These records provide a current
snapshot of enrollment data
# Testing priorities
We have two top priorities for this testing module:
1. Verify nominal operation will work
2. Verify failures at one level do not cause any of the levels above to fail
Of secondary importance is testing log output
"""
from __future__ import absolute_import
from datetime import date
import pytest
from six.moves import range
from django.contrib.sites.models import Site
from figures.helpers import as_date, as_datetime
from figures.models import (CourseDailyMetrics,
SiteDailyMetrics)
from figures.tasks import (FPD_LOG_PREFIX,
populate_single_cdm,
populate_single_sdm,
populate_daily_metrics_for_site,
populate_daily_metrics)
from tests.factories import (CourseDailyMetricsFactory,
CourseOverviewFactory,
SiteDailyMetricsFactory,
SiteFactory)
from tests.helpers import OPENEDX_RELEASE, GINKGO, FakeException
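# Illustrative sketch only (an assumption for readability, not used by any test
# below): based on the module docstring above, a nominal daily run fans out
# roughly like this, with only the top-level task called asynchronously.
def _example_nominal_daily_run(date_for=None):
    date_for = date_for or date.today()
    for site in Site.objects.all():
        # Level 2: per-site collection (CourseDailyMetrics, then SiteDailyMetrics)
        populate_daily_metrics_for_site(site_id=site.id, date_for=date_for)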
def test_populate_single_cdm(transactional_db, monkeypatch):
"""Test figures.tasks.populate_single_cdm nominal case
    This tests the normal execution to populate a single CourseDailyMetrics
record
"""
assert CourseDailyMetrics.objects.count() == 0
date_for = '2019-01-02'
course_id = "course-v1:certs-appsembler+001+2019"
created = False
def mock_cdm_load(self, date_for, **kwargs):
return (CourseDailyMetricsFactory(date_for=date_for), created, )
monkeypatch.setattr('figures.sites.get_site_for_course',
lambda val: SiteFactory())
monkeypatch.setattr(
'figures.pipeline.course_daily_metrics.CourseDailyMetricsLoader.load',
mock_cdm_load)
populate_single_cdm(course_id, date_for)
assert CourseDailyMetrics.objects.count() == 1
assert as_date(CourseDailyMetrics.objects.first().date_for) == as_date(date_for)
def test_populate_single_sdm(transactional_db, monkeypatch):
"""Test figures.tasks.populate_single_sdm
Test the task function that fills the SiteDailyMetrics record for a given
site
"""
assert SiteDailyMetrics.objects.count() == 0
date_for = '2019-01-02'
created = False
site = SiteFactory()
def mock_sdm_load(self, site, date_for, **kwargs):
return (SiteDailyMetricsFactory(site=site), created, )
monkeypatch.setattr(
'figures.pipeline.site_daily_metrics.SiteDailyMetricsLoader.load',
mock_sdm_load)
populate_single_sdm(site.id, date_for=date_for)
assert SiteDailyMetrics.objects.count() == 1
@pytest.mark.parametrize('date_for', [
'2020-12-12',
as_date('2020-12-12'),
as_datetime('2020-12-12')
])
def test_populate_daily_metrics_for_site_basic(transactional_db,
monkeypatch,
date_for):
site = SiteFactory()
course_ids = ['fake-course-1', 'fake-course-2']
collected_course_ids = []
def fake_populate_single_cdm(course_id, **_kwargs):
collected_course_ids.append(course_id)
def fake_populate_single_sdm(site_id, **_kwargs):
assert site_id == site.id
monkeypatch.setattr('figures.tasks.site_course_ids', lambda site: course_ids)
monkeypatch.setattr('figures.tasks.populate_single_cdm',
fake_populate_single_cdm)
monkeypatch.setattr('figures.tasks.populate_single_sdm',
fake_populate_single_sdm)
populate_daily_metrics_for_site(site_id=site.id, date_for=date_for)
assert set(collected_course_ids) == set(course_ids)
@pytest.mark.skipif(OPENEDX_RELEASE == GINKGO,
reason='Apparent Django 1.8 incompatibility')
def test_populate_daily_metrics_for_site_error_on_cdm(transactional_db,
monkeypatch,
caplog):
date_for = date.today()
site = SiteFactory()
fake_course_ids = ['fake-course-id-1']
def fake_pop_single_cdm_fails(**kwargs):
# TODO: test with different exceptions
# At least one with and without `message_dict`
raise FakeException('Hey!')
monkeypatch.setattr('figures.tasks.site_course_ids',
lambda site: fake_course_ids)
monkeypatch.setattr('figures.tasks.populate_single_cdm',
fake_pop_single_cdm_fails)
populate_daily_metrics_for_site(site_id=site.id, date_for=date_for)
last_log = caplog.records[-1]
expected_msg = ('{prefix}:SITE:COURSE:FAIL:populate_daily_metrics_for_site. '
'site_id:{site_id}, date_for:{date_for}. '
'course_id:{course_id} exception:{exception}'
).format(prefix=FPD_LOG_PREFIX,
site_id=site.id,
date_for=date_for,
course_id=fake_course_ids[0],
exception='Hey!')
assert last_log.message == expected_msg
@pytest.mark.skipif(OPENEDX_RELEASE == GINKGO,
reason='Apparent Django 1.8 incompatibility')
def test_populate_daily_metrics_for_site_site_dne(transactional_db,
monkeypatch,
caplog):
"""
If there is an invalid site id, logs error and raises it
"""
bad_site_id = Site.objects.order_by('id').last().id + 1
date_for = date.today()
assert not Site.objects.filter(id=bad_site_id).exists()
with pytest.raises(Exception) as e:
populate_daily_metrics_for_site(site_id=bad_site_id, date_for=date_for)
assert str(e.value) == 'Site matching query does not exist.'
last_log = caplog.records[-1]
expected_message = ('FIGURES:PIPELINE:DAILY:SITE:FAIL:'
'populate_daily_metrics_for_site:site_id: {} does not exist')
assert last_log.message == expected_message.format(bad_site_id)
@pytest.mark.skipif(OPENEDX_RELEASE == GINKGO,
reason='Apparent Django 1.8 incompatibility')
def test_populate_daily_metrics_site_level_error(transactional_db,
monkeypatch,
caplog):
"""
Generic test that the first site fails but we can process the second site
"""
assert Site.objects.count() == 1 # Because we always have 'example.com'
good_site = Site.objects.first()
bad_site = SiteFactory()
populated_site_ids = []
failed_site_ids = []
date_for = date.today()
def fake_populate_daily_metrics_for_site(site_id, **_kwargs):
"""
"""
if site_id == bad_site.id:
failed_site_ids.append(site_id)
raise FakeException('Hey!')
else:
populated_site_ids.append(site_id)
monkeypatch.setattr('figures.tasks.populate_daily_metrics_for_site',
fake_populate_daily_metrics_for_site)
populate_daily_metrics(date_for=date_for)
assert set(populated_site_ids) == set([good_site.id])
assert set(failed_site_ids) == set([bad_site.id])
last_log = caplog.records[-1]
expected_msg = ('{prefix}:FAIL populate_daily_metrics unhandled site level'
' exception for site[{site_id}]={domain}').format(
prefix=FPD_LOG_PREFIX,
site_id=bad_site.id,
domain=bad_site.domain)
assert last_log.message == expected_msg
# TODO: def test_populate_daily_metrics_future_date_error
@pytest.mark.skipif(OPENEDX_RELEASE == GINKGO,
reason='Apparent Django 1.8 incompatibility')
def test_populate_daily_metrics_enrollment_data_error(transactional_db,
monkeypatch,
caplog):
# Needs to be 'today' so that enrollment data update gets called
date_for = date.today()
site = SiteFactory()
def fake_populate_daily_metrics_for_site(**_kwargs):
pass
def fake_update_enrollment_data_fails(**kwargs):
# TODO: test with different exceptions
# At least one with and without `message_dict`
raise FakeException('Hey!')
monkeypatch.setattr('figures.tasks.populate_daily_metrics_for_site',
fake_populate_daily_metrics_for_site)
monkeypatch.setattr('figures.tasks.update_enrollment_data',
fake_update_enrollment_data_fails)
populate_daily_metrics(date_for=date_for)
last_log = caplog.records[-1]
expected_msg = ('{prefix}:FAIL figures.tasks update_enrollment_data '
' unhandled exception. site[{site_id}]:{domain}').format(
prefix=FPD_LOG_PREFIX,
site_id=site.id,
domain=site.domain)
assert last_log.message == expected_msg
@pytest.mark.skipif(OPENEDX_RELEASE == GINKGO,
reason='Broken test. Apparent Django 1.8 incompatibility')
def test_populate_daily_metrics_multisite(transactional_db, monkeypatch):
# Stand up test data
date_for = '2019-01-02'
site_links = []
for domain in ['alpha.domain', 'bravo.domain']:
site_links.append(dict(
site=SiteFactory(domain=domain),
courses=[CourseOverviewFactory() for i in range(2)],
))
populate_daily_metrics(date_for=date_for)
|
the-stack_106_25989 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
"Find a file in a search path"
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on."""
    # tell the compiler it can process .cu
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
print(extra_postargs)
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.bbox",
["utils/bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_52',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
)
]
setup(
name='tf_faster_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
|
the-stack_106_25992 | # -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief:
from xml.dom import minidom
import pycorrector.rnn_crf.rnn_crf_config as config
from pycorrector.tokenizer import segment
def parse_xml_file(path):
print('Parse data from %s' % path)
id_lst, word_lst, label_lst = [], [], []
with open(path, 'r', encoding='utf-8') as f:
dom_tree = minidom.parse(path)
docs = dom_tree.documentElement.getElementsByTagName('DOC')
for doc in docs:
# Input the text
text = doc.getElementsByTagName('TEXT')[0]. \
childNodes[0].data.strip()
text_id = doc.getElementsByTagName('TEXT')[0].getAttribute('id')
errors = doc.getElementsByTagName('ERROR')
# Locate the error position and error type
locate_dict = {}
for error in errors:
start_off = error.getAttribute('start_off')
end_off = error.getAttribute('end_off')
error_type = error.getAttribute('type')
for i in range(int(start_off) - 1, int(end_off)):
if i == int(start_off) - 1:
error_type_change = 'B-' + error_type
else:
error_type_change = 'I-' + error_type
# locate_dict[i] = error_type_change
locate_dict[i] = error_type
# Segment with pos
word_seq, pos_seq = segment(text, cut_type='char', pos=True)
word_arr, label_arr = [], []
for i in range(len(word_seq)):
if i in locate_dict:
word_arr.append(word_seq[i])
# Fill with error type
label_arr.append(locate_dict[i])
else:
word_arr.append(word_seq[i])
# Fill with pos tag
label_arr.append(pos_seq[i])
id_lst.append(text_id)
word_lst.append(word_arr)
label_lst.append(label_arr)
return id_lst, word_lst, label_lst
def parse_txt_file(input_path, truth_path):
print('Parse data from %s and %s' % (input_path, truth_path))
id_lst, word_lst, label_lst = [], [], []
# read truth file
truth_dict = {}
with open(truth_path, 'r', encoding='utf-8') as truth_f:
for line in truth_f:
parts = line.strip().split(',')
# Locate the error position
locate_dict = {}
if len(parts) == 4:
text_id = parts[0]
start_off = parts[1]
end_off = parts[2]
error_type = parts[3].strip()
for i in range(int(start_off) - 1, int(end_off)):
if i == int(start_off) - 1:
error_type_change = 'B-' + error_type
else:
error_type_change = 'I-' + error_type
# locate_dict[i] = error_type_change
locate_dict[i] = error_type
# for i in range(int(start_off) - 1, int(end_off)):
# locate_dict[i] = error_type
if text_id in truth_dict:
truth_dict[text_id].update(locate_dict)
else:
truth_dict[text_id] = locate_dict
# read input file and get token
with open(input_path, 'r', encoding='utf-8') as input_f:
for line in input_f:
parts = line.strip().split('\t')
text_id = parts[0].replace('(sid=', '').replace(')', '')
text = parts[1]
# segment with pos
word_seq, pos_seq = segment(text, cut_type='char', pos=True)
word_arr, label_arr = [], []
if text_id in truth_dict:
locate_dict = truth_dict[text_id]
for i in range(len(word_seq)):
if i in locate_dict:
word_arr.append(word_seq[i])
# fill with error type
label_arr.append(locate_dict[i])
else:
word_arr.append(word_seq[i])
# fill with pos tag
label_arr.append(pos_seq[i])
else:
word_arr = word_seq
label_arr = pos_seq
id_lst.append(text_id)
word_lst.append(word_arr)
label_lst.append(label_arr)
return id_lst, word_lst, label_lst
def save_data_list(data_list, data_path):
with open(data_path, 'w', encoding='utf-8') as f:
count = 0
for line in data_list:
f.write(' '.join(line) + '\n')
count += 1
print("save line size:%d to %s" % (count, data_path))
if __name__ == '__main__':
# train data
train_words, train_labels = [], []
for path in config.train_paths:
_, word_list, label_list = parse_xml_file(path)
train_words.extend(word_list)
train_labels.extend(label_list)
save_data_list(train_words, config.train_word_path)
save_data_list(train_labels, config.train_label_path)
# test data
test_ids, test_words, test_labels = [], [], []
for input_path, truth_path in config.test_paths.items():
id_list, word_list, label_list = parse_txt_file(input_path, truth_path)
test_ids.extend(id_list)
test_words.extend(word_list)
test_labels.extend(label_list)
save_data_list(test_ids, config.test_id_path)
save_data_list(test_words, config.test_word_path)
save_data_list(test_labels, config.test_label_path)
|
the-stack_106_25993 | import os
import pandas as pd
import numpy as np
import shutil
from data_handling_functions import get_article_row, m_j_dict
import math
this_path = 'Bilbokning/'
#PARAMETERS
EUR_PALLET_VOLUME = 1.20*0.80*1.00
NUM_OF_ZONES = 9
def calculate_dij_mj(degree_of_filling, date):
#Remove /csv/... if it exists, then create a new
if os.path.exists(this_path+"csv"):
shutil.rmtree(this_path+"csv")
os.mkdir(this_path+"csv")
df_utlev = pd.read_csv(this_path+'data/df_utleveranser_TATA62.csv', sep=';')
df_prod = pd.read_csv(this_path+'data/df_artiklar_TATA62.csv', sep=';')
#Select only todays orders
todays_date = df_utlev[df_utlev.Datum == date]
#---------------------------Augmenting todays_date-------------------------
#Augment todays_date with Zone, Volumes, Type and Quantities)
zone_list=[]
article_volume_list=[]
pack_volume_list=[]
pallet_volume_list=[]
pickup_type_list=[]
pack_quantity_list=[]
pallet_quantity_list=[]
for i in todays_date["Artikelnr"]:
zone_list.append(df_prod.loc[get_article_row(i, df_prod), "PlockOmråde"])
for i in todays_date["Artikelnr"]:
article_volume_list.append(df_prod.loc[get_article_row(i, df_prod), "ArtikelVolym"])
for i in todays_date["Artikelnr"]:
pack_volume_list.append(df_prod.loc[get_article_row(i, df_prod), "FörpVolym"])
for i in todays_date["Artikelnr"]:
pallet_volume_list.append(df_prod.loc[get_article_row(i, df_prod), "PallVolym"])
for i in todays_date["Artikelnr"]:
pickup_type_list.append(df_prod.loc[get_article_row(i, df_prod), "PlockTyp"])
for i in todays_date["Artikelnr"]:
pack_quantity_list.append(df_prod.loc[get_article_row(i, df_prod), "FörpQ"])
for i in todays_date["Artikelnr"]:
pallet_quantity_list.append(df_prod.loc[get_article_row(i, df_prod), "PallQ"])
todays_date.loc[:,"PlockOmråde"] = zone_list
todays_date.loc[:,"ArtikelVolym"] = article_volume_list
todays_date.loc[:,"FörpVolym"] = pack_volume_list
todays_date.loc[:,"PallVolym"] = pallet_volume_list
todays_date.loc[:,"PlockTyp"] = pickup_type_list
todays_date.loc[:,"FörpQ"] = pack_quantity_list
todays_date.loc[:,"PallQ"] = pallet_quantity_list
#--------------------------------------------------------------------------
todays_date.to_csv(this_path+"csv/"+date+".csv", index=False)
#List with shop_ids
shop_ids = set(todays_date["Butiksnr"].tolist())
#Create directories and sorted csv-files (shop-ids, product_types)
os.mkdir(this_path+"csv/stores")
dij = np.array([])
dij.shape=(NUM_OF_ZONES,0)
m_j = []
for shop in shop_ids:
os.mkdir(this_path+"csv/stores/"+str(shop))
specific_shop = todays_date[todays_date.Butiksnr == shop]
specific_shop.to_csv(this_path+"csv/stores/"+str(shop)+"/"+str(shop)+".csv", index=False)
product_types = set(specific_shop["Varugrupp"].tolist())
Dij = np.array([])
Dij.shape=(NUM_OF_ZONES,0)
for product_type in product_types:
os.mkdir(this_path+"csv/stores/"+str(shop)+"/"+str(product_type))
specific_product = specific_shop[specific_shop.Varugrupp == product_type]
specific_product.to_csv(this_path+"csv/stores/"+str(shop)+"/"+str(product_type)+"/"+str(product_type)+".csv", index=False)
#----------------------------Packing problem----------------------------
num_of_rows = range(specific_product.shape[0])
zone_volume_article = np.zeros((NUM_OF_ZONES,1))
zone_volume_pack = np.zeros((NUM_OF_ZONES,1))
zone_pallet_pallets = np.zeros((NUM_OF_ZONES,1))
for i in num_of_rows:
item = specific_product.iloc[i,:]
#Decide the real zone (The order is important!!)
if item.PlockOmråde == "Tält":
real_zone = 0
elif item.PlockOmråde == "Fristapling" or item.PlockOmråde == "Udda":
real_zone = 1
elif item.PlockOmråde == "Helpall" or item.PlockTyp == "Pall":
real_zone = 5
elif item.PlockOmråde == "ADR":
real_zone = 6
elif product_type == "2 El & belysning":
real_zone = 7
elif item.PlockOmråde == "Entresol":
real_zone = 8
elif product_type == "6 Trädgård" or product_type == "5 Färg":
real_zone = 2
elif product_type == "3 VVS & Bad" or product_type == "4 Bygg":
real_zone = 3
elif product_type == "1 Järn":
real_zone = 4
            #Sort pallets by zone for each product_type
if item.PlockTyp == "Styck":
zone_volume_article[real_zone] += item.Kvantitet * float(item.ArtikelVolym.replace(',', '.'))
elif item.PlockTyp == "Förpackning" or item.PlockTyp == "":
zone_volume_pack[real_zone] += (item.Kvantitet / item.FörpQ) * float(item.FörpVolym.replace(',', '.'))
elif item.PlockTyp == "Pall":
zone_pallet_pallets[real_zone] += math.ceil(item.Kvantitet / item.PallQ)
zone_article_pallets = np.ceil(zone_volume_article / (degree_of_filling * EUR_PALLET_VOLUME))
zone_pack_pallets = np.ceil(zone_volume_pack / (degree_of_filling * EUR_PALLET_VOLUME))
#Number of pallets from a zone for a specific product_type for a specific shop
pallets_from_zone = zone_article_pallets + zone_pack_pallets + zone_pallet_pallets
#One failsafe. It is now okay to send 96 pallets from one zone for one product type.
if np.any((pallets_from_zone > 48) == True) or pallets_from_zone[0] > 30:
tmp = np.floor(pallets_from_zone / 2)
pallets_from_zone -= tmp
Dij = np.append(Dij, tmp, axis=1)
#Control to check if any zone sends more than 48 pallets for one product type
if np.any((pallets_from_zone > 48) == True):
print(str(shop)+" "+str(product_type)+" "+ str(real_zone) + " greater than 48 will generate error, or tent greater than 30")
Dij = np.append(Dij, pallets_from_zone, axis=1)
#-----------------------------------------------------------------------
#----------------------------Combination problem---------------------------
#Every initial order is set to less than 48 (Split large product_types)
col = 0
while col < Dij.shape[1]:
max_pallets = 0
new_column = np.zeros((NUM_OF_ZONES,1))
for i in range(Dij.shape[0]):
if Dij[i,col] + max_pallets <= 48:
max_pallets += Dij[i,col]
new_column[i,:] = 0
else:
new_column[i,:] = Dij[i,col]
Dij[i,col] = 0
if sum(new_column[:,:]) != 0:
Dij = np.append(Dij,new_column,axis=1)
col += 1
#Combine orders
fixed_Dij = np.zeros((NUM_OF_ZONES,Dij.shape[1]))
deleted = []
for j in range(fixed_Dij.shape[1]):
test_range = range(Dij.shape[1])
for k in test_range:
if k in deleted:
continue
if sum(fixed_Dij[:,j]) + sum(Dij[:,k]) <= 48 and (fixed_Dij[0,j] + Dij[0,k]) <= 30:
fixed_Dij[:,j] = fixed_Dij[:,j] + Dij[:,k]
deleted.append(k)
#Delete empty columns
delete_col = []
for j in range(fixed_Dij.shape[1]):
if sum(fixed_Dij[:,j]) == 0:
delete_col.append(j)
fixed_Dij = np.delete(fixed_Dij,delete_col,axis=1)
#Generate m_j
for n in range(fixed_Dij.shape[1]):
m_j = np.append(m_j,m_j_dict[str(shop)])
#Append Dij for specific shop to total dij
dij = np.append(dij,fixed_Dij,axis=1)
#--------------------------------------------------------------------------
return dij, m_j
|
the-stack_106_25994 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
"""
Sections in this module are
1. top-level functions
2. plan creators
3. plan runners
4. individual operations
5. helper functions
The top-level functions compose and execute full plans.
A plan is created by composing various individual operations. The plan data structure is a
list of dicts, where each dict represents an individual operation. The dict contains two
keys--`function` and `kwargs`--where function is the name of the individual operation function
within this module.
Each individual operation must
a) return a `Result` (i.e. NEEDS_SUDO, MODIFIED, or NO_CHANGE)
b) have no side effects if context.dry_run is True
c) be verbose and descriptive about the changes being made or proposed is context.verbosity >= 1
The plan runner functions take the plan (list of dicts) as an argument, and then coordinate the
execution of each individual operation. The docstring for `run_plan_elevated()` has details on
how that strategy is implemented.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from difflib import unified_diff
from errno import ENOENT
from glob import glob
from itertools import chain
import json
from logging import getLogger
import os
from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join
from random import randint
import re
import sys
from tempfile import NamedTemporaryFile
from .. import CONDA_PACKAGE_ROOT, CondaError, __version__ as CONDA_VERSION
from .._vendor.auxlib.ish import dals
from ..activate import (CshActivator, FishActivator,
PosixActivator, XonshActivator, PowerShellActivator)
from ..base.context import context
from ..common.compat import (PY2, ensure_binary, ensure_fs_path_encoding, ensure_text_type, on_mac,
on_win, open)
from ..common.path import (expand, get_bin_directory_short_path, get_python_short_path,
get_python_site_packages_short_path, win_path_ok)
from ..exceptions import CondaValueError
from ..gateways.disk.create import copy, mkdir_p
from ..gateways.disk.delete import rm_rf
from ..gateways.disk.link import lexists
from ..gateways.disk.permissions import make_executable
from ..gateways.disk.read import compute_md5sum
from ..gateways.subprocess import subprocess_call
if on_win:
if PY2:
import _winreg as winreg
else:
import winreg
from menuinst.knownfolders import get_folder_path, FOLDERID
from menuinst.winshortcut import create_shortcut
log = getLogger(__name__)
CONDA_INITIALIZE_RE_BLOCK = (r"^# >>> conda initialize >>>(?:\n|\r\n)"
r"([\s\S]*?)"
r"# <<< conda initialize <<<(?:\n|\r\n)?")
class Result:
NEEDS_SUDO = "needs sudo"
MODIFIED = "modified"
NO_CHANGE = "no change"
# #####################################################
# top-level functions
# #####################################################
def install(conda_prefix):
plan = make_install_plan(conda_prefix)
run_plan(plan)
if not context.dry_run:
assert not any(step['result'] == Result.NEEDS_SUDO for step in plan)
print_plan_results(plan)
return 0
def initialize(conda_prefix, shells, for_user, for_system, anaconda_prompt):
plan1 = []
if os.getenv('CONDA_PIP_UNINITIALIZED') == 'true':
plan1 = make_install_plan(conda_prefix)
run_plan(plan1)
if not context.dry_run:
run_plan_elevated(plan1)
plan2 = make_initialize_plan(conda_prefix, shells, for_user, for_system, anaconda_prompt)
run_plan(plan2)
if not context.dry_run:
run_plan_elevated(plan2)
plan = plan1 + plan2
print_plan_results(plan)
if any(step['result'] == Result.NEEDS_SUDO for step in plan):
print("Operation failed.", file=sys.stderr)
return 1
def initialize_dev(shell, dev_env_prefix=None, conda_source_root=None):
# > alias conda-dev='eval "$(python -m conda init --dev)"'
# > eval "$(python -m conda init --dev)"
dev_env_prefix = expand(dev_env_prefix or sys.prefix)
conda_source_root = expand(conda_source_root or os.getcwd())
python_exe, python_version, site_packages_dir = _get_python_info(dev_env_prefix)
if not isfile(join(conda_source_root, 'conda', '__main__.py')):
raise CondaValueError("Directory is not a conda source root: %s" % conda_source_root)
plan = make_install_plan(dev_env_prefix)
plan.append({
'function': remove_conda_in_sp_dir.__name__,
'kwargs': {
'target_path': site_packages_dir,
},
})
plan.append({
'function': make_conda_egg_link.__name__,
'kwargs': {
'target_path': join(site_packages_dir, 'conda.egg-link'),
'conda_source_root': conda_source_root,
},
})
plan.append({
'function': modify_easy_install_pth.__name__,
'kwargs': {
'target_path': join(site_packages_dir, 'easy-install.pth'),
'conda_source_root': conda_source_root,
},
})
plan.append({
'function': make_dev_egg_info_file.__name__,
'kwargs': {
'target_path': join(conda_source_root, 'conda.egg-info'),
},
})
run_plan(plan)
if context.dry_run or context.verbosity:
print_plan_results(plan, sys.stderr)
if any(step['result'] == Result.NEEDS_SUDO for step in plan): # pragma: no cover
raise CondaError("Operation failed. Privileged install disallowed for 'conda init --dev'.")
env_vars = {
'ADD_COV': '--cov-report xml --cov-report term-missing --cov conda',
        'PYTHONHASHSEED': str(randint(0, 4294967295)),
'PYTHON_MAJOR_VERSION': python_version[0],
'TEST_PLATFORM': 'win' if on_win else 'unix',
}
unset_env_vars = (
'CONDA_DEFAULT_ENV',
'CONDA_EXE',
'CONDA_PREFIX',
'CONDA_PREFIX_1',
'CONDA_PREFIX_2',
'CONDA_PROMPT_MODIFIER',
'CONDA_PYTHON_EXE',
'CONDA_SHLVL',
)
if shell == "bash":
builder = []
builder += ["unset %s" % unset_env_var for unset_env_var in unset_env_vars]
builder += ["export %s='%s'" % (key, env_vars[key]) for key in sorted(env_vars)]
sys_executable = abspath(sys.executable)
if on_win:
sys_executable = "$(cygpath '%s')" % sys_executable
builder += [
"eval \"$(\"%s\" -m conda shell.bash hook)\"" % sys_executable,
"conda activate '%s'" % dev_env_prefix,
]
print("\n".join(builder))
elif shell == 'cmd.exe':
builder = []
builder += ["@IF NOT \"%CONDA_PROMPT_MODIFIER%\" == \"\" @CALL "
"SET \"PROMPT=%%PROMPT:%CONDA_PROMPT_MODIFIER%=%_empty_not_set_%%%\""]
builder += ["@SET %s=" % unset_env_var for unset_env_var in unset_env_vars]
builder += ['@SET "%s=%s"' % (key, env_vars[key]) for key in sorted(env_vars)]
builder += [
'@CALL \"%s\"' % join(dev_env_prefix, 'condabin', 'conda_hook.bat'),
'@IF %errorlevel% NEQ 0 @EXIT /B %errorlevel%',
'@CALL \"%s\" activate \"%s\"' % (join(dev_env_prefix, 'condabin', 'conda.bat'),
dev_env_prefix),
'@IF %errorlevel% NEQ 0 @EXIT /B %errorlevel%',
]
if not context.dry_run:
with open('dev-init.bat', 'w') as fh:
fh.write('\n'.join(builder))
if context.verbosity:
print('\n'.join(builder))
print("now run > .\\dev-init.bat")
else:
raise NotImplementedError()
return 0
# #####################################################
# plan creators
# #####################################################
def make_install_plan(conda_prefix):
try:
python_exe, python_version, site_packages_dir = _get_python_info(conda_prefix)
except EnvironmentError:
python_exe, python_version, site_packages_dir = None, None, None # NOQA
plan = []
# ######################################
# executables
# ######################################
if on_win:
conda_exe_path = join(conda_prefix, 'Scripts', 'conda-script.py')
conda_env_exe_path = join(conda_prefix, 'Scripts', 'conda-env-script.py')
plan.append({
'function': make_entry_point_exe.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'Scripts', 'conda.exe'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': make_entry_point_exe.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'Scripts', 'conda-env.exe'),
'conda_prefix': conda_prefix,
},
})
else:
# We can't put a conda.exe in condabin on Windows. It'll conflict with conda.bat.
plan.append({
'function': make_entry_point.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'condabin', 'conda'),
'conda_prefix': conda_prefix,
'module': 'conda.cli',
'func': 'main',
},
})
conda_exe_path = join(conda_prefix, 'bin', 'conda')
conda_env_exe_path = join(conda_prefix, 'bin', 'conda-env')
plan.append({
'function': make_entry_point.__name__,
'kwargs': {
'target_path': conda_exe_path,
'conda_prefix': conda_prefix,
'module': 'conda.cli',
'func': 'main',
},
})
plan.append({
'function': make_entry_point.__name__,
'kwargs': {
'target_path': conda_env_exe_path,
'conda_prefix': conda_prefix,
'module': 'conda_env.cli.main',
'func': 'main',
},
})
# ######################################
# shell wrappers
# ######################################
if on_win:
plan.append({
'function': install_condabin_conda_bat.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'condabin', 'conda.bat'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_library_bin_conda_bat.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'Library', 'bin', 'conda.bat'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_condabin_conda_activate_bat.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'condabin', '_conda_activate.bat'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_condabin_conda_auto_activate_bat.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'condabin', 'conda_auto_activate.bat'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_condabin_hook_bat.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'condabin', 'conda_hook.bat'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_Scripts_activate_bat.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'Scripts', 'activate.bat'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_activate_bat.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'condabin', 'activate.bat'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_deactivate_bat.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'condabin', 'deactivate.bat'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_activate.__name__,
'kwargs': {
'target_path': join(conda_prefix, get_bin_directory_short_path(), 'activate'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_deactivate.__name__,
'kwargs': {
'target_path': join(conda_prefix, get_bin_directory_short_path(), 'deactivate'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_conda_sh.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'etc', 'profile.d', 'conda.sh'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_conda_fish.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'etc', 'fish', 'conf.d', 'conda.fish'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_conda_psm1.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'shell', 'condabin', 'Conda.psm1'),
'conda_prefix': conda_prefix,
},
})
plan.append({
'function': install_conda_hook_ps1.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'shell', 'condabin', 'conda-hook.ps1'),
'conda_prefix': conda_prefix,
},
})
if site_packages_dir:
plan.append({
'function': install_conda_xsh.__name__,
'kwargs': {
'target_path': join(site_packages_dir, 'xonsh', 'conda.xsh'),
'conda_prefix': conda_prefix,
},
})
else:
print("WARNING: Cannot install xonsh wrapper without a python interpreter in prefix: "
"%s" % conda_prefix, file=sys.stderr)
plan.append({
'function': install_conda_csh.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'etc', 'profile.d', 'conda.csh'),
'conda_prefix': conda_prefix,
},
})
return plan
def make_initialize_plan(conda_prefix, shells, for_user, for_system, anaconda_prompt):
plan = make_install_plan(conda_prefix)
shells = set(shells)
if shells & {'bash', 'zsh'}:
if 'bash' in shells and for_user:
bashrc_path = expand(join('~', '.bash_profile' if on_mac else '.bashrc'))
plan.append({
'function': init_sh_user.__name__,
'kwargs': {
'target_path': bashrc_path,
'conda_prefix': conda_prefix,
'shell': 'bash',
},
})
if 'zsh' in shells and for_user:
zshrc_path = expand(join('~', '.zshrc'))
plan.append({
'function': init_sh_user.__name__,
'kwargs': {
'target_path': zshrc_path,
'conda_prefix': conda_prefix,
'shell': 'zsh',
},
})
if for_system:
plan.append({
'function': init_sh_system.__name__,
'kwargs': {
'target_path': '/etc/profile.d/conda.sh',
'conda_prefix': conda_prefix,
},
})
if 'fish' in shells:
if for_user:
config_fish_path = expand(join('~', '.config', 'config.fish'))
plan.append({
'function': init_fish_user.__name__,
'kwargs': {
'target_path': config_fish_path,
'conda_prefix': conda_prefix,
},
})
if for_system:
raise NotImplementedError()
if 'tcsh' in shells:
if for_user:
raise NotImplementedError()
if for_system:
raise NotImplementedError()
if 'powershell' in shells:
        # There are several places PowerShell can store its path, depending
# on if it's Windows PowerShell, PowerShell Core on Windows, or
# PowerShell Core on macOS/Linux. The easiest way to resolve it is to
# just ask different possible installations of PowerShell where their
# profiles are.
def find_powershell_paths(*exe_names):
for exe_name in exe_names:
try:
yield subprocess_call(
(exe_name, '-NoProfile', '-Command', '$PROFILE')
).stdout.strip()
except Exception:
pass
config_powershell_paths = tuple(
find_powershell_paths('powershell', 'pwsh', 'pwsh-preview')
)
for config_path in config_powershell_paths:
if config_path is not None:
plan.append({
'function': init_powershell_user.__name__,
'kwargs': {
'target_path': config_path,
'conda_prefix': conda_prefix
}
})
if for_system:
raise NotImplementedError(
"PowerShell hooks are only implemented for per-user profiles."
)
if 'cmd.exe' in shells:
if for_user:
plan.append({
'function': init_cmd_exe_registry.__name__,
'kwargs': {
'target_path': 'HKEY_CURRENT_USER\\Software\\Microsoft\\'
'Command Processor\\AutoRun',
'conda_prefix': conda_prefix,
},
})
if for_system:
plan.append({
'function': init_cmd_exe_registry.__name__,
'kwargs': {
'target_path': 'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\'
'Command Processor\\AutoRun',
'conda_prefix': conda_prefix,
},
})
# it would be nice to enable this on a user-level basis, but unfortunately, it is
# a system-level key only.
plan.append({
'function': init_long_path.__name__,
'kwargs': {
'target_path': 'HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\'
'FileSystem\\LongPathsEnabled'
}
})
if anaconda_prompt:
plan.append({
'function': install_anaconda_prompt.__name__,
'kwargs': {
'target_path': join(conda_prefix, 'condabin', 'Anaconda Prompt.lnk'),
'conda_prefix': conda_prefix,
},
})
if on_win:
desktop_dir, exception = get_folder_path(FOLDERID.Desktop)
assert not exception
else:
desktop_dir = join(expanduser('~'), "Desktop")
plan.append({
'function': install_anaconda_prompt.__name__,
'kwargs': {
'target_path': join(desktop_dir, "Anaconda Prompt.lnk"),
'conda_prefix': conda_prefix,
},
})
return plan
# #####################################################
# plan runners
# #####################################################
def run_plan(plan):
for step in plan:
previous_result = step.get('result', None)
if previous_result in (Result.MODIFIED, Result.NO_CHANGE):
continue
try:
result = globals()[step['function']](*step.get('args', ()), **step.get('kwargs', {}))
except EnvironmentError as e:
log.info("%s: %r", step['function'], e, exc_info=True)
result = Result.NEEDS_SUDO
step['result'] = result
def run_plan_elevated(plan):
"""
The strategy of this function differs between unix and Windows. Both strategies use a
subprocess call, where the subprocess is run with elevated privileges. The executable
invoked with the subprocess is `python -m conda.core.initialize`, so see the
`if __name__ == "__main__"` at the bottom of this module.
For unix platforms, we convert the plan list to json, and then call this module with
`sudo python -m conda.core.initialize` while piping the plan json to stdin. We collect json
from stdout for the results of the plan execution with elevated privileges.
For Windows, we create a temporary file that holds the json content of the plan. The
subprocess reads the content of the file, modifies the content of the file with updated
execution status, and then closes the file. This process then reads the content of that file
for the individual operation execution results, and then deletes the file.
"""
if any(step['result'] == Result.NEEDS_SUDO for step in plan):
if on_win:
from ..common.os.windows import run_as_admin
temp_path = None
try:
with NamedTemporaryFile('w+b', suffix='.json', delete=False) as tf:
# the default mode is 'w+b', and universal new lines don't work in that mode
tf.write(ensure_binary(json.dumps(plan, ensure_ascii=False)))
temp_path = tf.name
python_exe = '"%s"' % abspath(sys.executable)
hinstance, error_code = run_as_admin((python_exe, '-m', 'conda.core.initialize',
'"%s"' % temp_path))
if error_code is not None:
print("ERROR during elevated execution.\n rc: %s" % error_code,
file=sys.stderr)
with open(temp_path) as fh:
_plan = json.loads(ensure_text_type(fh.read()))
finally:
if temp_path and lexists(temp_path):
rm_rf(temp_path)
else:
stdin = json.dumps(plan)
result = subprocess_call(
'sudo %s -m conda.core.initialize' % sys.executable,
env={},
path=os.getcwd(),
stdin=stdin
)
stderr = result.stderr.strip()
if stderr:
print(stderr, file=sys.stderr)
_plan = json.loads(result.stdout.strip())
del plan[:]
plan.extend(_plan)
def run_plan_from_stdin():
stdin = sys.stdin.read().strip()
plan = json.loads(stdin)
run_plan(plan)
sys.stdout.write(json.dumps(plan))
def run_plan_from_temp_file(temp_path):
with open(temp_path) as fh:
plan = json.loads(ensure_text_type(fh.read()))
run_plan(plan)
with open(temp_path, 'w+b') as fh:
fh.write(ensure_binary(json.dumps(plan, ensure_ascii=False)))
def print_plan_results(plan, stream=None):
if not stream:
stream = sys.stdout
for step in plan:
print("%-14s%s" % (step.get('result'), step['kwargs']['target_path']), file=stream)
changed = any(step.get('result') == Result.MODIFIED for step in plan)
if changed:
print("\n==> For changes to take effect, close and re-open your current shell. <==\n",
file=stream)
else:
print("No action taken.", file=stream)
# #####################################################
# individual operations
# #####################################################
def make_entry_point(target_path, conda_prefix, module, func):
# target_path: join(conda_prefix, 'bin', 'conda')
conda_ep_path = target_path
if isfile(conda_ep_path):
with open(conda_ep_path) as fh:
original_ep_content = fh.read()
else:
original_ep_content = ""
if on_win:
# no shebang needed on windows
new_ep_content = ""
else:
new_ep_content = "#!%s\n" % join(conda_prefix, get_python_short_path())
conda_extra = dals("""
# Before any more imports, leave cwd out of sys.path for internal 'conda shell.*' commands.
# see https://github.com/conda/conda/issues/6549
if len(sys.argv) > 1 and sys.argv[1].startswith('shell.') and sys.path and sys.path[0] == '':
# The standard first entry in sys.path is an empty string,
# and os.path.abspath('') expands to os.getcwd().
del sys.path[0]
""")
new_ep_content += dals("""
# -*- coding: utf-8 -*-
import sys
%(extra)s
if __name__ == '__main__':
from %(module)s import %(func)s
sys.exit(%(func)s())
""") % {
'extra': conda_extra if module == 'conda.cli' else '',
'module': module,
'func': func,
}
if new_ep_content != original_ep_content:
if context.verbosity:
print('\n')
print(target_path)
print(make_diff(original_ep_content, new_ep_content))
if not context.dry_run:
mkdir_p(dirname(conda_ep_path))
with open(conda_ep_path, 'w') as fdst:
fdst.write(new_ep_content)
if not on_win:
make_executable(conda_ep_path)
return Result.MODIFIED
else:
return Result.NO_CHANGE
def make_entry_point_exe(target_path, conda_prefix):
# target_path: join(conda_prefix, 'Scripts', 'conda.exe')
exe_path = target_path
bits = 8 * tuple.__itemsize__
source_exe_path = join(CONDA_PACKAGE_ROOT, 'shell', 'cli-%d.exe' % bits)
if isfile(exe_path):
if compute_md5sum(exe_path) == compute_md5sum(source_exe_path):
return Result.NO_CHANGE
if not context.dry_run:
if not isdir(dirname(exe_path)):
mkdir_p(dirname(exe_path))
# prefer copy() over create_hard_link_or_copy() because of windows file deletion issues
# with open processes
copy(source_exe_path, exe_path)
return Result.MODIFIED
def install_anaconda_prompt(target_path, conda_prefix):
# target_path: join(conda_prefix, 'condabin', 'Anaconda Prompt.lnk')
# target: join(os.environ["HOMEPATH"], "Desktop", "Anaconda Prompt.lnk")
icon_path = join(CONDA_PACKAGE_ROOT, 'shell', 'conda_icon.ico')
args = (
'/K',
'""%s" && "%s""' % (join(conda_prefix, 'condabin', 'conda_hook.bat'),
join(conda_prefix, 'condabin', 'conda_auto_activate.bat')),
)
# The API for the call to 'create_shortcut' has 3
# required arguments (path, description, filename)
# and 4 optional ones (args, working_dir, icon_path, icon_index).
if not context.dry_run:
create_shortcut(
"%windir%\\System32\\cmd.exe",
"Anconda Prompt",
'' + target_path,
' '.join(args),
'' + expanduser('~'),
'' + icon_path,
)
# TODO: need to make idempotent / support NO_CHANGE
return Result.MODIFIED
def _install_file(target_path, file_content):
if isfile(target_path):
with open(target_path) as fh:
original_content = fh.read()
else:
original_content = ""
new_content = file_content
if new_content != original_content:
if context.verbosity:
print('\n')
print(target_path)
print(make_diff(original_content, new_content))
if not context.dry_run:
mkdir_p(dirname(target_path))
with open(target_path, 'w') as fdst:
fdst.write(new_content)
return Result.MODIFIED
else:
return Result.NO_CHANGE
def install_conda_sh(target_path, conda_prefix):
# target_path: join(conda_prefix, 'etc', 'profile.d', 'conda.sh')
file_content = PosixActivator().hook(auto_activate_base=False)
return _install_file(target_path, file_content)
def install_Scripts_activate_bat(target_path, conda_prefix):
# target_path: join(conda_prefix, 'Scripts', 'activate.bat')
src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'Scripts', 'activate.bat')
with open(src_path) as fsrc:
file_content = fsrc.read()
return _install_file(target_path, file_content)
def install_activate_bat(target_path, conda_prefix):
# target_path: join(conda_prefix, 'condabin', 'activate.bat')
src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'condabin', 'activate.bat')
with open(src_path) as fsrc:
file_content = fsrc.read()
return _install_file(target_path, file_content)
def install_deactivate_bat(target_path, conda_prefix):
# target_path: join(conda_prefix, 'condabin', 'deactivate.bat')
src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'condabin', 'deactivate.bat')
with open(src_path) as fsrc:
file_content = fsrc.read()
return _install_file(target_path, file_content)
def install_activate(target_path, conda_prefix):
# target_path: join(conda_prefix, get_bin_directory_short_path(), 'activate')
src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'bin', 'activate')
file_content = (
"#!/bin/sh\n"
"_CONDA_ROOT=\"%s\"\n"
) % conda_prefix
with open(src_path) as fsrc:
file_content += fsrc.read()
return _install_file(target_path, file_content)
def install_deactivate(target_path, conda_prefix):
# target_path: join(conda_prefix, get_bin_directory_short_path(), 'deactivate')
src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'bin', 'deactivate')
file_content = (
"#!/bin/sh\n"
"_CONDA_ROOT=\"%s\"\n"
) % conda_prefix
with open(src_path) as fsrc:
file_content += fsrc.read()
return _install_file(target_path, file_content)
def install_condabin_conda_bat(target_path, conda_prefix):
# target_path: join(conda_prefix, 'condabin', 'conda.bat')
conda_bat_src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'condabin', 'conda.bat')
with open(conda_bat_src_path) as fsrc:
file_content = fsrc.read()
return _install_file(target_path, file_content)
def install_library_bin_conda_bat(target_path, conda_prefix):
# target_path: join(conda_prefix, 'Library', 'bin', 'conda.bat')
conda_bat_src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'Library', 'bin', 'conda.bat')
with open(conda_bat_src_path) as fsrc:
file_content = fsrc.read()
return _install_file(target_path, file_content)
def install_condabin_conda_activate_bat(target_path, conda_prefix):
# target_path: join(conda_prefix, 'condabin', '_conda_activate.bat')
conda_bat_src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'condabin', '_conda_activate.bat')
with open(conda_bat_src_path) as fsrc:
file_content = fsrc.read()
return _install_file(target_path, file_content)
def install_condabin_conda_auto_activate_bat(target_path, conda_prefix):
# target_path: join(conda_prefix, 'condabin', 'conda_auto_activate.bat')
conda_bat_src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'condabin', 'conda_auto_activate.bat')
with open(conda_bat_src_path) as fsrc:
file_content = fsrc.read()
return _install_file(target_path, file_content)
def install_condabin_hook_bat(target_path, conda_prefix):
# target_path: join(conda_prefix, 'condabin', 'conda_hook.bat')
conda_bat_src_path = join(CONDA_PACKAGE_ROOT, 'shell', 'condabin', 'conda_hook.bat')
with open(conda_bat_src_path) as fsrc:
file_content = fsrc.read()
return _install_file(target_path, file_content)
def install_conda_fish(target_path, conda_prefix):
# target_path: join(conda_prefix, 'etc', 'fish', 'conf.d', 'conda.fish')
file_content = FishActivator().hook(auto_activate_base=False)
return _install_file(target_path, file_content)
def install_conda_psm1(target_path, conda_prefix):
# target_path: join(conda_prefix, 'shell', 'condabin', 'Conda.psm1')
conda_psm1_path = join(CONDA_PACKAGE_ROOT, 'shell', 'condabin', 'Conda.psm1')
with open(conda_psm1_path) as fsrc:
file_content = fsrc.read()
return _install_file(target_path, file_content)
def install_conda_hook_ps1(target_path, conda_prefix):
# target_path: join(conda_prefix, 'shell', 'condabin', 'conda-hook.ps1')
file_content = PowerShellActivator().hook(auto_activate_base=False)
return _install_file(target_path, file_content)
def install_conda_xsh(target_path, conda_prefix):
# target_path: join(site_packages_dir, 'xonsh', 'conda.xsh')
file_content = XonshActivator().hook(auto_activate_base=False)
return _install_file(target_path, file_content)
def install_conda_csh(target_path, conda_prefix):
# target_path: join(conda_prefix, 'etc', 'profile.d', 'conda.csh')
file_content = CshActivator().hook(auto_activate_base=False)
return _install_file(target_path, file_content)
def _config_fish_content(conda_prefix):
if on_win:
from ..activate import native_path_to_unix
conda_exe = native_path_to_unix(join(conda_prefix, 'Scripts', 'conda.exe'))
else:
conda_exe = join(conda_prefix, 'bin', 'conda')
conda_initialize_content = dals("""
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
eval (eval %(conda_exe)s shell.fish hook $argv)
# <<< conda initialize <<<
""") % {
'conda_exe': conda_exe,
}
return conda_initialize_content
def init_fish_user(target_path, conda_prefix):
# target_path: ~/.config/config.fish
user_rc_path = target_path
with open(user_rc_path) as fh:
rc_content = fh.read()
rc_original_content = rc_content
conda_initialize_content = _config_fish_content(conda_prefix)
if not on_win:
rc_content = re.sub(
r"^[ \t]*?(set -gx PATH ([\'\"]?).*?%s\/bin\2 [^\n]*?\$PATH)"
r"" % basename(conda_prefix),
r"# \1 # commented out by conda initialize",
rc_content,
flags=re.MULTILINE,
)
rc_content = re.sub(
r"^[ \t]*[^#\n]?[ \t]*((?:source|\.) .*etc\/fish\/conf\.d\/conda\.fish.*?)\n"
r"(conda activate.*?)$",
r"# \1 # commented out by conda initialize\n# \2 # commented out by conda initialize",
rc_content,
flags=re.MULTILINE,
)
rc_content = re.sub(
r"^[ \t]*[^#\n]?[ \t]*((?:source|\.) .*etc\/fish\/conda\.d\/conda\.fish.*?)$",
r"# \1 # commented out by conda initialize",
rc_content,
flags=re.MULTILINE,
)
replace_str = "__CONDA_REPLACE_ME_123__"
rc_content = re.sub(
CONDA_INITIALIZE_RE_BLOCK,
replace_str,
rc_content,
flags=re.MULTILINE,
)
# TODO: maybe remove all but last of replace_str, if there's more than one occurrence
rc_content = rc_content.replace(replace_str, conda_initialize_content)
if "# >>> conda initialize >>>" not in rc_content:
rc_content += '\n%s\n' % conda_initialize_content
if rc_content != rc_original_content:
if context.verbosity:
print('\n')
print(target_path)
print(make_diff(rc_original_content, rc_content))
if not context.dry_run:
with open(user_rc_path, 'w') as fh:
fh.write(rc_content)
return Result.MODIFIED
else:
return Result.NO_CHANGE
def _bashrc_content(conda_prefix, shell):
if on_win:
from ..activate import native_path_to_unix
conda_exe = native_path_to_unix(join(conda_prefix, 'Scripts', 'conda.exe'))
conda_initialize_content = dals("""
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
eval "$('%(conda_exe)s' shell.%(shell)s hook)"
# <<< conda initialize <<<
""") % {
'conda_exe': conda_exe,
'shell': shell,
}
else:
conda_exe = join(conda_prefix, 'bin', 'conda')
conda_initialize_content = dals("""
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('%(conda_exe)s' shell.%(shell)s hook 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "%(conda_prefix)s/etc/profile.d/conda.sh" ]; then
. "%(conda_prefix)s/etc/profile.d/conda.sh"
else
export PATH="%(conda_bin)s:$PATH"
fi
fi
unset __conda_setup
# <<< conda initialize <<<
""") % {
'conda_exe': conda_exe,
'shell': shell,
'conda_bin': dirname(conda_exe),
'conda_prefix': conda_prefix,
}
return conda_initialize_content
def init_sh_user(target_path, conda_prefix, shell):
# target_path: ~/.bash_profile
user_rc_path = target_path
with open(user_rc_path) as fh:
rc_content = fh.read()
rc_original_content = rc_content
conda_initialize_content = _bashrc_content(conda_prefix, shell)
if not on_win:
rc_content = re.sub(
r"^[ \t]*?(export PATH=[\'\"].*?%s\/bin:\$PATH[\'\"])"
r"" % basename(conda_prefix),
r"# \1 # commented out by conda initialize",
rc_content,
flags=re.MULTILINE,
)
rc_content = re.sub(
r"^[ \t]*[^#\n]?[ \t]*((?:source|\.) .*etc\/profile\.d\/conda\.sh.*?)\n"
r"(conda activate.*?)$",
r"# \1 # commented out by conda initialize\n# \2 # commented out by conda initialize",
rc_content,
flags=re.MULTILINE,
)
rc_content = re.sub(
r"^[ \t]*[^#\n]?[ \t]*((?:source|\.) .*etc\/profile\.d\/conda\.sh.*?)$",
r"# \1 # commented out by conda initialize",
rc_content,
flags=re.MULTILINE,
)
if on_win:
rc_content = re.sub(
r"^[ \t]*^[ \t]*[^#\n]?[ \t]*((?:source|\.) .*Scripts[/\\]activate.*?)$",
r"# \1 # commented out by conda initialize",
rc_content,
flags=re.MULTILINE,
)
else:
rc_content = re.sub(
r"^[ \t]*^[ \t]*[^#\n]?[ \t]*((?:source|\.) .*bin/activate.*?)$",
r"# \1 # commented out by conda initialize",
rc_content,
flags=re.MULTILINE,
)
replace_str = "__CONDA_REPLACE_ME_123__"
rc_content = re.sub(
CONDA_INITIALIZE_RE_BLOCK,
replace_str,
rc_content,
flags=re.MULTILINE,
)
# TODO: maybe remove all but last of replace_str, if there's more than one occurrence
rc_content = rc_content.replace(replace_str, conda_initialize_content)
if "# >>> conda initialize >>>" not in rc_content:
rc_content += '\n%s\n' % conda_initialize_content
if rc_content != rc_original_content:
if context.verbosity:
print('\n')
print(target_path)
print(make_diff(rc_original_content, rc_content))
if not context.dry_run:
with open(user_rc_path, 'w') as fh:
fh.write(rc_content)
return Result.MODIFIED
else:
return Result.NO_CHANGE
def init_sh_system(target_path, conda_prefix):
# target_path: '/etc/profile.d/conda.sh'
conda_sh_system_path = target_path
if exists(conda_sh_system_path):
with open(conda_sh_system_path) as fh:
conda_sh_system_contents = fh.read()
else:
conda_sh_system_contents = ""
conda_sh_contents = _bashrc_content(conda_prefix, 'posix')
if conda_sh_system_contents != conda_sh_contents:
if context.verbosity:
print('\n')
print(target_path)
print(make_diff(conda_sh_contents, conda_sh_system_contents))
if not context.dry_run:
if lexists(conda_sh_system_path):
rm_rf(conda_sh_system_path)
mkdir_p(dirname(conda_sh_system_path))
with open(conda_sh_system_path, 'w') as fh:
fh.write(conda_sh_contents)
return Result.MODIFIED
else:
return Result.NO_CHANGE
def _read_windows_registry(target_path): # pragma: no cover
# HKEY_LOCAL_MACHINE\Software\Microsoft\Command Processor\AutoRun
# HKEY_CURRENT_USER\Software\Microsoft\Command Processor\AutoRun
# returns value_value, value_type -or- None, None if target does not exist
main_key, the_rest = target_path.split('\\', 1)
subkey_str, value_name = the_rest.rsplit('\\', 1)
main_key = getattr(winreg, main_key)
try:
key = winreg.OpenKey(main_key, subkey_str, 0, winreg.KEY_READ)
except EnvironmentError as e:
if e.errno != ENOENT:
raise
return None, None
try:
value_tuple = winreg.QueryValueEx(key, value_name)
value_value = value_tuple[0]
if isinstance(value_value, str):
value_value = value_value.strip()
value_type = value_tuple[1]
return value_value, value_type
except Exception:
# [WinError 2] The system cannot find the file specified
winreg.CloseKey(key)
return None, None
finally:
winreg.CloseKey(key)
def _write_windows_registry(target_path, value_value, value_type): # pragma: no cover
main_key, the_rest = target_path.split('\\', 1)
subkey_str, value_name = the_rest.rsplit('\\', 1)
main_key = getattr(winreg, main_key)
try:
key = winreg.OpenKey(main_key, subkey_str, 0, winreg.KEY_WRITE)
except EnvironmentError as e:
if e.errno != ENOENT:
raise
key = winreg.CreateKey(main_key, subkey_str)
try:
winreg.SetValueEx(key, value_name, 0, value_type, value_value)
finally:
winreg.CloseKey(key)
def init_cmd_exe_registry(target_path, conda_prefix):
# HKEY_LOCAL_MACHINE\Software\Microsoft\Command Processor\AutoRun
# HKEY_CURRENT_USER\Software\Microsoft\Command Processor\AutoRun
prev_value, value_type = _read_windows_registry(target_path)
if prev_value is None:
prev_value = ""
value_type = winreg.REG_EXPAND_SZ
hook_path = '"%s"' % join(conda_prefix, 'condabin', 'conda_hook.bat')
replace_str = "__CONDA_REPLACE_ME_123__"
new_value = re.sub(
r'(\"[^\"]*?conda[-_]hook\.bat\")',
replace_str,
prev_value,
count=1,
flags=re.IGNORECASE | re.UNICODE,
)
new_value = new_value.replace(replace_str, hook_path)
if hook_path not in new_value:
if new_value:
new_value += ' & ' + hook_path
else:
new_value = hook_path
if prev_value != new_value:
if context.verbosity:
print('\n')
print(target_path)
print(make_diff(prev_value, new_value))
if not context.dry_run:
_write_windows_registry(target_path, new_value, value_type)
return Result.MODIFIED
else:
return Result.NO_CHANGE
def init_long_path(target_path):
win_ver, _, win_rev = context.os_distribution_name_version[1].split('.')
# win10, build 14352 was the first preview release that supported this
if int(win_ver) >= 10 and int(win_rev) >= 14352:
prev_value, value_type = _read_windows_registry(target_path)
if str(prev_value) != "1":
if context.verbosity:
print('\n')
print(target_path)
print(make_diff(str(prev_value), '1'))
if not context.dry_run:
_write_windows_registry(target_path, 1, winreg.REG_DWORD)
return Result.MODIFIED
else:
return Result.NO_CHANGE
else:
if context.verbosity:
print('\n')
print('Not setting long path registry key; Windows version must be at least 10 with '
'the fall 2016 "Anniversary update" or newer.')
return Result.NO_CHANGE
def _powershell_profile_content(conda_prefix):
if on_win:
conda_exe = join(conda_prefix, 'Scripts', 'conda.exe')
else:
conda_exe = join(conda_prefix, 'bin', 'conda')
conda_powershell_module = dals("""
#region conda initialize
# !! Contents within this block are managed by 'conda init' !!
(& {conda_exe} shell.powershell hook) | Out-String | Invoke-Expression
#endregion
""".format(conda_exe=conda_exe))
return conda_powershell_module
def init_powershell_user(target_path, conda_prefix):
# target_path: $PROFILE
profile_path = target_path
# NB: the user may not have created a profile. We need to check
# if the file exists first.
if os.path.exists(profile_path):
with open(profile_path) as fp:
profile_content = fp.read()
else:
profile_content = ""
profile_original_content = profile_content
# Find what content we need to add.
conda_initialize_content = _powershell_profile_content(conda_prefix)
# TODO: comment out old ipmos and Import-Modules.
if "#region conda initialize" not in profile_content:
profile_content += "\n{}\n".format(conda_initialize_content)
    else:
        profile_content = re.sub(
            r"\#region conda initialize.*\#endregion",
            "__CONDA_REPLACE_ME_123__",
            profile_content,
            count=1,
            flags=re.DOTALL | re.MULTILINE
        ).replace(
            "__CONDA_REPLACE_ME_123__",
            conda_initialize_content
        )
if profile_content != profile_original_content:
if context.verbosity:
print('\n')
print(target_path)
print(make_diff(profile_original_content, profile_content))
if not context.dry_run:
# Make the directory if needed.
if not exists(dirname(profile_path)):
mkdir_p(dirname(profile_path))
with open(profile_path, 'w') as fp:
fp.write(profile_content)
return Result.MODIFIED
else:
return Result.NO_CHANGE
def remove_conda_in_sp_dir(target_path):
# target_path: site_packages_dir
modified = False
site_packages_dir = target_path
rm_rf_these = chain.from_iterable((
glob(join(site_packages_dir, "conda-*info")),
glob(join(site_packages_dir, "conda.*")),
glob(join(site_packages_dir, "conda-*.egg")),
))
rm_rf_these = (p for p in rm_rf_these if not p.endswith('conda.egg-link'))
for fn in rm_rf_these:
print("rm -rf %s" % join(site_packages_dir, fn), file=sys.stderr)
if not context.dry_run:
rm_rf(join(site_packages_dir, fn))
modified = True
others = (
"conda",
"conda_env",
)
for other in others:
path = join(site_packages_dir, other)
if lexists(path):
print("rm -rf %s" % path, file=sys.stderr)
if not context.dry_run:
rm_rf(path)
modified = True
if modified:
return Result.MODIFIED
else:
return Result.NO_CHANGE
def make_conda_egg_link(target_path, conda_source_root):
# target_path: join(site_packages_dir, 'conda.egg-link')
conda_egg_link_contents = conda_source_root + os.linesep
if isfile(target_path):
with open(target_path) as fh:
conda_egg_link_contents_old = fh.read()
else:
conda_egg_link_contents_old = ""
if conda_egg_link_contents_old != conda_egg_link_contents:
if context.verbosity:
print('\n', file=sys.stderr)
print(target_path, file=sys.stderr)
print(make_diff(conda_egg_link_contents_old, conda_egg_link_contents), file=sys.stderr)
if not context.dry_run:
with open(target_path, 'w') as fh:
fh.write(ensure_fs_path_encoding(conda_egg_link_contents))
return Result.MODIFIED
else:
return Result.NO_CHANGE
def modify_easy_install_pth(target_path, conda_source_root):
# target_path: join(site_packages_dir, 'easy-install.pth')
easy_install_new_line = conda_source_root
if isfile(target_path):
with open(target_path) as fh:
old_contents = fh.read()
else:
old_contents = ""
old_contents_lines = old_contents.splitlines()
if easy_install_new_line in old_contents_lines:
return Result.NO_CHANGE
ln_end = os.sep + "conda"
old_contents_lines = tuple(ln for ln in old_contents_lines if not ln.endswith(ln_end))
new_contents = easy_install_new_line + '\n' + '\n'.join(old_contents_lines) + '\n'
if context.verbosity:
print('\n', file=sys.stderr)
print(target_path, file=sys.stderr)
print(make_diff(old_contents, new_contents), file=sys.stderr)
if not context.dry_run:
with open(target_path, 'w') as fh:
fh.write(ensure_fs_path_encoding(new_contents))
return Result.MODIFIED
def make_dev_egg_info_file(target_path):
# target_path: join(conda_source_root, 'conda.egg-info')
if isfile(target_path):
with open(target_path) as fh:
old_contents = fh.read()
else:
old_contents = ""
new_contents = dals("""
Metadata-Version: 1.1
Name: conda
Version: %s
Platform: UNKNOWN
Summary: OS-agnostic, system-level binary package manager.
""") % CONDA_VERSION
if old_contents == new_contents:
return Result.NO_CHANGE
if context.verbosity:
print('\n', file=sys.stderr)
print(target_path, file=sys.stderr)
print(make_diff(old_contents, new_contents), file=sys.stderr)
if not context.dry_run:
if lexists(target_path):
rm_rf(target_path)
with open(target_path, 'w') as fh:
fh.write(new_contents)
return Result.MODIFIED
# #####################################################
# helper functions
# #####################################################
def make_diff(old, new):
return '\n'.join(unified_diff(old.splitlines(), new.splitlines()))
def _get_python_info(prefix):
python_exe = join(prefix, get_python_short_path())
result = subprocess_call("%s --version" % python_exe)
stdout, stderr = result.stdout.strip(), result.stderr.strip()
if stderr:
python_version = stderr.split()[1]
elif stdout: # pragma: no cover
python_version = stdout.split()[1]
else: # pragma: no cover
raise ValueError("No python version information available.")
site_packages_dir = join(prefix,
win_path_ok(get_python_site_packages_short_path(python_version)))
return python_exe, python_version, site_packages_dir
if __name__ == "__main__":
if on_win:
temp_path = sys.argv[1]
run_plan_from_temp_file(temp_path)
else:
run_plan_from_stdin()
|
the-stack_106_25997 | import gym
from torch import nn as nn
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
from rlkit.policies.argmax import ArgmaxDiscretePolicy
from rlkit.torch.policies.softmax_policy import SoftmaxPolicy
from rlkit.torch.networks import Mlp
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.torch_rl_algorithm import TorchOnlineRLAlgorithm
from log_path import get_traffic_path_information
def experiment(variant):
import sys
from traffic.make_env import make_env
expl_env = make_env(args.exp_name,**variant['env_kwargs'])
eval_env = make_env(args.exp_name,**variant['env_kwargs'])
obs_dim = eval_env.observation_space.low.size
action_dim = eval_env.action_space.n
from graph_builder_multi import MultiTrafficGraphBuilder
gb = MultiTrafficGraphBuilder(input_dim=4, node_num=expl_env.max_veh_num+1,
ego_init=torch.tensor([0.,1.]),
other_init=torch.tensor([1.,0.]),
)
from gnn_net import GNNNet
gnn = GNNNet(
pre_graph_builder = gb,
node_dim = 16,
num_conv_layers=3)
from layers import SelectLayer
encoders = []
encoders.append(nn.Sequential(gnn,SelectLayer(1,0),nn.ReLU()))
sup_learners = []
for i in range(expl_env.max_veh_num):
sup_learner = nn.Sequential(
gnn,
SelectLayer(1,i+1),
nn.ReLU(),
nn.Linear(16, 2),
)
sup_learner = SoftmaxPolicy(sup_learner, learn_temperature=False)
sup_learners.append(sup_learner)
encoders.append(sup_learner)
decoder = Mlp(input_size=int(16+2*expl_env.max_veh_num),
output_size=action_dim,
hidden_sizes=[],
)
from layers import ConcatLayer
need_gradients = np.array([True]*len(encoders))
if variant['no_gradient']:
need_gradients[1:] = False
policy = nn.Sequential(
ConcatLayer(encoders, need_gradients=list(need_gradients), dim=1),
decoder,
)
policy = SoftmaxPolicy(policy, learn_temperature=False)
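    # The assembled policy concatenates the ego node's GNN embedding (encoders[0]) with the
    # outputs of the per-vehicle supervised heads (sup_learners), then applies a single linear
    # decoder; passing --ng marks the supervised-head inputs as not needing gradients.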
vf = Mlp(
hidden_sizes=[32, 32],
input_size=obs_dim,
output_size=1,
)
vf_criterion = nn.MSELoss()
eval_policy = ArgmaxDiscretePolicy(policy,use_preactivation=True)
expl_policy = policy
eval_path_collector = MdpPathCollector(
eval_env,
eval_policy,
)
expl_path_collector = MdpPathCollector(
expl_env,
expl_policy,
)
from sup_replay_buffer import SupReplayBuffer
replay_buffer = SupReplayBuffer(
observation_dim = obs_dim,
label_dims = [1]*expl_env.max_veh_num,
max_replay_buffer_size = int(1e6),
)
from rlkit.torch.vpg.ppo_sup import PPOSupTrainer
trainer = PPOSupTrainer(
policy=policy,
value_function=vf,
vf_criterion=vf_criterion,
sup_learners=sup_learners,
replay_buffer=replay_buffer,
**variant['trainer_kwargs']
)
algorithm = TorchOnlineRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
log_path_function = get_traffic_path_information,
**variant['algorithm_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default='t_intersection_multi')
parser.add_argument('--nob', action='store_true', default=False)
parser.add_argument('--yld', type=float, default=0.5)
parser.add_argument('--ds', type=float, default=0.1)
parser.add_argument('--log_dir', type=str, default='PPOSupGNN')
parser.add_argument('--ng', action='store_true', default=False) # no shared gradient
parser.add_argument('--eb', type=float, default=None)
parser.add_argument('--lr', type=float, default=None)
parser.add_argument('--bs', type=int, default=None)
parser.add_argument('--epoch', type=int, default=None)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--snapshot_mode', type=str, default="gap_and_last")
parser.add_argument('--snapshot_gap', type=int, default=500)
args = parser.parse_args()
import os.path as osp
pre_dir = './Data/'+args.exp_name+'yld'+str(args.yld)+'ds'+str(args.ds)+'full'
main_dir = args.log_dir\
+('ng' if args.ng else '')\
+(('eb'+str(args.eb)) if args.eb else '')\
+(('lr'+str(args.lr)) if args.lr else '')\
+(('bs'+str(args.bs)) if args.bs else '')
log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))
max_path_length = 200
# noinspection PyTypeChecker
variant = dict(
no_gradient=args.ng,
env_kwargs=dict(
observe_mode='full',
yld=args.yld,
driver_sigma=args.ds,
),
algorithm_kwargs=dict(
num_epochs=(args.epoch if args.epoch else 1000),
num_eval_steps_per_epoch=1000,
num_train_loops_per_epoch=1,
num_trains_per_train_loop=1,
num_expl_steps_per_train_loop=(args.bs if args.bs else 1000),
max_path_length=max_path_length,
save_best=True,
),
trainer_kwargs=dict(
discount=0.99,
max_path_length=max_path_length,
policy_lr=(args.lr if args.lr else 1e-4),
vf_lr=(args.lr if args.lr else 1e-3),
exploration_bonus=(args.eb if args.eb else 0.),
),
vf_kwargs=dict(
hidden_sizes=[64],
),
)
import os
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
with open(osp.join(log_dir,'variant.json'),'w') as out_json:
import json
json.dump(variant,out_json,indent=2)
import sys
cmd_input = 'python ' + ' '.join(sys.argv) + '\n'
with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:
f.write(cmd_input)
setup_logger(args.exp_name+'/'+main_dir, variant=variant,
snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,
log_dir=log_dir)
import numpy as np
import torch
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# ptu.set_gpu_mode(True) # optionally set the GPU (default=False)
experiment(variant)
|
the-stack_106_25999 | #
# Created on Wed Sep 08 2021
#
# The MIT License (MIT)
# Copyright (c) 2021 Maatuq
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from typing import List
class Solution:
def __init__(self):
self.ans = float("inf")
def splitArray(self, nums: List[int], m: int) -> int:
def dfs(i, subarrays_cnt, cur_sum, max_sum):
if i == n and subarrays_cnt == m:
self.ans = min(self.ans, max_sum)
return
if i == n:
return
if i > 0:
dfs(
i + 1,
subarrays_cnt,
cur_sum + nums[i],
max(max_sum, cur_sum + nums[i]),
)
if subarrays_cnt < m:
dfs(i + 1, subarrays_cnt + 1, nums[i], max(max_sum, nums[i]))
n = len(nums)
dfs(0, 0, 0, 0)
return self.ans
class Solution_2:
def splitArray(self, nums: List[int], m: int) -> int:
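        # Binary search on the answer: the optimal "largest subarray sum" lies between
        # max(nums) (every element in its own subarray) and sum(nums) (a single subarray).
        # For a candidate `mid`, greedily count how many subarrays are needed so that no
        # subarray sum exceeds `mid`; if that count is <= m, `mid` is feasible and we search
        # lower, otherwise we search higher.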
left, right = 0, 0
for e in nums:
right += e
if left < e:
left = e
ans = right
while left <= right:
mid = (left + right) // 2
a_sum = 0
cnt = 1
for e in nums:
if a_sum + e > mid:
cnt += 1
a_sum = e
else:
a_sum += e
if cnt <= m:
ans = min(ans, mid)
right = mid - 1
else:
left = mid + 1
return ans
S = Solution_2()
nums = [7, 2, 5, 10, 8]
m = 2
print(S.splitArray(nums, m))
|
the-stack_106_26000 |
# Copyright 2009 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unittests for the Seq objects."""
import warnings
import unittest
import sys
from Bio import BiopythonWarning
from Bio import SeqIO
from Bio.Alphabet import generic_protein, generic_nucleotide
from Bio.Alphabet import generic_dna, generic_rna
from Bio.Alphabet.IUPAC import protein, extended_protein
from Bio.Alphabet.IUPAC import unambiguous_dna, ambiguous_dna, ambiguous_rna
from Bio.Data.IUPACData import ambiguous_dna_values, ambiguous_rna_values
from Bio.Seq import Seq, UnknownSeq, MutableSeq, translate
from Bio.Data.CodonTable import TranslationError, CodonTable
if sys.version_info[0] < 3:
from string import maketrans
else:
maketrans = str.maketrans
# This is just the standard table with fewer stop codons
# (replaced with coding for O as an artificial example)
special_table = CodonTable(forward_table={
"TTT": "F", "TTC": "F", "TTA": "L", "TTG": "L",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S",
"TAT": "Y", "TAC": "Y", "TAA": "O",
"TGT": "C", "TGC": "C", "TGA": "O", "TGG": "W",
"CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAT": "H", "CAC": "H", "CAA": "Q", "CAG": "Q",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R",
"ATT": "I", "ATC": "I", "ATA": "I", "ATG": "M",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"AAT": "N", "AAC": "N", "AAA": "K", "AAG": "K",
"AGT": "S", "AGC": "S", "AGA": "R", "AGG": "R",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GAT": "D", "GAC": "D", "GAA": "E", "GAG": "E",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G"},
start_codons=["TAA", "TAG", "TGA"],
stop_codons=["TAG"])
Chilodonella_uncinata_table = CodonTable(forward_table={
"TTT": "F", "TTC": "F", "TTA": "L", "TTG": "L",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S",
"TAT": "Y", "TAC": "Y", "TAG": "Q", # noqa: E241
"TGT": "C", "TGC": "C", "TGA": "W", "TGG": "W",
"CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAT": "H", "CAC": "H", "CAA": "Q", "CAG": "Q",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R",
"ATT": "I", "ATC": "I", "ATA": "I", "ATG": "M",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"AAT": "N", "AAC": "N", "AAA": "K", "AAG": "K",
"AGT": "S", "AGC": "S", "AGA": "R", "AGG": "R",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GAT": "D", "GAC": "D", "GAA": "E", "GAG": "E",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G"},
start_codons=["ATG"],
stop_codons=["TAA"])
class StringMethodTests(unittest.TestCase):
_examples = [
# These are length 9, a multiple of 3 for translation tests:
Seq("ACGTGGGGT", generic_protein),
Seq("ACGTGGGGT", generic_nucleotide),
Seq("ACGTGGGGT", generic_dna),
Seq("ACGUGGGGU", generic_rna),
Seq("GG", generic_protein),
Seq("GG", generic_nucleotide),
Seq("GG", generic_dna),
Seq("GG", generic_rna),
Seq("A", generic_protein),
Seq("A", generic_nucleotide),
Seq("A", generic_dna),
Seq("A", generic_rna),
UnknownSeq(1),
UnknownSeq(1, character="n"),
UnknownSeq(1, generic_rna),
UnknownSeq(1, generic_rna, "n"),
UnknownSeq(1, generic_rna, "N"),
UnknownSeq(12, generic_rna, "N"),
UnknownSeq(12, generic_dna, "N"),
UnknownSeq(12, generic_nucleotide, "N"),
UnknownSeq(12, generic_protein, "X"),
UnknownSeq(12, character="X"),
UnknownSeq(12),
]
for seq in _examples[:]:
if isinstance(seq, Seq):
_examples.append(seq.tomutable())
_start_end_values = [0, 1, 2, 1000, -1, -2, -999, None]
def _test_method(self, method_name, pre_comp_function=None,
start_end=False):
"""Check this method matches the plain string's method."""
if pre_comp_function is None:
# Define a no-op function:
def pre_comp_function(x):
return x
self.assertTrue(isinstance(method_name, str))
for example1 in self._examples:
if not hasattr(example1, method_name):
# e.g. MutableSeq does not support find
continue
str1 = str(example1)
for example2 in self._examples:
if not hasattr(example2, method_name):
# e.g. MutableSeq does not support find
continue
if method_name in ("index", "rindex") and isinstance(example1, MutableSeq) and len(example2) > 1:
# MutableSeq index only supports single entries
continue
str2 = str(example2)
try:
i = pre_comp_function(getattr(example1, method_name)(str2))
except ValueError:
i = ValueError
try:
j = pre_comp_function(getattr(str1, method_name)(str2))
except ValueError:
j = ValueError
if i != j:
raise ValueError("%s.%s(%s) = %r, not %r"
% (repr(example1),
method_name,
repr(str2),
i,
j))
try:
try:
i = pre_comp_function(getattr(example1, method_name)(example2))
except ValueError:
i = ValueError
try:
j = pre_comp_function(getattr(str1, method_name)(str2))
except ValueError:
j = ValueError
if i != j:
raise ValueError("%s.%s(%s) = %r, not %r"
% (repr(example1),
method_name,
repr(example2),
i,
j))
except TypeError:
# TODO - Check the alphabets do clash!
pass
if start_end:
if isinstance(example1, MutableSeq):
# Does not support start/end arguments
continue
for start in self._start_end_values:
try:
i = pre_comp_function(getattr(example1, method_name)(str2, start))
except ValueError:
i = ValueError
try:
j = pre_comp_function(getattr(str1, method_name)(str2, start))
except ValueError:
j = ValueError
if i != j:
raise ValueError("%s.%s(%s, %i) = %r, not %r"
% (repr(example1),
method_name,
repr(str2),
start,
i,
j))
for end in self._start_end_values:
try:
i = pre_comp_function(getattr(example1, method_name)(str2, start, end))
except ValueError:
i = ValueError
try:
j = pre_comp_function(getattr(str1, method_name)(str2, start, end))
except ValueError:
j = ValueError
if i != j:
raise ValueError("%s.%s(%s, %i, %i) = %r, not %r"
% (repr(example1),
method_name,
repr(str2),
start,
end,
i,
j))
def test_str_count(self):
"""Check matches the python string count method."""
self._test_method("count", start_end=True)
def test_str_count_overlap_GG(self):
"""Check our count_overlap method using GG."""
# Testing with self._examples
expected = [3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, # Seq() Tests
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # UnknownSeq() Tests
expected *= 2 # MutableSeq() Tests
assert len(self._examples) == len(expected)
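        # e.g. "ACGTGGGGT" contains "GGGG", giving 3 overlapping occurrences of "GG"
        # (positions 4-5, 5-6 and 6-7), whereas non-overlapping str.count("GG") would be 2.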
for seq, exp in zip(self._examples, expected):
# Using search term GG as a string
self.assertEqual(seq.count_overlap("GG"), exp)
self.assertEqual(seq.count_overlap("G" * 5), 0)
# Using search term GG as a Seq with generic alphabet
self.assertEqual(seq.count_overlap(Seq("GG")), exp)
self.assertEqual(seq.count_overlap(Seq("G" * 5)), 0)
def test_count_overlap_start_end_GG(self):
"""Check our count_overlap method using GG with variable ends and starts."""
# Testing Seq() and MutableSeq() with variable start and end arguments
start_end_exp = [(1, 7, 3),
(3, None, 3),
(3, 6, 2),
(4, 6, 1),
(4, -1, 2),
(-5, None, 2),
(-5, 7, 2),
(7, -5, 0),
(-100, None, 3),
(None, 100, 3),
(-100, 1000, 3)]
testing_seq = "GTAGGGGAG"
for start, end, exp in start_end_exp:
self.assertEqual(Seq(testing_seq).count_overlap("GG", start, end), exp)
self.assertEqual(MutableSeq(testing_seq).count_overlap("GG", start, end), exp)
        # Testing Seq() and MutableSeq() with a more heterogeneous sequence
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("GG"), 5)
self.assertEqual(MutableSeq("GGGTGGTAGGG").count_overlap("GG"), 5)
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("GG", 2, 8), 1)
self.assertEqual(MutableSeq("GGGTGGTAGGG").count_overlap("GG", 2, 8), 1)
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("GG", -11, 6), 3)
self.assertEqual(MutableSeq("GGGTGGTAGGG").count_overlap("GG", -11, 6), 3)
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("GG", 7, 2), 0)
self.assertEqual(MutableSeq("GGGTGGTAGGG").count_overlap("GG", 7, 2), 0)
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("GG", -2, -10), 0)
# Testing UnknownSeq() with variable start and end arguments
alphabet_char_start_end_exp = [(generic_rna, "N", 1, 7, 0),
(generic_dna, "N", 1, 7, 0),
(generic_rna, "N", -4, None, 0),
(generic_dna, "N", -4, None, 0),
(generic_protein, "X", 1, 7, 0)]
for alpha, char, start, end, exp in alphabet_char_start_end_exp:
self.assertEqual(UnknownSeq(12, alpha, char).count_overlap("GG", start, end), exp)
self.assertEqual(UnknownSeq(12, character="X").count_overlap("GG", 1, 7), 0)
# Testing UnknownSeq() with some more cases including unusual edge cases
substr_start_end_exp = [("G", 100, 105, 0),
("G", -1, 4, 0),
("G", 4, -1, 0),
("G", -8, -2, 0),
("G", -2, -8, 0),
("G", 8, 2, 0),
("G", 2, 8, 0),
("GG", 8, 2, 0),
("GG", 2, 8, 0),
("GG", -5, -1, 0),
("GG", 1, 5, 0),
("GGG", None, None, 0),
("GGGGGGGGG", None, None, 0),
("GGG", 1, 2, 0)]
for substr, start, end, exp in substr_start_end_exp:
self.assertEqual(UnknownSeq(7, character="N").count_overlap(substr, start, end), exp)
self.assertEqual(UnknownSeq(7, character="N").count_overlap("GG", 1), 0)
def test_str_count_overlap_NN(self):
"""Check our count_overlap method using NN."""
# Testing with self._examples
expected = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # Seq() Tests
0, 0, 0, 0, 0, 11, 11, 11, 0, 0, 0] # UnknownSeq() Tests
expected *= 2 # MutableSeq() Tests
assert len(self._examples) == len(expected)
for seq, exp in zip(self._examples, expected):
# Using search term NN as a string
self.assertEqual(seq.count_overlap("NN"), exp)
self.assertEqual(seq.count_overlap("N" * 13), 0)
# Using search term NN as a Seq with generic alphabet
self.assertEqual(seq.count_overlap(Seq("NN")), exp)
self.assertEqual(seq.count_overlap(Seq("N" * 13)), 0)
def test_count_overlap_start_end_NN(self):
"""Check our count_overlap method using NN with variable ends and starts."""
# Testing Seq() and MutableSeq() with variable start and end arguments
start_end_exp = [(1, 7, 0),
(3, None, 0),
(3, 6, 0),
(4, 6, 0),
(4, -1, 0),
(-5, None, 0),
(-5, 7, 0),
(7, -5, 0),
(-100, None, 0),
(None, 100, 0),
(-100, 1000, 0)]
testing_seq = "GTAGGGGAG"
for start, end, exp in start_end_exp:
self.assertEqual(Seq(testing_seq).count_overlap("NN", start, end), exp)
self.assertEqual(MutableSeq(testing_seq).count_overlap("NN", start, end), exp)
        # Testing Seq() and MutableSeq() with a more heterogeneous sequence
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("NN"), 0)
self.assertEqual(MutableSeq("GGGTGGTAGGG").count_overlap("NN"), 0)
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("NN", 2, 8), 0)
self.assertEqual(MutableSeq("GGGTGGTAGGG").count_overlap("NN", 2, 8), 0)
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("NN", -11, 6), 0)
self.assertEqual(MutableSeq("GGGTGGTAGGG").count_overlap("NN", -11, 6), 0)
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("NN", 7, 2), 0)
self.assertEqual(MutableSeq("GGGTGGTAGGG").count_overlap("NN", 7, 2), 0)
self.assertEqual(Seq("GGGTGGTAGGG").count_overlap("NN", -10, -2), 0)
# Testing UnknownSeq() with variable start and end arguments
alphabet_char_start_end_exp = [(generic_rna, "N", 1, 7, 5),
(generic_dna, "N", 1, 7, 5),
(generic_rna, "N", -4, None, 3),
(generic_dna, "N", -4, None, 3),
(generic_protein, "X", 1, 7, 0)]
for alpha, char, start, end, exp in alphabet_char_start_end_exp:
self.assertEqual(UnknownSeq(12, alpha, char).count_overlap("NN", start, end), exp)
self.assertEqual(UnknownSeq(12, character="X").count_overlap("NN", 1, 7), 0)
# Testing UnknownSeq() with some more cases including unusual edge cases
substr_start_end_exp = [("N", 100, 105, 0),
("N", -1, 4, 0),
("N", 4, -1, 2),
("N", -8, -2, 5),
("N", -2, -8, 0),
("N", 8, 2, 0),
("N", 2, 8, 5),
("NN", 8, 2, 0),
("NN", 2, 8, 4),
("NN", -5, -1, 3),
("NN", 1, 5, 3),
("NNN", None, None, 5),
("NNNNNNNNN", None, None, 0),
("NNN", 1, 2, 0)]
for substr, start, end, exp in substr_start_end_exp:
self.assertEqual(UnknownSeq(7, character="N").count_overlap(substr, start, end), exp)
self.assertEqual(UnknownSeq(7, character="N").count_overlap("NN", 1), 5)
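# For reference, a small illustrative sketch (not part of the test suite itself)
# of how count_overlap differs from the non-overlapping count method on the
# sequence used above:
#
#     Seq("GTAGGGGAG").count("GG")          # -> 2 (non-overlapping)
#     Seq("GTAGGGGAG").count_overlap("GG")  # -> 3 (overlapping, as asserted above)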
def test_str_find(self):
"""Check matches the python string find method."""
self._test_method("find", start_end=True)
def test_str_rfind(self):
"""Check matches the python string rfind method."""
self._test_method("rfind", start_end=True)
def test_str_index(self):
"""Check matches the python string index method."""
self._test_method("index", start_end=True)
def test_str_rindex(self):
"""Check matches the python string rindex method."""
self._test_method("rindex", start_end=True)
def test_str_startswith(self):
"""Check matches the python string startswith method."""
self._test_method("startswith", start_end=True)
self.assertTrue("ABCDE".startswith(("ABE", "OBE", "ABC")))
# Now check with a tuple of sub sequences
for example1 in self._examples:
if not hasattr(example1, "startswith"):
# e.g. MutableSeq does not support this
continue
subs = tuple(example1[start:start + 2] for start
in range(0, len(example1) - 2, 3))
subs_str = tuple(str(s) for s in subs)
self.assertEqual(str(example1).startswith(subs_str),
example1.startswith(subs))
self.assertEqual(str(example1).startswith(subs_str),
example1.startswith(subs_str)) # strings!
self.assertEqual(str(example1).startswith(subs_str, 3),
example1.startswith(subs, 3))
self.assertEqual(str(example1).startswith(subs_str, 2, 6),
example1.startswith(subs, 2, 6))
def test_str_endswith(self):
"""Check matches the python string endswith method."""
self._test_method("endswith", start_end=True)
self.assertTrue("ABCDE".endswith(("ABE", "OBE", "CDE")))
# Now check with a tuple of sub sequences
for example1 in self._examples:
if not hasattr(example1, "endswith"):
# e.g. MutableSeq does not support this
continue
subs = tuple(example1[start:start + 2] for start
in range(0, len(example1) - 2, 3))
subs_str = tuple(str(s) for s in subs)
self.assertEqual(str(example1).endswith(subs_str),
example1.endswith(subs))
self.assertEqual(str(example1).endswith(subs_str),
example1.endswith(subs_str))  # strings!
self.assertEqual(str(example1).endswith(subs_str, 3),
example1.endswith(subs, 3))
self.assertEqual(str(example1).endswith(subs_str, 2, 6),
example1.endswith(subs, 2, 6))
def test_str_strip(self):
"""Check matches the python string strip method."""
self._test_method("strip", pre_comp_function=str)
def test_str_rstrip(self):
"""Check matches the python string rstrip method."""
self._test_method("rstrip", pre_comp_function=str)
def test_str_split(self):
"""Check matches the python string split method."""
# Calling (r)split should return a list of Seq-like objects, we'll
# just apply str() to each of them so it matches the string method
self._test_method("split",
pre_comp_function=lambda x: [str(y) for y in x])  # noqa: E731
def test_str_rsplit(self):
"""Check matches the python string rsplit method."""
# Calling (r)split should return a list of Seq-like objects, we'll
# just apply str() to each of them so it matches the string method
self._test_method("rsplit",
pre_comp_function=lambda x: [str(y) for y in x])  # noqa: E731
def test_str_lsplit(self):
"""Check matches the python string rstrip method."""
# Calling (r)split should return a list of Seq-like objects, we'll
# just apply str() to each of them so it matches the string method
self._test_method("rstrip",
pre_comp_function=lambda x: [str(y) for y in x]) # noqa: E731
def test_str_length(self):
"""Check matches the python string __len__ method."""
for example1 in self._examples:
str1 = str(example1)
self.assertEqual(len(example1), len(str1))
def test_str_upper(self):
"""Check matches the python string upper method."""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
str1 = str(example1)
self.assertEqual(str(example1.upper()), str1.upper())
def test_str_lower(self):
"""Check matches the python string lower method."""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
str1 = str(example1)
self.assertEqual(str(example1.lower()), str1.lower())
def test_str_encode(self):
"""Check matches the python string encode method."""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
str1 = str(example1)
self.assertEqual(example1.encode("ascii"), str1.encode("ascii"))
self.assertEqual(example1.encode(), str1.encode())
def test_str_hash(self):
"""Check hashing matches the python string hash."""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
with warnings.catch_warnings():
# Silence change in behaviour warning
warnings.simplefilter("ignore", BiopythonWarning)
self.assertEqual(hash(str(example1)), hash(example1),
"Hash mismatch, %r for %r vs %r for %r"
% (hash(str(example1)), id(example1),
hash(example1), example1))
def test_str_comparison(self):
"""Check comparison operators match the python string comparisons."""
for example1 in self._examples:
for example2 in self._examples:
with warnings.catch_warnings():
# Silence alphabet warning
warnings.simplefilter("ignore", BiopythonWarning)
self.assertEqual(str(example1) == str(example2),
example1 == example2,
"Checking %r == %r" % (example1, example2))
self.assertEqual(str(example1) != str(example2),
example1 != example2,
"Checking %r != %r" % (example1, example2))
self.assertEqual(str(example1) < str(example2),
example1 < example2,
"Checking %r < %r" % (example1, example2))
self.assertEqual(str(example1) <= str(example2),
example1 <= example2,
"Checking %r <= %r" % (example1, example2))
self.assertEqual(str(example1) > str(example2),
example1 > example2,
"Checking %r > %r" % (example1, example2))
self.assertEqual(str(example1) >= str(example2),
example1 >= example2,
"Checking %r >= %r" % (example1, example2))
def test_str_getitem(self):
"""Check slicing and indexing works like a string."""
for example1 in self._examples:
str1 = str(example1)
for i in self._start_end_values:
if i is not None and abs(i) < len(example1):
self.assertEqual(str(example1[i]), str1[i])
self.assertEqual(str(example1[:i]), str1[:i])
self.assertEqual(str(example1[i:]), str1[i:])
for j in self._start_end_values:
self.assertEqual(str(example1[i:j]), str1[i:j])
for step in range(-3, 4):
if step == 0:
try:
print(example1[i:j:step])
self.fail()  # Should fail!
except ValueError:
pass
else:
self.assertEqual(str(example1[i:j:step]),
str1[i:j:step])
def test_tomutable(self):
"""Check obj.tomutable() method."""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
mut = example1.tomutable()
self.assertTrue(isinstance(mut, MutableSeq))
self.assertEqual(str(mut), str(example1))
self.assertEqual(mut.alphabet, example1.alphabet)
def test_toseq(self):
"""Check obj.toseq() method."""
for example1 in self._examples:
try:
seq = example1.toseq()
except AttributeError:
self.assertTrue(isinstance(example1, Seq))
continue
self.assertTrue(isinstance(seq, Seq))
self.assertEqual(str(seq), str(example1))
self.assertEqual(seq.alphabet, example1.alphabet)
def test_the_complement(self):
"""Check obj.complement() method."""
mapping = ""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
try:
comp = example1.complement()
except ValueError as e:
self.assertEqual(str(e), "Proteins do not have complements!")
continue
str1 = str(example1)
# This only does the unambiguous cases
if any(("U" in str1, "u" in str1, example1.alphabet == generic_rna)):
mapping = maketrans("ACGUacgu", "UGCAugca")
elif any(("T" in str1, "t" in str1, example1.alphabet == generic_dna,
example1.alphabet == generic_nucleotide)):
mapping = maketrans("ACGTacgt", "TGCAtgca")
elif "A" not in str1 and "a" not in str1:
mapping = maketrans("CGcg", "GCgc")
else:
# TODO - look at alphabet?
raise ValueError(example1)
self.assertEqual(str1.translate(mapping), str(comp))
self.assertEqual(comp.alphabet, example1.alphabet)
def test_the_reverse_complement(self):
"""Check obj.reverse_complement() method."""
mapping = ""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
try:
comp = example1.reverse_complement()
except ValueError as e:
self.assertEqual(str(e), "Proteins do not have complements!")
continue
str1 = str(example1)
# This only does the unambiguous cases
if any(("U" in str1, "u" in str1, example1.alphabet == generic_rna)):
mapping = maketrans("ACGUacgu", "UGCAugca")
elif any(("T" in str1, "t" in str1, example1.alphabet == generic_dna,
example1.alphabet == generic_nucleotide)):
mapping = maketrans("ACGTacgt", "TGCAtgca")
elif "A" not in str1 and "a" not in str1:
mapping = maketrans("CGcg", "GCgc")
else:
# TODO - look at alphabet?
continue
self.assertEqual(str1.translate(mapping)[::-1], str(comp))
self.assertEqual(comp.alphabet, example1.alphabet)
def test_the_transcription(self):
"""Check obj.transcribe() method."""
mapping = ""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
try:
tran = example1.transcribe()
except ValueError as e:
if str(e) == "Proteins cannot be transcribed!":
continue
if str(e) == "RNA cannot be transcribed!":
continue
raise e
str1 = str(example1)
if len(str1) % 3 != 0:
# TODO - Check for or silence the expected warning?
continue
self.assertEqual(str1.replace("T", "U").replace("t", "u"), str(tran))
self.assertEqual(tran.alphabet, generic_rna) # based on limited examples
def test_the_back_transcription(self):
"""Check obj.back_transcribe() method."""
mapping = ""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
try:
tran = example1.back_transcribe()
except ValueError as e:
if str(e) == "Proteins cannot be back transcribed!":
continue
if str(e) == "DNA cannot be back transcribed!":
continue
raise e
str1 = str(example1)
self.assertEqual(str1.replace("U", "T").replace("u", "t"), str(tran))
self.assertEqual(tran.alphabet, generic_dna) # based on limited examples
def test_the_translate(self):
"""Check obj.translate() method."""
mapping = ""
for example1 in self._examples:
if isinstance(example1, MutableSeq):
continue
if len(example1) % 3 != 0:
# TODO - Check for or silence the expected warning?
continue
try:
tran = example1.translate()
except ValueError as e:
if str(e) == "Proteins cannot be translated!":
continue
raise e
# This is based on the limited example not having stop codons:
if tran.alphabet not in [extended_protein, protein, generic_protein]:
print(tran.alphabet)
self.fail()
# TODO - check the actual translation, and all the optional args
def test_the_translation_of_stops(self):
"""Check obj.translate() method with stop codons."""
misc_stops = "TAATAGTGAAGAAGG"
for nuc in [Seq(misc_stops),
Seq(misc_stops, generic_nucleotide),
Seq(misc_stops, generic_dna),
Seq(misc_stops, unambiguous_dna)]:
self.assertEqual("***RR", str(nuc.translate()))
self.assertEqual("***RR", str(nuc.translate(1)))
self.assertEqual("***RR", str(nuc.translate("SGC0")))
self.assertEqual("**W**", str(nuc.translate(table=2)))
self.assertEqual("**WRR",
str(nuc.translate(table="Yeast Mitochondrial")))
self.assertEqual("**WSS", str(nuc.translate(table=5)))
self.assertEqual("**WSS", str(nuc.translate(table=9)))
self.assertEqual("**CRR", str(nuc.translate(table="Euplotid Nuclear")))
self.assertEqual("***RR", str(nuc.translate(table=11)))
self.assertEqual("***RR", str(nuc.translate(table="11")))
self.assertEqual("***RR", str(nuc.translate(table="Bacterial")))
self.assertEqual("**GRR", str(nuc.translate(table=25)))
self.assertEqual("", str(nuc.translate(to_stop=True)))
self.assertEqual("O*ORR", str(nuc.translate(table=special_table)))
self.assertEqual("*QWRR",
str(nuc.translate(table=Chilodonella_uncinata_table)))
# These test the Bio.Seq.translate() function - move these?:
self.assertEqual("*QWRR",
translate(str(nuc), table=Chilodonella_uncinata_table))
self.assertEqual("O*ORR", translate(str(nuc), table=special_table))
self.assertEqual("", translate(str(nuc), to_stop=True))
self.assertEqual("***RR", translate(str(nuc), table="Bacterial"))
self.assertEqual("***RR", translate(str(nuc), table="11"))
self.assertEqual("***RR", translate(str(nuc), table=11))
self.assertEqual("**W**", translate(str(nuc), table=2))
self.assertEqual(str(Seq("TAT").translate()), "Y")
self.assertEqual(str(Seq("TAR").translate()), "*")
self.assertEqual(str(Seq("TAN").translate()), "X")
self.assertEqual(str(Seq("NNN").translate()), "X")
self.assertEqual(str(Seq("TAt").translate()), "Y")
self.assertEqual(str(Seq("TaR").translate()), "*")
self.assertEqual(str(Seq("TaN").translate()), "X")
self.assertEqual(str(Seq("nnN").translate()), "X")
self.assertEqual(str(Seq("tat").translate()), "Y")
self.assertEqual(str(Seq("tar").translate()), "*")
self.assertEqual(str(Seq("tan").translate()), "X")
self.assertEqual(str(Seq("nnn").translate()), "X")
def test_the_translation_of_invalid_codons(self):
"""Check obj.translate() method with invalid codons."""
for codon in ["TA?", "N-N", "AC_", "Ac_"]:
for nuc in [Seq(codon),
Seq(codon, generic_nucleotide),
Seq(codon, generic_dna),
Seq(codon, unambiguous_dna)]:
try:
print(nuc.translate())
self.fail("Translating %s should fail" % codon)
except TranslationError:
pass
def test_the_translation_of_ambig_codons(self):
"""Check obj.translate() method with ambiguous codons."""
for letters, ambig_values in [(ambiguous_dna.letters, ambiguous_dna_values),
(ambiguous_rna.letters, ambiguous_rna_values)]:
ambig = set(letters)
for c1 in ambig:
for c2 in ambig:
for c3 in ambig:
values = {str(Seq(a + b + c).translate())
for a in ambig_values[c1]
for b in ambig_values[c2]
for c in ambig_values[c3]}
t = str(Seq(c1 + c2 + c3).translate())
if t == "*":
self.assertEqual(values, set("*"))
elif t == "X":
self.assertTrue(len(values) > 1,
"translate('%s') = '%s' not '%s'"
% (c1 + c2 + c3, t, ",".join(values)))
elif t == "Z":
self.assertEqual(values, set("EQ"))
elif t == "B":
self.assertEqual(values, set("DN"))
elif t == "J":
self.assertEqual(values, set("LI"))
else:
self.assertEqual(values, set(t))
# TODO - Use the Bio.Data.IUPACData module for the
# ambiguous protein mappings?
def test_init_typeerror(self):
"""Check Seq __init__ gives TypeError exceptions."""
# Only expect it to take strings and unicode - not Seq objects!
self.assertRaises(TypeError, Seq, (1066))
self.assertRaises(TypeError, Seq, (Seq("ACGT", generic_dna)))
def test_MutableSeq_init_typeerror(self):
"""Check MutableSeq __init__ gives TypeError exceptions."""
self.assertRaises(TypeError, MutableSeq, (Seq("A")))
self.assertRaises(TypeError, MutableSeq, (UnknownSeq(1)))
self.assertRaises(TypeError, MutableSeq, 1)
self.assertRaises(TypeError, MutableSeq, 1.0)
def test_join_Seq_ValueError(self):
"""Checks that a ValueError is thrown for all non-iterable types."""
# No iterable types which contain non-accepted types either.
spacer = Seq("NNNNN")
self.assertRaises(ValueError, spacer.join, 5)
self.assertRaises(ValueError, spacer.join, "ATG")
self.assertRaises(ValueError, spacer.join, Seq("ATG"))
self.assertRaises(ValueError, spacer.join, MutableSeq("ATG"))
self.assertRaises(ValueError, spacer.join, ["ATG", "ATG", 5, "ATG"])
def test_join_UnknownSeq_ValueError(self):
"""Checks that a ValueError is thrown for all non-iterable types."""
# No iterable types which contain non-accepted types either.
spacer = UnknownSeq(5, character="-")
self.assertRaises(ValueError, spacer.join, 5)
self.assertRaises(ValueError, spacer.join, "ATG")
self.assertRaises(ValueError, spacer.join, Seq("ATG"))
self.assertRaises(ValueError, spacer.join, MutableSeq("ATG"))
self.assertRaises(ValueError, spacer.join, ["ATG", "ATG", 5, "ATG"])
def test_join_MutableSeq_ValueError(self):
"""Checks that a ValueError is thrown for all non-iterable types."""
# No iterable types which contain non-accepted types either.
spacer = MutableSeq("MMMMM")
self.assertRaises(ValueError, spacer.join, 5)
self.assertRaises(ValueError, spacer.join, "ATG")
self.assertRaises(ValueError, spacer.join, Seq("ATG"))
self.assertRaises(ValueError, spacer.join, MutableSeq("ATG"))
self.assertRaises(ValueError, spacer.join, ["ATG", "ATG", 5, "ATG"])
def test_join_Seq_TypeError(self):
"""Checks that a TypeError is thrown for incompatible alphabets."""
spacer = Seq("NNNNN", generic_dna)
self.assertRaises(TypeError, spacer.join, [Seq("NNNNN", generic_rna), Seq("NNNNN", generic_rna)])
self.assertRaises(TypeError, spacer.join, [Seq("NNNNN", generic_protein), Seq("NNNNN", generic_protein)])
def test_join_UnknownSeq_TypeError(self):
"""Checks that a TypeError is thrown for incompatible alphabets."""
spacer = UnknownSeq(5, character="-", alphabet=generic_dna)
self.assertRaises(TypeError, spacer.join, [UnknownSeq(5, character="-", alphabet=generic_rna), UnknownSeq(5, character="-", alphabet=generic_rna)])
self.assertRaises(TypeError, spacer.join, [Seq("NNNNN", generic_protein), UnknownSeq(5, character="-", alphabet=generic_protein)])
def test_join_MutableSeq_TypeError(self):
"""Checks that a TypeError is thrown for incompatible alphabets."""
spacer = MutableSeq("NNNNN", generic_dna)
self.assertRaises(TypeError, spacer.join, [MutableSeq("NNNNN", generic_rna), MutableSeq("NNNNN", generic_rna)])
self.assertRaises(TypeError, spacer.join, [Seq("NNNNN", generic_protein), MutableSeq("NNNNN", generic_protein)])
def test_join_Seq(self):
"""Checks if Seq join correctly concatenates sequence with the spacer."""
# Only expect it to take Seq objects and/or strings in an iterable!
spacer1 = Seq("", generic_dna)
spacers = [spacer1, Seq("NNNNN", generic_dna), Seq("GGG", generic_nucleotide)]
example_strings = ["ATG", "ATG", "ATG", "ATG"]
example_strings_seqs = ["ATG", "ATG", Seq("ATG", generic_dna), "ATG"]
# strings with empty spacer
str_concatenated = spacer1.join(example_strings)
self.assertEqual(str(str_concatenated), "".join(example_strings))
self.assertEqual(str_concatenated.alphabet, spacer1.alphabet)
for spacer in spacers:
seq_concatenated = spacer.join(example_strings_seqs)
self.assertEqual(str(seq_concatenated), str(spacer).join(example_strings))
self.assertEqual(seq_concatenated.alphabet, spacer.alphabet)
def test_join_Seq_with_file(self):
"""Checks if Seq join correctly concatenates sequence from a file with the spacer."""
filename = "Fasta/f003"
seqlist = [record.seq for record in SeqIO.parse(filename, "fasta")]
seqlist_as_strings = [str(_) for _ in seqlist]
spacer = Seq("NNNNN")
spacer1 = Seq("")
# seq objects with spacer
seq_concatenated = spacer.join(seqlist)
# seq objects with empty spacer
seq_concatenated1 = spacer1.join(seqlist)
ref_data = ref_data1 = ""
ref_data = str(spacer).join(seqlist_as_strings)
ref_data1 = str(spacer1).join(seqlist_as_strings)
self.assertEqual(str(seq_concatenated), ref_data)
self.assertEqual(str(seq_concatenated1), ref_data1)
with self.assertRaises(TypeError):
spacer.join(SeqIO.parse(filename, "fasta"))
def test_join_UnknownSeq(self):
"""Checks if UnknownSeq join correctly concatenates sequence with the spacer."""
# Only expect it to take Seq objects and/or strings in an iterable!
spacer1 = UnknownSeq(0, character="-", alphabet=generic_dna)
spacers = [spacer1, UnknownSeq(5, character="-", alphabet=generic_dna), UnknownSeq(5, character="-", alphabet=generic_nucleotide)]
example_strings = ["ATG", "ATG", "ATG", "ATG"]
example_strings_seqs = ["ATG", "ATG", Seq("ATG", generic_dna), "ATG"]
# strings with empty spacer
str_concatenated = spacer1.join(example_strings)
self.assertEqual(str(str_concatenated), "".join(example_strings))
self.assertEqual(str_concatenated.alphabet, spacer1.alphabet)
for spacer in spacers:
seq_concatenated = spacer.join(example_strings_seqs)
self.assertEqual(str(seq_concatenated), str(spacer).join(example_strings))
self.assertEqual(seq_concatenated.alphabet, spacer.alphabet)
def test_join_UnknownSeq_with_file(self):
"""Checks if UnknownSeq join correctly concatenates sequence from a file with the spacer."""
filename = "Fasta/f003"
seqlist = [record.seq for record in SeqIO.parse(filename, "fasta")]
seqlist_as_strings = [str(_) for _ in seqlist]
spacer = UnknownSeq(0, character="-", alphabet=generic_dna)
spacer1 = UnknownSeq(5, character="-", alphabet=generic_dna)
# seq objects with spacer
seq_concatenated = spacer.join(seqlist)
# seq objects with empty spacer
seq_concatenated1 = spacer1.join(seqlist)
ref_data = ref_data1 = ""
ref_data = str(spacer).join(seqlist_as_strings)
ref_data1 = str(spacer1).join(seqlist_as_strings)
self.assertEqual(str(seq_concatenated), ref_data)
self.assertEqual(str(seq_concatenated1), ref_data1)
with self.assertRaises(TypeError):
spacer.join(SeqIO.parse(filename, "fasta"))
def test_join_MutableSeq(self):
"""Checks if MutableSeq join correctly concatenates sequence with the spacer."""
# Only expect it to take Seq objects and/or strings in an iterable!
spacer1 = MutableSeq("", generic_dna)
spacers = [spacer1, MutableSeq("NNNNN", generic_dna), MutableSeq("GGG", generic_nucleotide)]
example_strings = ["ATG", "ATG", "ATG", "ATG"]
example_strings_seqs = ["ATG", "ATG", Seq("ATG", generic_dna), "ATG"]
# strings with empty spacer
str_concatenated = spacer1.join(example_strings)
self.assertEqual(str(str_concatenated), "".join(example_strings))
self.assertEqual(str_concatenated.alphabet, spacer1.alphabet)
for spacer in spacers:
seq_concatenated = spacer.join(example_strings_seqs)
self.assertEqual(str(seq_concatenated), str(spacer).join(example_strings))
self.assertEqual(seq_concatenated.alphabet, spacer.alphabet)
def test_join_MutableSeq_with_file(self):
"""Checks if MutableSeq join correctly concatenates sequence from a file with the spacer."""
filename = "Fasta/f003"
seqlist = [record.seq for record in SeqIO.parse(filename, "fasta")]
seqlist_as_strings = [str(_) for _ in seqlist]
spacer = MutableSeq("NNNNN")
spacer1 = MutableSeq("")
# seq objects with spacer
seq_concatenated = spacer.join(seqlist)
# seq objects with empty spacer
seq_concatenated1 = spacer1.join(seqlist)
ref_data = ref_data1 = ""
ref_data = str(spacer).join(seqlist_as_strings)
ref_data1 = str(spacer1).join(seqlist_as_strings)
self.assertEqual(str(seq_concatenated), ref_data)
self.assertEqual(str(seq_concatenated1), ref_data1)
with self.assertRaises(TypeError):
spacer.join(SeqIO.parse(filename, "fasta"))
# TODO - Addition...
class FileBasedTests(unittest.TestCase):
"""Test Seq objects created from files by SeqIO."""
def test_unknown_seq_ungap(self):
"""Test ungap() works properly on UnknownSeq instances."""
rec = SeqIO.read("GenBank/NT_019265.gb", "genbank")
self.assertIsInstance(rec.seq, UnknownSeq)
ungapped_seq = rec.features[1].extract(rec.seq).ungap("-")
self.assertIsInstance(ungapped_seq, UnknownSeq)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
the-stack_106_26006 | """application URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from captcha.conf import settings as ca_settings
from captcha.helpers import captcha_image_url, captcha_audio_url
from captcha.models import CaptchaStore
from django.conf import settings
from django.urls import re_path, include
from django.views.static import serve
from rest_framework.views import APIView
from apps.vadmin.op_drf.response import SuccessResponse
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="Snippets API",
default_version='v1',
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="[email protected]"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=[permissions.AllowAny],
)
class CaptchaRefresh(APIView):
authentication_classes = []
permission_classes = []
def get(self, request):
new_key = CaptchaStore.pick()
to_json_response = {
"key": new_key,
"image_url": captcha_image_url(new_key),
"audio_url": captcha_audio_url(new_key) if ca_settings.CAPTCHA_FLITE_PATH else None,
}
return SuccessResponse(to_json_response)
urlpatterns = [
re_path(r'media/(?P<path>.*)', serve, {"document_root": settings.MEDIA_ROOT}),
re_path(r'^admin/', include('apps.vadmin.urls')),
re_path(r'^$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
re_path(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
re_path(r'^device/', include('apps.device.urls')),  # register the device module routes
]
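# The CaptchaRefresh view above is defined but not routed in this file; a minimal
# sketch of exposing it (the URL pattern and route name below are illustrative
# assumptions, not part of this project) could look like:
#
# urlpatterns += [
#     re_path(r'^captcha/refresh/$', CaptchaRefresh.as_view(), name='captcha-refresh'),
#     re_path(r'^captcha/', include('captcha.urls')),
# ]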
|
the-stack_106_26007 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate pre-trained model trained for ppl metric.
"""
from parlai_internal.scripts.eval_ppl import eval_ppl, setup_args
from parlai_internal.scripts.utils import write_result_to_csv, update_opt
import os
if __name__ == '__main__':
parser = setup_args()
parser.add_argument('-ds','--dataset',
default='barista-personalised', type=str,
help='Dataset name. Choices: barista, barista-personalised, barista-personalised-order.')
parser.add_argument('-ts', '--task-size',
default='Task1k', type=str,
help='Task size folder,'
'choices: SecondInteraction, Task1k, Task10k for barista-personalised and barista-personalised-order'
'choices: Task100, Task1k, Task10k for barista.')
parser.add_argument('-tid', '--task-id', type=int, default=1,
help='Task number, default is 1. For personalised sets, 0-8, for barista 1-7.')
parser.add_argument('-ne', '--num-examples', default=100000000)
parser.add_argument('-d', '--display-examples', type='bool', default=False)
parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
parser.set_defaults(
task='internal:barista-personalised:Task1k:1',
model='parlai_internal.agents.seq2seq.seq2seq_v0:PerplexityEvaluatorAgent',
model_file='izoo:Seq2Seq/model',
datatype='test',
)
opt = parser.parse_args(print_args=False)
# add additional model args
opt = update_opt(opt, "Seq2Seq", log_incorrect=True, log_correct=True)
new_parser = setup_args(parser=parser)
new_parser.set_params(
task=opt['task'],
model_file=opt['model_file'],
log_predictions=opt['log_predictions'],
dump_incorrect_predictions_path=opt['dump_incorrect_predictions_path'],
dump_correct_predictions_path=opt['dump_correct_predictions_path'],
datatype='test',
numthreads=1,
batchsize=1,
hide_labels=False,
dict_lower=True,
dict_include_valid=False,
dict_tokenizer='split',
rank_candidates=True,
metrics='accuracy,f1,hits@1',
no_cuda=True,
)
opt = new_parser.parse_args()
report = eval_ppl(opt)
result_file = os.path.join("izoo:" + "Seq2Seq", opt['dataset'], opt['task_size'], "log") + "/results_test.csv"
write_result_to_csv(report, result_file, opt['task_id'], opt['datapath'])
|
the-stack_106_26012 | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.core import add_prefix
from mmseg.ops import resize
from .. import builder
from ..builder import SEGMENTORS
from .base import BaseSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder(BaseSegmentor):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which could be dumped during inference.
"""
def __init__(self,
backbone,
decode_head,
neck=None,
auxiliary_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(EncoderDecoder, self).__init__(init_cfg)
if pretrained is not None:
assert backbone.get('pretrained') is None, \
'both backbone and segmentor set pretrained weight'
backbone.pretrained = pretrained
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self._init_decode_head(decode_head)
self._init_auxiliary_head(auxiliary_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
assert self.with_decode_head
def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
self.decode_head = builder.build_head(decode_head)
self.align_corners = self.decode_head.align_corners
self.num_classes = self.decode_head.num_classes
def _init_auxiliary_head(self, auxiliary_head):
"""Initialize ``auxiliary_head``"""
if auxiliary_head is not None:
if isinstance(auxiliary_head, list):
self.auxiliary_head = nn.ModuleList()
for head_cfg in auxiliary_head:
self.auxiliary_head.append(builder.build_head(head_cfg))
else:
self.auxiliary_head = builder.build_head(auxiliary_head)
def extract_feat(self, img):
"""Extract features from images."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def encode_decode(self, img, img_metas):
"""Encode images with backbone and decode into a semantic segmentation
map of the same size as input."""
x = self.extract_feat(img)
out = self._decode_head_forward_test(x, img_metas)
out = resize(
input=out,
size=img.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
return out
def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses
def _decode_head_forward_test(self, x, img_metas):
"""Run forward function and calculate loss for decode head in
inference."""
seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
return seg_logits
def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for auxiliary head in
training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
x, img_metas, gt_semantic_seg, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses
def forward_dummy(self, img):
"""Dummy forward function."""
seg_logit = self.encode_decode(img, None)
return seg_logit
def forward_train(self, img, img_metas, gt_semantic_seg):
"""Forward function for training.
Args:
img (Tensor): Input images.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(img)
losses = dict()
loss_decode = self._decode_head_forward_train(x, img_metas,
gt_semantic_seg)
losses.update(loss_decode)
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, img_metas, gt_semantic_seg)
losses.update(loss_aux)
return losses
# TODO refactor
def slide_inference(self, img, img_meta, rescale):
"""Inference by sliding-window with overlap.
If h_crop > h_img or w_crop > w_img, the small patch will be used to
decode without padding.
"""
h_stride, w_stride = self.test_cfg.stride
h_crop, w_crop = self.test_cfg.crop_size
batch_size, _, h_img, w_img = img.size()
num_classes = self.num_classes
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
crop_img = img[:, :, y1:y2, x1:x2]
crop_seg_logit = self.encode_decode(crop_img, img_meta)
preds += F.pad(crop_seg_logit,
(int(x1), int(preds.shape[3] - x2), int(y1),
int(preds.shape[2] - y2)))
count_mat[:, :, y1:y2, x1:x2] += 1
assert (count_mat == 0).sum() == 0
if torch.onnx.is_in_onnx_export():
# cast count_mat to constant while exporting to ONNX
count_mat = torch.from_numpy(
count_mat.cpu().detach().numpy()).to(device=img.device)
preds = preds / count_mat
if rescale:
preds = resize(
preds,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return preds
def whole_inference(self, img, img_meta, rescale):
"""Inference with full image."""
seg_logit = self.encode_decode(img, img_meta)
if rescale:
# support dynamic shape for onnx
if torch.onnx.is_in_onnx_export():
size = img.shape[2:]
else:
size = img_meta[0]['ori_shape'][:2]
seg_logit = resize(
seg_logit,
size=size,
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return seg_logit
def inference(self, img, img_meta, rescale):
"""Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
img_meta (dict): Image info dict where each dict has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map.
"""
assert self.test_cfg.mode in ['slide', 'whole']
ori_shape = img_meta[0]['ori_shape']
assert all(_['ori_shape'] == ori_shape for _ in img_meta)
if self.test_cfg.mode == 'slide':
seg_logit = self.slide_inference(img, img_meta, rescale)
else:
seg_logit = self.whole_inference(img, img_meta, rescale)
output = F.softmax(seg_logit, dim=1)
flip = img_meta[0]['flip']
if flip:
flip_direction = img_meta[0]['flip_direction']
assert flip_direction in ['horizontal', 'vertical']
if flip_direction == 'horizontal':
output = output.flip(dims=(3, ))
elif flip_direction == 'vertical':
output = output.flip(dims=(2, ))
return output
def simple_test(self, img, img_meta, rescale=True):
"""Simple test with single image."""
seg_logit = self.inference(img, img_meta, rescale)
seg_pred = seg_logit.argmax(dim=1)
if torch.onnx.is_in_onnx_export():
# our inference backend only support 4D output
seg_pred = seg_pred.unsqueeze(0)
return seg_pred
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
def aug_test(self, imgs, img_metas, rescale=True):
"""Test with augmentations.
Only rescale=True is supported.
"""
# aug_test rescale all imgs back to ori_shape for now
assert rescale
# to save memory, we get augmented seg logit inplace
seg_logit = self.inference(imgs[0], img_metas[0], rescale)
for i in range(1, len(imgs)):
cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
seg_logit += cur_seg_logit
seg_logit /= len(imgs)
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
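# A minimal sketch of the test_cfg consumed by inference()/slide_inference() above;
# the crop_size/stride values are illustrative assumptions, not taken from this repo:
#
# test_cfg = dict(mode='slide', crop_size=(512, 512), stride=(341, 341))
# test_cfg = dict(mode='whole')  # alternative: single-pass whole-image inference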
|
the-stack_106_26013 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "contour"
_path_str = "contour.stream"
_valid_props = {"maxpoints", "token"}
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.contour.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
_v = maxpoints if maxpoints is not None else _v
if _v is not None:
self["maxpoints"] = _v
_v = arg.pop("token", None)
_v = token if token is not None else _v
if _v is not None:
self["token"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
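# A minimal usage sketch of the class above (the token string is a placeholder
# assumption, not a real stream id):
#
# import plotly.graph_objects as go
# trace = go.Contour(
#     z=[[1, 2], [3, 4]],
#     stream=go.contour.Stream(token="your-stream-token", maxpoints=50),
# )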
|
the-stack_106_26017 | """
Tests for the basic masking and filtering operations
"""
from ccd.qa import *
from ccd.app import get_default_params
clear = 0
water = 1
fill = 255
snow = 3
clear_thresh = 0.25
snow_thresh = 0.75
default_params = get_default_params()
def test_checkbit():
packint = 1
offset = 0
assert checkbit(packint, offset)
offset = 1
assert not checkbit(packint, offset)
def test_qabitval():
# [fill, clear, water, cloud shadow, snow, cloud,
# High Cirrus + low cloud conf, high cirrus + medium cloud conf, terrain occlusion]
packints = [1, 2, 4, 8, 16, 32, 832, 896, 1024]
ans = [0, 1, 2, 3, 4, 5, 1, 1, 1]
for i, a in zip(packints, ans):
assert qabitval(i, default_params) == a
def test_count_clear_or_water():
arr = np.arange(5)
ans = 2
assert ans == count_clear_or_water(arr, clear, water)
def test_count_total():
arr = np.arange(5)
arr[-1] = 255
ans = 4
assert ans == count_total(arr, fill)
def test_ratio_clear():
arr = np.arange(5)
arr[-1] = 255
ans = 0.5
assert ans == ratio_clear(arr, clear, water, fill)
def test_ratio_snow():
arr = np.arange(5)
ans = 1/3.01
assert ans == ratio_snow(arr, clear, water, snow)
def test_enough_clear():
arr = np.arange(5)
ans = True
assert ans == enough_clear(arr, clear, water, fill, clear_thresh)
arr[1:] = snow
ans = False
assert ans == enough_clear(arr, clear, water, fill, clear_thresh)
def test_enough_snow():
arr = np.arange(5)
ans = False
assert ans == enough_snow(arr, clear, water, snow, snow_thresh)
arr[1:] = snow
ans = True
assert ans == enough_snow(arr, clear, water, snow, snow_thresh)
def test_filter_median_green():
arr = np.arange(10)
ans = np.array([True, True, True, True, True,
True, True, True, False, False])
assert np.array_equal(ans, filter_median_green(arr, 3))
def test_duplicate_values():
arr = np.array([1, 2, 2, 3, 4, 5, 5])
ans = np.array([True, True, False, True, True,
True, False], dtype=bool)
assert np.array_equal(ans, mask_duplicate_values(arr))
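# For context, a bit test consistent with the checkbit assertions above could be
# as simple as the sketch below (an assumption about ccd.qa internals, not its
# actual implementation):
#
# def _checkbit_sketch(packedint, offset):
#     return bool((packedint >> offset) & 1)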
|
the-stack_106_26019 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud provides a unified interface to the cloud computing resources.
:var __version__: Current version of libcloud
"""
__all__ = ['__version__', 'enable_debug']
__version__ = '0.14.1'
import os
try:
import paramiko
have_paramiko = True
except ImportError:
have_paramiko = False
def enable_debug(fo):
"""
Enable library wide debugging to a file-like object.
:param fo: Where to append debugging information
:type fo: File like object, only write operations are used.
"""
from libcloud.common.base import (Connection,
LoggingHTTPConnection,
LoggingHTTPSConnection)
LoggingHTTPSConnection.log = fo
LoggingHTTPConnection.log = fo
Connection.conn_classes = (LoggingHTTPConnection,
LoggingHTTPSConnection)
def _init_once():
"""
Utility function that is run once on Library import.
This checks for the LIBCLOUD_DEBUG environment variable, which if it exists
is where we will log debug information about the provider transports.
"""
path = os.getenv('LIBCLOUD_DEBUG')
if path:
fo = open(path, 'a')
enable_debug(fo)
if have_paramiko:
paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)
_init_once()
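# A minimal usage sketch: debugging can be enabled either through the
# LIBCLOUD_DEBUG environment variable checked by _init_once() above, or by
# calling enable_debug() directly (the log path below is an illustrative
# assumption):
#
# import libcloud
# fo = open('/tmp/libcloud_debug.log', 'a')
# libcloud.enable_debug(fo)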
|
the-stack_106_26022 | # Specify an Exasim version to run
version = "Version0.1";
# import external modules
import numpy, os
# Add Exasim to Python search path
cdir = os.getcwd(); ii = cdir.find("Exasim");
exec(open(cdir[0:(ii+6)] + "/Installation/setpath.py").read());
# import internal modules
import Preprocessing, Postprocessing, Gencode, Mesh
# Create pde object and mesh object
pde,mesh = Preprocessing.initializeexasim(version);
# Define a PDE model: governing equations and boundary conditions
pde['model'] = "ModelD"; # ModelC, ModelD, ModelW
pde['modelfile'] = "pdemodel"; # name of a file defining the PDE model
# Choose computing platform and set number of processors
#pde['platform'] = "gpu"; # choose this option if NVIDIA GPUs are available
pde['mpiprocs'] = 1; # number of MPI processors
# Set discretization parameters, physical parameters, and solver parameters
pde['porder'] = 3; # polynomial degree
pde['physicsparam'] = numpy.array([0.01, 0.0, 1.0, 0.5, 0.0]); # unit thermal conductivity
pde['tau'] = numpy.array([1.0]); # DG stabilization parameter
# create a mesh of 8 by 8 quads on a square domain
mesh['p'], mesh['t'] = Mesh.SquareMesh(16,16,1)[0:2];
# expressions for domain boundaries
mesh['boundaryexpr'] = [lambda p: (p[1,:] < 1e-3), lambda p: (p[0,:] > 1-1e-3), lambda p: (p[1,:] > 1-1e-3), lambda p: (p[0,:] < 1e-3)];
mesh['boundarycondition'] = numpy.array([1, 1, 2, 1]); # Set boundary condition for each boundary
# call exasim to generate and run C++ code to solve the PDE model
sol, pde, mesh = Postprocessing.exasim(pde,mesh)[0:3];
# visualize the numerical solution of the PDE model using Paraview
pde['visscalars'] = ["temperature", 0]; # list of scalar fields for visualization
pde['visvectors'] = ["temperature gradient", numpy.array([1, 2]).astype(int)]; # list of vector fields for visualization
Postprocessing.vis(sol,pde,mesh); # visualize the numerical solution
print("Done!");
# npf = dmd[0]['facecon'].shape[0];
# nf = dmd[0]['facecon'].shape[2];
# print(numpy.reshape(dmd[0]['facecon'][:,0,:],(npf,nf),'F').T)
# print(numpy.reshape(dmd[0]['facecon'][:,1,:],(npf,nf),'F').T)
# fileapp1 = cdir + "/datain/app.bin";
# app1 = Preprocessing.readapp(fileapp1);
# fileapp2 = cdir + "/Applications/Poisson/Poisson2d/datain/app.bin";
# app2 = Preprocessing.readapp(fileapp2);
# diff = Preprocessing.checkapp(app1,app2);
# print(app1['problem'])
# print(app2['problem'])
# print(diff)
# filemaster1 = cdir + "/datain/master.bin";
# tm1 = numpy.fromfile(open(filemaster1, "r"), dtype=numpy.float64);
# filemaster2 = cdir + "/Applications/Poisson/Poisson2d/datain/master.bin";
# tm2 = numpy.fromfile(open(filemaster2, "r"), dtype=numpy.float64);
# print(max(abs(tm1.flatten('F')-tm2.flatten('F'))))
# filemesh1 = cdir + "/datain/mesh1.bin";
# mesh1 = Preprocessing.readmesh(filemesh1);
# filemesh2 = cdir + "/Applications/Poisson/Poisson2d/datain/mesh1.bin";
# mesh2 = Preprocessing.readmesh(filemesh2);
# diff = Preprocessing.checkmesh(mesh1,mesh2);
# print(mesh1['nbsd'])
# print(mesh2['nbsd'])
# print(diff)
#
# filemesh1 = cdir + "/datain/mesh2.bin";
# mesh1 = Preprocessing.readmesh(filemesh1);
# filemesh2 = cdir + "/Applications/Poisson/Poisson2d/datain/mesh2.bin";
# mesh2 = Preprocessing.readmesh(filemesh2);
# diff = Preprocessing.checkmesh(mesh1,mesh2);
# print(mesh1['nbsd'])
# print(mesh2['nbsd'])
# print(diff)
# print(mesh1['ndims'])
# print(mesh2['ndims'])
# print(mesh1['nsize'])
# print(mesh2['nsize'])
# print(mesh1['facecon'][0:10])
# print(mesh2['facecon'][0:10])
# print(dmd[0]['facecon'][:,:,0])
# fileapp1 = cdir + "/datain/app.bin";
# app1 = Preprocessing.readapp(fileapp1);
# fileapp2 = cdir + "/Applications/Poisson2d/datain/app.bin";
# app2 = Preprocessing.readapp(fileapp2);
# diff = Preprocessing.checkapp(app1,app2);
# print(diff)
#
# filemaster1 = cdir + "/datain/master.bin";
# tm1 = numpy.fromfile(open(filemaster1, "r"), dtype=numpy.float64);
# filemaster2 = cdir + "/Applications/Poisson2d/datain/master.bin";
# tm2 = numpy.fromfile(open(filemaster2, "r"), dtype=numpy.float64);
# print(max(abs(tm1.flatten('F')-tm2.flatten('F'))))
#
# filemesh1 = cdir + "/datain/mesh.bin";
# mesh1 = Preprocessing.readmesh(filemesh1);
# filemesh2 = cdir + "/Applications/Poisson2d/datain/mesh.bin";
# mesh2 = Preprocessing.readmesh(filemesh2);
# diff = Preprocessing.checkmesh(mesh1,mesh2);
# print(diff)
# tm1 = numpy.fromfile(open(filemesh1, "r"), dtype=numpy.float64);
# tm2 = numpy.fromfile(open(filemesh2, "r"), dtype=numpy.float64);
# print(mesh1['nsize'])
# print(mesh2['nsize'])
# k1 = 0; k2 = 20;
# print(max(abs(tm1[k1:k2].flatten('F')-tm2[k1:k2].flatten('F'))))
# k1 = 20; k2 = 1152+20;
# print(max(abs(tm1[k1:k2].flatten('F')-tm2[k1:k2].flatten('F'))))
# print(tm1[k1:k2])
# print(tm2[k1:k2])
# print(mesh1['facecon'].flatten('F'))
# print(mesh2['facecon'].flatten('F'))
# print(tm1.shape)
# print(tm2.shape)
# print(mesh1['colent2elem'].T)
# print(mesh2['colent2elem'].T)
# print(mesh['f'].T)
# print(mesh['dgnodes'][:,:,0])
# print(mesh['dgnodes'][:,:,-1])
|
the-stack_106_26023 | # _________________________________________________________________________
#
# PyUtilib: A Python utility library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# _________________________________________________________________________
import logging
import re
import sys
import textwrap
_indention = re.compile(r'\s*')
class LogHandler(logging.Handler):
def __init__( self, base='', stream=None,
level=logging.NOTSET, verbosity=None ):
logging.Handler.__init__(self, level=level)
if verbosity is None:
verbosity = lambda: True
if stream is None:
stream = sys.stdout
self.verbosity = verbosity
self.stream = stream
self.basepath = base
# Public attributes (because embedded constants in functions are evil)
self.wrap = 78
self.initial_indent = ''
self.subsequent_indent = ' '*4
def emit(self, record):
level = record.levelname
msg = record.getMessage()
# Most of the messages are either unformatted long lines or
# triple-quote blocks of text. In the latter case, if the text
# starts on the same line as the triple-quote, then it is almost
# certainly NOT indented with the bulk of the text, which will
# cause dedent to get confused and not strip any leading
# whitespace. This attempts to work around that case:
#
#if not ( msg.startswith('\n') or _indention.match(msg).group() ):
# # copy the indention for the second line to the first:
# lines = msg.splitlines()
# if len(lines) > 1:
# msg = _indention.match(lines[1]).group() + msg
#
# The problem with the above logic is that users may want a
# simple introductory line followed by an intented line (our
# tests did this!), and cannot specify it without adding an
# extra blank line to the output. In contrast, it is possible
# for the user to fix the scenario above that motivated this
# code by just indenting their first line correctly.
# TBD: dedent does not convert \t to ' '*8. Should we do that?
msg = textwrap.dedent(msg)
# As textwrap only works on single paragraphs, we need to break
# up the incoming message into paragraphs before we pass it to
# textwrap.
paragraphs = []
indent = _indention.match(msg).group()
par_lines = []
for line in msg.splitlines():
leading = _indention.match(line).group()
content = line.strip()
if not content:
paragraphs.append((indent, par_lines))
par_lines = []
# Blank lines reset the indentation level
indent = None
elif indent == leading:
# Catch things like bulleted lists
if len(content) > 1 and par_lines and content[1] == ' ' \
and content[0] in '-* ':
paragraphs.append((indent, par_lines))
par_lines = []
par_lines.append( content )
else:
paragraphs.append((indent, par_lines))
par_lines = [ content ]
indent = leading
# Collect the final paragraph
if par_lines:
paragraphs.append((indent, par_lines))
# Skip any leading/trailing blank lines
while paragraphs and not paragraphs[-1][1]:
paragraphs.pop()
while paragraphs and not paragraphs[0][1]:
paragraphs.pop(0)
if self.verbosity():
#
# If verbosity is on, the first logged line is the file,
# line, and function name that called the logger. The first
# line of the message is actually the second line of the
# output (and so is indented/wrapped the same as the rest of
# the message)
#
filename = record.pathname # file path
lineno = record.lineno
try:
function = record.funcName
except AttributeError:
function = '(unknown)'
if self.basepath and filename.startswith(self.basepath):
filename = '[base]' + filename[len(self.basepath):]
self.stream.write(
'%s: "%s", %d, %s\n' %
( level, filename, lineno, function.strip(), ))
else:
#
# If verbosity is off, prepend the log level name to the
# beginning of the message and format the line without the
# 'subsequent' indentation of the remainder of the message
#
if paragraphs:
firstPar = ' '.join(paragraphs.pop(0)[1]).strip()
if level:
firstPar = ('%s: %s' % (level, firstPar))
else:
firstPar = level
self.stream.write( '%s\n' % (
textwrap.fill( firstPar,
width=self.wrap,
initial_indent=self.initial_indent,
subsequent_indent=self.subsequent_indent ), ))
for indent, par in paragraphs:
if not indent:
indent = ''
# Bulleted lists get indented with a hanging indent
if par and len(par[0]) > 1 and par[0][0] in '-*':
hang = ' '*4
else:
hang = ''
self.stream.write( '%s\n' % (
textwrap.fill(
' '.join(par),
width=self.wrap,
initial_indent=self.subsequent_indent+indent,
subsequent_indent=self.subsequent_indent+indent+hang ), ))
#
# Set up default logging for PyUtilib
#
# __file__ fails if script is called in different ways on Windows
# __file__ fails if someone does os.chdir() before
# sys.argv[0] also fails because it does not always contain the path
from os.path import dirname as _dir, abspath as _abs
import inspect
_pyutilib_base = _dir(_dir(_dir(_abs(inspect.getfile(inspect.currentframe())))))
_logger = logging.getLogger('pyutilib')
_logger.setLevel(logging.WARNING)
_logger.addHandler( LogHandler(
_pyutilib_base, verbosity=lambda: _logger.isEnabledFor(logging.DEBUG)))
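# A minimal usage sketch of the handler configured above (the child logger name
# and message text are illustrative assumptions):
#
# log = logging.getLogger('pyutilib.example')
# log.warning("""
#     A multi-paragraph message like this one is dedented, split into
#     paragraphs and re-wrapped to 78 columns by LogHandler.emit().
#
#     Blank lines separate paragraphs and reset the indentation level.
#     """)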
|
the-stack_106_26025 | from flask import Flask, render_template, request, redirect, jsonify, make_response, json, request, url_for, send_from_directory, flash
from collections import namedtuple
from werkzeug import secure_filename
import sys
import types
import time
import datetime
import re
import json
import untangle
import uuid
import sqlalchemy
import os
import io
from datetime import date, timedelta
from .DBSetup import *
from sqlalchemy import *
from sqlalchemy.orm import aliased
from .config import *
from .SMTPemail import *
from .tablefunctions import *
from io import StringIO
#Make the WSGI interface available at the top level so wfastcgi can get it.
app = Flask(__name__)
wsgi_app = app.wsgi_app
#These are the extension types that we are accepting to be uploaded
app.config['ALLOWED_EXTENSIONS'] = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'xml', 'csv'])
app.secret_key = getconfig('SecretKey')
Session = sessionmaker(bind=engine)
#For a given file, return whether it's an allowed type or not
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
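#For example (an illustrative check, not part of the original app): with the
#ALLOWED_EXTENSIONS configured above, allowed_file('scores.csv') returns True
#while allowed_file('macro.exe') returns False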
def errorpage(thisuser,message):
return render_template('errorpage.html', \
thisuser=thisuser, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
errormessage = message
)
#the root page isn't meant to be navigable. It shows the user an error and
#tells them how to get to their user home.
@app.route('/')
def rootpage():
return render_template('index.html')
#upon the URL request in the form domain/user/<logonid> the user receives their home. The home contains the groups they
#are playing in. Optionally, this page presents their home in the future or the past, and gives them further options.
@app.route('/user/<logonid>/')
def home(logonid,inputdate='n'):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('HOME: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
debuglog('HOME: date modifier is currently set to %s' % inputdate)
#get the current announcement
currentannouncement = session.query(announcement).order_by(desc(announcement.creationtime)).first()
if currentannouncement is not None:
announcementcontent = currentannouncement.content.replace("\n","<br />")
else:
announcementcontent = ''
#get important datetimes
today = datetime.datetime.combine(datetime.date.today(), datetime.time.min) #get today's date
debuglog('HOME: Today is %s' % today.strftime('%Y-%m-%d %H:%M'))
campstarttime = datetime.datetime.strptime(getconfig('StartTime'), '%Y-%m-%d %H:%M')
campendtime = datetime.datetime.strptime(getconfig('EndTime'), '%Y-%m-%d %H:%M')
#if the user has submitted a date, convert it to a datetime and use it as the display date
if inputdate != 'n':
displaydate = datetime.datetime.strptime(inputdate, '%Y-%m-%d')
#if the user has not submitted a date and today is before the start of camp, use the first day of camp as the display date
elif today < datetime.datetime.strptime(getconfig('StartTime'), '%Y-%m-%d %H:%M'):
displaydate = datetime.datetime.strptime(getconfig('StartTime').split()[0], '%Y-%m-%d')
#if today is after the start of camp, use today as the display date
else:
displaydate = today
previousday = displaydate + datetime.timedelta(days=-1)
midday = displaydate + datetime.timedelta(hours=12)
nextday = displaydate + datetime.timedelta(days=1)
#get an array containing the dates that the camp is running
dates = []
for d in range((campendtime-campstarttime).days + 2):
dates.append(campstarttime + timedelta(days=d))
#get the user's schedule, an array of objects depending on the user's state during each period
schedule = getschedule(session,thisuser,displaydate)
unscheduled = False
for p in schedule:
if session.query(group).filter(group.periodid == p.periodid, group.status == "Confirmed", or_(group.ismusical == 1, group.iseveryone == 1)).first() is None:
unscheduled = True
break
session.close()
return render_template('home.html', \
thisuser=thisuser, \
date=displaydate,\
schedule=schedule, \
dates=dates, \
previousday=previousday, \
nextday=nextday, \
today=today, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
currentannouncement=announcementcontent, \
now = datetime.datetime.now(), \
midday=midday, \
unscheduled=unscheduled, \
)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return render_template('errorpage.html', \
thisuser=thisuser, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
errormessage = 'Failed to display page. %s' % ex
)
else:
return jsonify(message = message, url = 'none')
#NOT CURRENTLY USED. Currently broken: the getschedule function returns an array of inconsistent objects. Either make them consistent,
#put logic into the 'for s in ...' part of the function below, or improve the query in the getschedule function.
"""@app.route('/user/<logonid>/requestschedule/', methods=["POST"])
def requestschedule(logonid):
log('Schedule request for date %s' % request.json['date'])
#convert the inputdate to a datetime object
date = datetime.datetime.strptime(request.json['date'], '%Y-%m-%d')
session = Session()
#gets the data associated with this user
thisuser = session.query(user).filter(user.logonid == logonid).first()
if thisuser is None:
session.close()
return jsonify(message = 'Your user does not exist. Something went wrong.', url = 'none', status_code = 400)
schedule_serialized = []
for s in getschedule(session,thisuser,date):
schedule_serialized.append({'groupname': s.groupname, 'starttime': s.starttime, 'endtime': s.endtime,\
'locationname': s.locationname, 'groupid': s.groupid, 'ismusical': s.ismusical, 'iseveryone': s.iseveryone,\
'periodid': s.periodid, 'periodname': s.periodname, 'instrumentname': s.instrumentname})
return jsonify(schedule_serialized)"""
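#A hedged sketch of the "put logic into the 'for s in ...' part" option noted
#above: serialise each schedule row defensively with getattr defaults so rows
#that lack some of these attributes no longer break the loop. The field names
#mirror the commented-out code above; this helper is illustrative only.
def serialize_schedule_row(s):
    fields = ('groupname', 'starttime', 'endtime', 'locationname', 'groupid',
              'ismusical', 'iseveryone', 'periodid', 'periodname', 'instrumentname')
    return {f: getattr(s, f, None) for f in fields}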
#When the user selects the "next day" or "previous day" links on their home, it goes to this URL. This route returns them
#to the user home with a date modifier.
@app.route('/user/<logonid>/date/<date>/')
def homeDateModifier(logonid,date):
return home(logonid,date)
#Makes a POST request that marks a player absent for a given period. This is triggered from the two home functions above.
@app.route("/user/<logonid>/period/<periodid>/absent/<command>/", methods=["POST"])
def mark_absent(logonid,periodid,command):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('ABSENTREQUEST: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
thisperiod = getperiod(session,periodid)
if command == 'confirm':
thisuser.markabsent(session,thisperiod)
if command == 'cancel':
thisuser.markpresent(session,thisperiod)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
return jsonify(message = message, url = 'none')
else:
session.commit()
session.close()
return jsonify(message = 'success', url = 'none')
#The group page displays all the people in a given group, along with possible substitutes
@app.route('/user/<logonid>/group/<groupid>/')
def grouppage(logonid,groupid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('GROUPPAGE: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
thisgroup = getgroup(session,groupid)
thislocation = getlocation(session,thisgroup.locationid)
thisperiod = getperiod(session,thisgroup.periodid)
thisgrouprequestor = getuser(session,thisgroup.requesteduserid)
if thisgroup.musicid is not None:
thismusic = getmusic(session, thisgroup.musicid)
else:
thismusic = None
#gets the list of players playing in the given group
players = session.query(user.userid, user.firstname, user.lastname, groupassignment.instrumentname).join(groupassignment).join(group).\
filter(group.groupid == groupid).order_by(groupassignment.instrumentname).all()
#find the substitutes for this group
if thisgroup.status == 'Confirmed' and thisgroup.iseveryone != 1 and thisgroup.groupname != 'absent':
minimumlevel = thisgroup.getminlevel(session)
maximumlevel = thisgroup.getmaxlevel(session)
#get the list of instruments played in this group and removes duplicates to be used as a subquery later
instruments_in_group_query = session.query(groupassignment.instrumentname).join(group).filter(group.groupid == thisgroup.groupid).group_by(groupassignment.instrumentname)
debuglog('GROUPPAGE: Found instruments in group to be %s' % instruments_in_group_query.all())
#get the userids of everyone that's already playing in something this period
everyone_playing_in_periodquery = session.query(user.userid).join(groupassignment).join(group).join(period).filter(period.periodid == thisgroup.periodid)
#combine the last two queries with another query, finding everyone that both plays an instrument that's found in this
#group AND isn't in the list of users that are already playing in this period.
substitutes = session.query(
instrument.instrumentname,
user.userid,
user.firstname,
user.lastname
).join(user
).filter(
~user.userid.in_(everyone_playing_in_periodquery),
user.isactive == 1,
user.arrival <= thisperiod.starttime,
user.departure >= thisperiod.endtime,
instrument.instrumentname.in_(instruments_in_group_query),
instrument.level >= minimumlevel,
instrument.level <= maximumlevel,
instrument.isactive == 1
).order_by(instrument.instrumentname)
else:
substitutes = None
session.close()
return render_template('grouppage.html', \
thisperiod=thisperiod, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
thisgroup=thisgroup, \
players=players, \
substitutes=substitutes, \
thisuser=thisuser, \
thislocation=thislocation, \
thismusic=thismusic, \
thisgrouprequestor=thisgrouprequestor, \
)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return render_template('errorpage.html', \
thisuser=thisuser, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
errormessage = 'Failed to display page. %s' % ex
)
else:
return jsonify(message = message, url = 'none')
#Group editor page. Only accessible by admins. Navigate here from a group page to edit that group.
@app.route('/user/<logonid>/group/<groupid>/edit/', methods=['GET', 'POST', 'DELETE'])
def editgroup(logonid,groupid,periodid=None):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('GROUPEDIT: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
if thisuser.isadmin != 1:
session.close()
raise Exception('You do not have permission to do this')
if periodid == 'None':
periodid = None
if groupid == 'new' or groupid is None:
groupid = None
thisgroup = group(ismusical = 1, requesteduserid = thisuser.userid)
requestor = thisuser
else:
thisgroup = session.query(group).filter(group.groupid == groupid).first()
requestor = session.query(user).filter(user.userid == thisgroup.requesteduserid).first()
if request.method == 'GET':
tomorrow = datetime.datetime.combine(datetime.date.today(), datetime.time.min) + datetime.timedelta(days=1)
#Current period tracks the period that the group is already set to (none, if it's a new group)
currentperiod = session.query(period).filter(period.periodid == thisgroup.periodid).first()
#print out the list of players and remove any that have already left camp
thisgroupplayers = session.query(user.userid, user.firstname, user.lastname, groupassignment.instrumentname, user.departure).join(groupassignment).join(group).filter(group.groupid == thisgroup.groupid).order_by(groupassignment.instrumentname).all()
#find all periods from now until the end of time to display to the user, then remove any periods that the people in this group cannot play in
thisgroupplayers_query = session.query(user.userid).join(groupassignment).join(group).filter(group.groupid == thisgroup.groupid).order_by(groupassignment.instrumentname)
periodlist = session.query(period).order_by(period.starttime).all()
if thisgroupplayers_query.first() is not None:
lastarrival = session.query(func.max(user.arrival).label("lastarrival")).filter(user.userid.in_(thisgroupplayers_query)).first().lastarrival
firstdeparture = session.query(func.min(user.departure).label("firstdeparture")).filter(user.userid.in_(thisgroupplayers_query)).first().firstdeparture
else:
lastarrival = datetime.datetime.strptime(getconfig('StartTime'), '%Y-%m-%d %H:%M')
firstdeparture = datetime.datetime.strptime(getconfig('EndTime'), '%Y-%m-%d %H:%M')
debuglog('GROUPEDIT: Last Arrival time of players in this group: %s' % lastarrival)
debuglog('GROUPEDIT: First Departure time of players in this group: %s' % firstdeparture)
periods = []
for p in periodlist:
if thisgroupplayers_query.first() is None or ((currentperiod and p.periodid == currentperiod.periodid) \
or (len(session.query(user.userid).join(groupassignment).join(group).join(period).filter(group.periodid == p.periodid, or_(user.userid.in_(thisgroupplayers_query))).all()) == 0\
and p.starttime >= lastarrival and p.starttime <= firstdeparture)):
periods.append(p)
#if there was no selected period by the user, select the first period
if periodid is not None:
selectedperiod = session.query(period).filter(period.periodid == periodid).first()
elif currentperiod is None:
debuglog('GROUPEDIT: This is a periodless group. Selecting a period for the group.')
foundperiod = False
for p in periods:
if p.starttime > tomorrow and session.query(period.periodid).join(group).filter(period.periodid == p.periodid, group.iseveryone == 1).first() is None:
selectedperiod = p
foundperiod = True
break
if not foundperiod:
selectedperiod = None
else:
selectedperiod = currentperiod
thislocation = session.query(location).join(group).filter(group.groupid == groupid).first()
#gets the list of players playing in the given group
thisgroupplayers = session.query(user.userid, user.firstname, user.lastname, groupassignment.instrumentname).join(groupassignment).join(group).\
filter(group.groupid == thisgroup.groupid).order_by(groupassignment.instrumentname).all()
thisgroupplayers_serialized = []
for p in thisgroupplayers:
thisgroupplayers_serialized.append({'userid': p.userid, 'firstname': p.firstname, 'lastname': p.lastname,
'instrumentname': p.instrumentname})
if selectedperiod is not None:
#Finds all players who are already playing in this period (except in this specific group)
playersPlayingInPeriod = session.query(user.userid).join(groupassignment).join(group).filter(group.groupid != thisgroup.groupid).filter(group.periodid == selectedperiod.periodid)
#finds all players who are available to play in this group (they aren't already playing in other groups)
playersdump = session.query(user.userid,user.firstname,user.lastname,user.agecategory,instrument.instrumentname,instrument.level,instrument.isprimary).\
join(instrument).filter(~user.userid.in_(playersPlayingInPeriod), user.isactive == 1, user.arrival <= selectedperiod.starttime, user.departure >= selectedperiod.endtime, instrument.isactive == 1).all()
else:
playersdump = session.query(user.userid,user.firstname,user.lastname,user.agecategory,instrument.instrumentname,instrument.level,instrument.isprimary).\
join(instrument).filter(user.isactive == 1, instrument.isactive == 1).all()
playersdump_serialized = []
for p in playersdump:
playersdump_serialized.append({'userid': p.userid, 'firstname': p.firstname, 'lastname': p.lastname,
'agecategory': p.agecategory, 'instrumentname': p.instrumentname, 'level': p.level, 'isprimary': p.isprimary})
#Get a list of the available music not being used in the period selected
if selectedperiod is not None:
musics_used_query = session.query(music.musicid).join(group).join(period).filter(period.periodid == selectedperiod.periodid, group.groupid != thisgroup.groupid)
musics = session.query(music).filter(~music.musicid.in_(musics_used_query)).all()
else:
musics = session.query(music).all()
musics_serialized = [i.serialize for i in musics]
#get a list of the locations not being used in this period
if selectedperiod is not None:
locations_used_query = session.query(location.locationid).join(group).join(period).filter(period.periodid == selectedperiod.periodid, group.groupid != thisgroup.groupid, location.locationname != 'None')
locations = session.query(location).filter(~location.locationid.in_(locations_used_query)).all()
else:
locations = session.query(location).all()
debuglog('GROUPEDIT: This groups status is %s' % thisgroup.status)
#find all group templates to show in a dropdown
grouptemplates = session.query(grouptemplate).all()
grouptemplates_serialized = [i.serialize for i in grouptemplates]
template = render_template('editgroup.html', \
currentperiod=currentperiod, \
selectedperiod=selectedperiod, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
thisgroup=thisgroup, \
thisgroupplayers=thisgroupplayers, \
thisuser=thisuser, \
periods=periods, \
thislocation=thislocation, \
locations=locations, \
playersdump=playersdump, \
playersdump_serialized=playersdump_serialized, \
thisgroupplayers_serialized=thisgroupplayers_serialized, \
maximumlevel=int(getconfig('MaximumLevel')), \
grouptemplates=grouptemplates, \
grouptemplates_serialized=grouptemplates_serialized, \
musics=musics, \
musics_serialized=musics_serialized, \
thismusic=getmusic(session,thisgroup.musicid), \
instrumentlist_string=getconfig('Instruments'), \
groupmin=thisgroup.getminlevel(session), \
groupmax=thisgroup.getmaxlevel(session), \
requestor=requestor, \
)
session.close()
return template
if request.method == 'DELETE':
if groupid is not None:
thisgroup.delete(session)
url = ('/user/' + str(thisuser.logonid) + '/')
message = 'none'
flash(u'Group Deleted','message')
debuglog('Sending user to URL: %s' % url)
session.close()
return jsonify(message = message, url = url)
else:
raise Exception('This group has not been saved yet, so it cannot be deleted.')
if request.method == 'POST':
#format the packet received from the server as JSON
content = request.json
if content['groupname'] == '' or content['groupname'] == 'null' or content['groupname'] is None:
raise Exception('You must give this group a name before saving or autofilling.')
if content['periodid'] != '' and content['periodid'] != 'null' and content['periodid'] is not None:
thisperiod = session.query(period).filter(period.periodid == content['periodid']).first()
else:
raise Exception('Could not find a period with the selected id. Refresh the page and try again.')
thisgroupassignments = session.query(groupassignment).filter(groupassignment.groupid == thisgroup.groupid).all()
for a in thisgroupassignments:
session.delete(a)
#add the content in the packet to this group's attributes
for key,value in content.items():
if (value is None or value == 'null' or value == '') and key != 'primary_only':
debuglog('Setting %s to be NULL' % (key))
setattr(thisgroup,key,None)
elif key != 'primary_only':
debuglog('Setting %s to be %s' % (key, value))
setattr(thisgroup,key,value)
if groupid == None:
session.add(thisgroup)
thisgroup.requesttime = datetime.datetime.now()
session.commit()
if content['locationid'] is not None and content['locationid'] != '':
location_clash = session.query(location.locationname, group.groupid).join(group).join(period).filter(period.periodid == thisperiod.periodid, location.locationid == content['locationid'], group.groupid != thisgroup.groupid).first()
if location_clash is not None:
debuglog('Group %s is already using this location %s' % (location_clash.groupid, location_clash.locationname))
raise Exception('This location is already being used at this time. Select another.')
if content['musicid'] != '' and content['musicid'] != 'null' and content['musicid'] is not None:
music_clash = session.query(music.musicname, music.composer, group.groupid).join(group).join(period).filter(period.periodid == thisperiod.periodid, music.musicid == content['musicid'], group.groupid != thisgroup.groupid).first()
if music_clash is not None:
debuglog('Group %s is already using this music %s %s' % (music_clash.groupid, music_clash.composer, music_clash.musicname))
raise Exception('This music is already being used at this time. You cannot schedule in this period.')
foundfilled = False
for p in content['objects']:
if p['userid'] != '' and p['userid'] is not None:
foundfilled = True
playeruser = session.query(user).filter(user.userid == p['userid']).first()
#if the player is already playing in something, we have a clash and we have to exit completely. This may happen if multiple people are creating groups at the same time.
currentassignment = session.query(groupassignment.instrumentname, group.groupname, group.groupid).join(group).filter(groupassignment.userid == p['userid']).filter(group.periodid == thisperiod.periodid).first()
if currentassignment is not None:
#if the player is already playing in something, we have a clash and we have to exit completely. This may happen if multiple people are creating groups at the same time.
if currentassignment.groupid != thisgroup.groupid:
raise Exception('Found a clash for %s. They are already playing %s in %s. Refresh the page and try again.' % (playeruser.firstname, currentassignment.instrumentname, currentassignment.groupname))
#if we found a player and no clash, we can assign this player to the group
if playeruser is None:
url = ('/user/' + str(thisuser.logonid) + '/')
raise Exception('Could not find one of your selected players in the database. Please refresh the page and try again.')
else:
#if the player is inactive or not attending camp at this time, they should never have been shown to the admin and chosen - this could happen if they were set to inactive while the admin had the page open
if playeruser.isactive != 1 or playeruser.arrival > thisperiod.starttime or playeruser.departure < thisperiod.endtime:
raise Exception('The user %s %s is set to inactive and they cannot be assigned. Refresh the page and try again with a different user.' % (playeruser.firstname, playeruser.lastname))
else:
playergroupassignment = groupassignment(userid = playeruser.userid, groupid = thisgroup.groupid, instrumentname = p['instrumentname'])
session.add(playergroupassignment)
if thisgroup.status == 'Confirmed' and (
thisgroup.periodid == '' or thisgroup.periodid is None or
thisgroup.groupname == '' or thisgroup.groupname is None or
thisgroup.locationid == '' or thisgroup.locationid is None
):
raise Exception('Confirmed groups must have a name, assigned period, assigned location and no empty player slots.')
if content['submittype'] == 'autofill':
debuglog('GROUPEDIT: User selected to autofill the group')
debuglog('GROUPEDIT: Primary_only switch set to %s' % content['primary_only'])
if content['periodid'] == '' or content['periodid'] is None:
raise Exception('You must select a period before autofilling')
else:
if thisperiod is not None:
#get the optimal list of players to be filled into this group
final_list = autofill(session,thisgroup,thisperiod,int(content['primary_only']))
#create group assignments for each player in the final_list
thisgroup.addplayers(session,final_list)
#Check for empty instrument slots if group is set to confirmed - if there are empties we have to switch it back to queued
if thisgroup.status == 'Confirmed':
for i in getconfig('Instruments').split(","):
debuglog('This group requires %s players on %s and has %s assigned.' % (getattr(thisgroup,i), i, session.query(user).join(groupassignment).filter(groupassignment.groupid == thisgroup.groupid, groupassignment.instrumentname == i).count()))
if thisgroup.locationid is None or thisgroup.locationid == '' or thisgroup.totalinstrumentation != thisgroup.totalallocatedplayers:
thisgroup.status = 'Queued'
session.merge(thisgroup)
session.commit()
url = '/user/' + str(thisuser.logonid) + '/group/' + str(thisgroup.groupid) + '/edit/'
session.close()
flash(u'Changes Partially Saved','message')
return jsonify(message = 'Your group is not confirmed because there are empty instrument slots or your location is blank. Your other changes have been saved.', url = url)
session.merge(thisgroup)
session.commit()
if content['submittype'] == 'autofill':
url = '/user/' + str(thisuser.logonid) + '/group/' + str(thisgroup.groupid) + '/edit/'
message = 'none'
flash(u'Autofill Completed','message')
elif content['submittype'] == 'save':
if groupid == None:
url = '/user/' + str(thisuser.logonid) + '/group/' + str(thisgroup.groupid) + '/edit/'
else:
url = 'none'
message = 'none'
else:
url = '/user/' + str(thisuser.logonid) + '/group/' + str(thisgroup.groupid) + '/'
message = 'none'
if thisgroup.status == 'Confirmed':
flash(u'Group Confirmed and Scheduled','message')
else:
flash(u'Changes Saved','success')
session.close()
return jsonify(message = message, url = url)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
@app.route('/user/<logonid>/group/<groupid>/period/<periodid>/edit/', methods=['GET', 'POST', 'DELETE'])
def editgroupperiod(logonid,groupid,periodid):
return editgroup(logonid,groupid,periodid)
@app.route('/user/<logonid>/grouphistory/')
def grouphistory(logonid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('GROUPHISTORY: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
now = datetime.datetime.now() #get the time now
groups = session.query(group.groupname, group.groupid, period.periodid, period.starttime, period.endtime, groupassignment.instrumentname, group.status, location.locationname).\
join(groupassignment).outerjoin(period).outerjoin(location).filter(groupassignment.userid == thisuser.userid, group.groupname != 'absent').order_by(period.starttime).all()
debuglog(groups)
count = playcount(session, thisuser.userid)
thisuserprimary = session.query(instrument.instrumentname).filter(instrument.userid == thisuser.userid, instrument.isprimary == 1).first().instrumentname
total = 0
number = 0
for p in session.query(instrument.userid).filter(instrument.isactive == 1, instrument.isprimary == 1, instrument.instrumentname == thisuserprimary).group_by(instrument.userid).all():
total = total + playcount(session, p.userid)
number = number + 1
average = "%.2f" % (float(total) / float(number)) if number > 0 else "0.00"
debuglog('Found total number of %s players to be %s and plays by all of them totalling %s giving an average of %s' % (thisuserprimary, number, total, average))
session.close()
return render_template('grouphistory.html', \
thisuser=thisuser, \
groups = groups, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
now=now, \
playcount=count, \
average=average, \
thisuserprimary=thisuserprimary, \
)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
@app.route('/user/<logonid>/musiclibrary/')
def musiclibrary(logonid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('MUSICLIBRARY: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
musics = session.query(music).all()
grouptemplates = session.query(grouptemplate).filter(grouptemplate.size == 'S').all()
session.close()
return render_template('musiclibrary.html', \
thisuser=thisuser, \
musics=musics, \
grouptemplates=grouptemplates, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s.' % ex)
else:
return jsonify(message = message, url = 'none')
@app.route('/user/<logonid>/musiclibrary/details/<musicid>/')
def musicdetails(logonid,musicid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('MUSICDETAILS: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
thismusic = session.query(music).filter(music.musicid == musicid).first()
if thismusic is None:
session.close()
return errorpage(thisuser,'The music you selected does not exist in the database.')
thisuserinstruments = session.query(instrument).filter(instrument.userid == thisuser.userid, instrument.isactive == 1).all()
canplay = False
for i in thisuserinstruments:
for j in getconfig('Instruments').split(","):
if i.instrumentname == j and getattr(thismusic,j) > 0:
canplay = True
break
if canplay == True:
break
grouptemplates = session.query(grouptemplate).filter(grouptemplate.size == 'S').all()
playcount = session.query(group).filter(group.musicid == thismusic.musicid).count()
grouphistory = session.query(group.groupname, group.status, group.groupid, period.starttime
).join(period, group.periodid == period.periodid
).filter(group.musicid == thismusic.musicid).all()
session.close()
return render_template('musicdetails.html', \
thisuser=thisuser, \
now=datetime.datetime.now(), \
thismusic=thismusic, \
grouptemplates=grouptemplates, \
canplay=canplay, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
playcount=playcount, \
grouphistory=grouphistory
)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
@app.route('/user/<logonid>/musiclibrary/new/', methods=['GET', 'POST'])
def newmusic(logonid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('NEWMUSIC: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
if request.method == 'GET':
grouptemplates = session.query(grouptemplate).all()
grouptemplates_serialized = [i.serialize for i in grouptemplates]
return render_template('newmusic.html', \
thisuser=thisuser, \
grouptemplates=grouptemplates, \
grouptemplates_serialized=grouptemplates_serialized, \
instrumentlist_string=getconfig('Instruments'), \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
)
if request.method == 'POST':
#format the packet received from the server as JSON
content = request.json
found_non_zero = False
for i in getconfig('Instruments').split(","):
if content[i] != 0 and content[i] != '' and content[i] != '0' and content[i] is not None:
found_non_zero = True
if not found_non_zero:
session.rollback()
session.close()
return jsonify(message = 'You cannot submit music without instrumentation.', url = 'none')
thismusic = music()
debuglog('New Music: Submitted by user %s %s' % (thisuser.firstname, thisuser.lastname))
for key,value in content.items():
if (value is None or value == 'null' or value == '') and key == 'composer':
session.rollback()
session.close()
return jsonify(message = 'You must enter a composer', url = 'none')
if (value is None or value == 'null' or value == '') and key == 'name':
session.rollback()
session.close()
return jsonify(message = 'You must enter a name', url = 'none')
debuglog('New Music: setting %s to be %s' % (key,value))
setattr(thismusic,key,value)
#try to find a grouptemplate that matches this instrumentation
matchingtemplate = session.query(grouptemplate)
for i in instrumentlist:
matchingtemplate = matchingtemplate.filter(getattr(thismusic,i) == getattr(grouptemplate,i))
matchingtemplate = matchingtemplate.first()
if matchingtemplate is not None:
debuglog('New Music: Found a template matching this music: %s' % matchingtemplate.grouptemplatename)
thismusic.grouptemplateid = matchingtemplate.grouptemplateid
session.add(thismusic)
session.commit()
debuglog('New Music: Successfully created')
url = ('/user/' + str(thisuser.logonid) + '/musiclibrary/')
session.close()
flash(u'New Music Accepted', 'success')
return jsonify(message = 'none', url = url)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#Handles the group request page. If a user visits the page, it gives them a form to create a new group request. Pressing submit
#sends a POST containing configuration data. Their group request is queued until an administrator approves it and assigns it to
#a period.
@app.route('/user/<logonid>/grouprequest/', methods=['GET', 'POST'])
def grouprequest(logonid,periodid=None,musicid=None):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('GROUPREQUEST: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
today = datetime.datetime.combine(datetime.date.today(), datetime.time.min) #get today's date
intwodays = today + datetime.timedelta(days=2)
now = datetime.datetime.now() #get the time now
#if this camper is inactive, or is departing in less than two days' time, they cannot request a group
if (thisuser.isactive != 1) and periodid is None:
session.close()
return errorpage(thisuser,'Your account is currently set to inactive. Inactive users cannot request groups. If this is a mistake, navigate to your settings and set yourself to active.')
if (thisuser.departure < intwodays) and periodid is None:
session.close()
return errorpage(thisuser,"You are set to depart camp in less than one days' time, so you cannot request a group. If this is incorrect, you can change your departure time in your settings.")
#find the instruments this user plays
thisuserinstruments = session.query(instrument).filter(instrument.userid == thisuser.userid, instrument.isactive == 1).all()
thisuserinstruments_serialized = [i.serialize for i in thisuserinstruments]
#check if this user is really a conductor and actually requested a conductorpage for a specific period
if thisuser.isconductor == 1 and periodid is not None:
conductorpage = True
thisperiod = session.query(period).filter(period.periodid == periodid).first()
if thisperiod is None:
return ('Did not find period in database. Something has gone wrong.')
elif thisuserinstruments is None:
session.close()
return errorpage(thisuser,'You have no instruments recorded against your account, so you cannot request a group.')
else:
conductorpage = False
thisperiod = None
#if this user isn't a conductor and/or they didn't request the conductor page and they've already surpassed their group-per-day limit, deny them.
if conductorpage == False:
if thisuser.grouprequestcount == 0 or thisuser.grouprequestcount == None or thisuser.grouprequestcount == '':
thisuser.grouprequestcount = 0
debuglog('GROUPREQUEST: User has requested %s groups' % thisuser.grouprequestcount)
debuglog('GROUPREQUEST: Maximum allowance is %s plus an extra %s per day' % (getconfig('BonusGroupRequests'), getconfig('DailyGroupRequestLimit')))
debuglog('GROUPREQUEST: User arrived at camp at %s.' % thisuser.arrival)
if (now - thisuser.arrival).days < 0:
allowedRequests = float(getconfig('BonusGroupRequests'))
else:
allowedRequests = float((now - thisuser.arrival).days + 2) * float(getconfig('DailyGroupRequestLimit')) + float(getconfig('BonusGroupRequests'))
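#Worked example of the allowance formula above (illustrative numbers, not the
#configured values): with a daily limit of 2 and 3 bonus requests, a user who
#arrived one full day ago may make (1 + 2) * 2 + 3 = 9 requests in total.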
debuglog('GROUPREQUEST: User is allowed total %s requests.' % (allowedRequests))
if thisuser.grouprequestcount >= allowedRequests:
debuglog('GROUPREQUEST: This user is denied access to request another group.')
session.close()
return errorpage(thisuser,"You have requested %s groups throughout the camp, and you're allowed %s per day (as well as %s welcome bonus requests!). You've reached your limit for today. Come back tomorrow!" % \
(thisuser.grouprequestcount, getconfig('DailyGroupRequestLimit'), getconfig('BonusGroupRequests')))
#The below runs when a user visits the grouprequest page
if request.method == 'GET':
#if this is the conductorpage, the user will need a list of the locations that are not being used in the period selected
if conductorpage == True:
locations_used_query = session.query(location.locationid).join(group).join(period).filter(period.periodid == periodid)
locations = session.query(location).filter(~location.locationid.in_(locations_used_query)).all()
musics_used_query = session.query(music.musicid).join(group).join(period).filter(period.periodid == periodid)
musics = session.query(music).filter(~music.musicid.in_(musics_used_query)).all()
else:
locations = None
musics = session.query(music).filter(or_(*[(getattr(music,getattr(i,'instrumentname')) > 0) for i in session.query(instrument.instrumentname).filter(instrument.userid == thisuser.userid, instrument.isactive == 1)])).all()
musics_serialized = [i.serialize for i in musics]
#checks if the requested music exists and sets it up for the page
if musicid is not None:
requestedmusic = session.query(music).filter(music.musicid == musicid).first()
if requestedmusic is None:
return errorpage(thisuser,"You have requested music that could not be found in the database. Talk to the administrator." % (thisuser.grouprequestcount, getconfig('DailyGroupRequestLimit'), (now - datetime.datetime.strptime(getconfig('StartTime'), '%Y-%m-%d %H:%M')).days))
else:
requestedmusic = None
if conductorpage == True:
#Finds all players who aren't already playing in this period
playersPlayingInPeriod = session.query(user.userid).join(groupassignment).join(group).filter(group.periodid == periodid)
playersdump = session.query(user.userid,user.firstname,user.lastname,instrument.instrumentname,instrument.level,instrument.isprimary).\
join(instrument).filter(~user.userid.in_(playersPlayingInPeriod), user.isactive == 1, user.arrival <= thisperiod.starttime, user.departure >= thisperiod.endtime).all()
else:
#find all the instruments that everyone plays and serialize them to prepare to inject into the javascript
playersdump = session.query(user.userid,user.firstname,user.lastname,instrument.instrumentname,instrument.level,instrument.isprimary).\
join(instrument).filter(user.userid != thisuser.userid, user.isactive == 1, instrument.isactive == 1).all()
playersdump_serialized = []
for p in playersdump:
playersdump_serialized.append({'userid': p.userid, 'firstname': p.firstname, 'lastname': p.lastname,
'instrumentname': p.instrumentname, 'level': p.level, 'isprimary': p.isprimary})
#find all group templates and serialize them to prepare to inject into the javascript
allgrouptemplates = session.query(grouptemplate).filter(grouptemplate.size == 'S').all()
#if we are not on the conductorpage, filter the group templates so the user only sees templates that are covered by their instruments
if conductorpage == False:
grouptemplates = []
for t in allgrouptemplates:
found = False
for i in thisuserinstruments:
if getattr(t, i.instrumentname) > 0 and found == False:
grouptemplates.append(t)
found = True
#if we are on the conductorpage, show the user all the grouptemplates
else:
grouptemplates = allgrouptemplates
#serialize the grouptemplates so the JS can read them properly
grouptemplates_serialized = [i.serialize for i in grouptemplates]
session.close()
return render_template('grouprequest.html', \
thisuser=thisuser, \
thisuserinstruments=thisuserinstruments, \
thisuserinstruments_serialized=thisuserinstruments_serialized, \
playerlimit = int(getconfig('GroupRequestPlayerLimit')), \
grouptemplates = grouptemplates, \
grouptemplates_serialized=grouptemplates_serialized, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
instrumentlist_string=getconfig('Instruments'), \
playersdump_serialized=playersdump_serialized, \
conductorpage=conductorpage, \
thisperiod=thisperiod, \
locations=locations, \
musics=musics, \
musics_serialized=musics_serialized, \
requestedmusic=requestedmusic, \
)
#The below runs when a user presses "Submit" on the grouprequest page. It creates a group object with the configuration selected by
#the user, and creates groupassignments for all players they selected (and the user themselves)
if request.method == 'POST':
instrumentlist = getconfig('Instruments').split(",")
#format the packet received from the server as JSON
content = request.json
session = Session()
log('GROUPREQUEST: Grouprequest received. Whole content of JSON returned is: %s' % content)
#if we received too many players, send the user an error
if (content['musicid'] == '' or content['musicid'] is None or content['musicid'] == 'null') and (len(content['objects']) > int(getconfig('GroupRequestPlayerLimit'))) and conductorpage == False:
session.rollback()
session.close()
return jsonify(message = 'You have entered too many players. You may only submit group requests with %s players or fewer.' % getconfig('GroupRequestPlayerLimit'), url = 'none')
if len(content['objects']) == 0:
session.rollback()
session.close()
return jsonify(message = 'You must have at least one player in the group', url = 'none')
#establish the 'grouprequest' group object. This will be built up from the JSON packet, and then added to the database
#a minimumlevel and maximumlevel of 0 indicate that the levels will be picked automatically on group confirmation
grouprequest = group(ismusical = 1, requesteduserid = thisuser.userid, requesttime = datetime.datetime.now(), minimumlevel = 0, maximumlevel = 0)
if content['musicid'] is not None and content['musicid'] != '':
grouprequest.musicid = content['musicid']
else:
grouprequest.musicwritein = content['musicwritein']
#if the conductorpage is false, we need to set the status to queued
if conductorpage == False:
grouprequest.status = "Queued"
#if the conductorpage is true, we also expect to receive a locationid from the JSON packet, so we add it to the grouprequest
if conductorpage == True:
if content['locationid'] == '':
session.rollback()
session.close()
return jsonify(message = 'You must select a location for this group', url = 'none')
grouprequest.locationid = content['locationid']
grouprequest.status = "Queued"
#for each instrument
for i in instrumentlist:
#set a default value of 0
setattr(grouprequest,i,0)
#iterate over the objects in the request corresponding with that instrument, and increment the counter for each
for p in content['objects']:
if p['instrumentname'] == i:
#if it has a corresponding user, check that that user exists
if p['userid'] != 'null' and p['userid'] != '':
#try to find a user that matches this id
puser = session.query(user).filter(user.userid == p['userid']).first()
#if we don't find one, this group request is a failure
if puser is None:
debuglog('Input error. user %s does not exist in the database.' % p['userid'])
session.rollback()
session.close()
return jsonify(message = 'Input error. One of the sent users does not exist in the database.', url = 'none')
#if we find an inactive user, it's also a failure
elif puser.isactive != 1:
debuglog('User %s %s is inactive. Cannot accept this group request.' % (puser.firstname, puser.lastname))
session.rollback()
session.close()
return jsonify(message = 'A selected user is inactive. Cannot accept this group request.', url = 'none')
#increment the instrument counter
setattr(grouprequest,i,getattr(grouprequest,i) + 1)
debuglog('Instrument %s is value %s' % (i, getattr(grouprequest,i)))
#run the getgroupname function, which logically names the group
grouprequest.groupname = getgroupname(session,grouprequest)
#if we are on the conductorpage, assign it to the period the user submitted
if conductorpage == True:
grouprequest.periodid = thisperiod.periodid
#--------MATCHMAKING SECTION-----------
#try to find an existing group request with the same music and instrumentation configuration as the request
musicstatus = None
if (content['musicid'] != '' and content['musicid'] != 'null' and content['musicid'] != None):
debuglog('MATCHMAKING: Found that user has requested the music to be %s' % content['musicid'])
matchinggroups = session.query(group).filter(group.musicid == content['musicid'], group.ismusical == 1, group.periodid == None, group.iseveryone == 0).order_by(group.requesttime).all()
musicstatus = 'musicid'
musicvalue = content['musicid']
elif (content['musicwritein'] != '' and content['musicwritein'] != 'null' and content['musicwritein'] != None):
debuglog('MATCHMAKING: Found that user has written in %s for their music' % content['musicwritein'])
matchinggroups = session.query(group).filter(group.musicwritein == content['musicwritein'], group.iseveryone == 0, group.ismusical == 1, group.periodid == None)
for i in instrumentlist:
matchinggroups = matchinggroups.filter(getattr(grouprequest,i) == getattr(group,i))
matchinggroups = matchinggroups.order_by(group.requesttime).all()
musicstatus = 'musicwritein'
musicvalue = content['musicwritein']
else:
debuglog('MATCHMAKING: User did not specify any music in their request')
matchinggroups = session.query(group).filter(group.iseveryone == 0, group.ismusical == 1, group.periodid == None)
for i in instrumentlist:
matchinggroups = matchinggroups.filter(getattr(grouprequest,i) == getattr(group,i))
matchinggroups = matchinggroups.order_by(group.requesttime).all()
Match = False
#if we found at least one matching group
if matchinggroups is not None:
#check each group that matched the instrumentation for player slots
for m in matchinggroups:
debuglog("MATCHMAKING: Instrumentation and music match found, requested by %s at time %s" % (m.requesteduserid, m.requesttime))
#check if this group is a suitable level
groupmin = m.getminlevel(session)
groupmax = m.getmaxlevel(session)
#for each specific player in the request, check if there's a free spot in the matching group
#for each player in the group request
clash = False
instrumentsThatFit = {}
for p in content['objects']:
#if it's a named player, not a blank drop-down
if p['userid'] != 'null' and p['userid'] != '':
#find a list of players that are already assigned to this group, and play the instrument requested by the grouprequest
instrumentclash = session.query(groupassignment).filter(groupassignment.instrumentname == p['instrumentname'], groupassignment.groupid == m.groupid).all()
#if the list of players already matches the group instrumentation for this instrument, this match fails and break out
if instrumentclash is not None and instrumentclash != []:
#find the number of slots already taken up by this instrument in the group
slotsTaken = len(instrumentclash)
#find the number of slots we need to fill this request
slotsNeeded = sum(o['instrumentname'] == p['instrumentname'] for o in content['objects'])
#if these are greater than the maximum number in the group, this is a clash
if slotsTaken + slotsNeeded > getattr(m, p['instrumentname']):
debuglog('MATCHMAKING: Found group not suitable, does not have an open slot for this player.')
clash = True
break
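#Worked example of the slot check above (illustrative numbers): if the matched
#group calls for 2 players on this instrument, 1 slot is already taken and the
#request needs 2 more, then 1 + 2 > 2 and the group is rejected as a clash.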
#find out if this group's level is unsuitable for this player on this instrument and make a clash if they are
playerinstrument = session.query(instrument).filter(instrument.userid == p['userid'], instrument.instrumentname == p['instrumentname']).first()
if groupmin > playerinstrument.level or groupmax < playerinstrument.level :
debuglog('MATCHMAKING: Found group not suitable, the current players are of unsuitable level. Current min: %s, Current max: %s, this players level: %s.' % (groupmin,groupmax,playerinstrument.level))
clash = True
break
#find out if this player is already playing in the found group and make a clash if they are
playerclash = session.query(groupassignment).filter(groupassignment.userid == p['userid'], groupassignment.groupid == m.groupid).all()
if playerclash is not None and playerclash != []:
debuglog('MATCHMAKING: Found group not suitable, already has this player playing in it. Found the following group assignment: %s' % playerclash)
clash = True
break
#if we didn't have a clash while iterating over this group, we have a match! set the grouprequest group to be the old group and break out
if clash == False:
debuglog('MATCHMAKING: Match found. Adding the players in this request to the already formed group.')
grouprequest = m
#if the original group doesn't have music already assigned, we can assign it music from the user request
if musicstatus is not None:
setattr(grouprequest,musicstatus,musicvalue)
if musicstatus == 'musicid':
musicrequest = session.query(music).filter(music.musicid == content['musicid']).first()
grouprequest.addtolog('Setting group music to %s: %s (Requested by %s %s)' % (musicrequest.composer,musicrequest.musicname, thisuser.firstname, thisuser.lastname))
elif musicstatus == 'musicwritein':
grouprequest.addtolog('Setting group music to be custom value "%s" (Requested by %s %s)' % (content['musicwritein'], thisuser.firstname, thisuser.lastname))
session.merge(grouprequest)
Match = True
break
#if we didn't get a match, we need to create the grouprequest, we won't be using an old one
if Match == False:
debuglog('MATCHMAKING: No group already exists with the correct instrumentation slots. Creating a new group.')
#add the grouprequest to the database
session.add(grouprequest)
grouprequest.addtolog('Group initially created (Requested by %s %s)' % (thisuser.firstname,thisuser.lastname))
#If we have got to here, the user successfully created their group (or was matchmade). We need to increment their total.
if not conductorpage:
thisuser.grouprequestcount = thisuser.grouprequestcount + 1
debuglog('%s %s has now made %s group requests' % (thisuser.firstname, thisuser.lastname, thisuser.grouprequestcount))
#for each player object in the players array in the JSON packet
for p in content['objects']:
#if we are on the conductorpage, you cannot submit blank players. Give the user an error and take them back to their home.
if (p['userid'] == 'null' or p['userid'] == '') and conductorpage == True:
url = ('/user/' + str(thisuser.logonid) + '/')
session.rollback()
session.close()
return jsonify(message = 'You cannot have any empty player boxes in the group, because this is the conductor version of the group request page.', url = 'none')
#if the playerid is not null, we create a groupassignment for them and bind it to this group
if p['userid'] != 'null' and p['userid'] != '':
playeruser = session.query(user).filter(user.userid == p['userid']).first()
if playeruser is not None:
grouprequest.addplayer(session,playeruser,p['instrumentname'])
grouprequest.addtolog('Adding player %s %s to group with instrument %s (Requested by %s %s)' % (playeruser.firstname,playeruser.lastname,p['instrumentname'],thisuser.firstname,thisuser.lastname))
else:
url = ('/user/' + str(thisuser.logonid) + '/')
session.rollback()
session.close()
return jsonify(message = 'Could not find one of your selected players in the database. Please refresh the page and try again.', url = url)
#if none of the above are satisfied - that's ok. you're allowed to submit null playernames in the user request page, these will be
#allocated by the admin when the group is confirmed.
#create the group and the groupassignments configured above in the database
session.merge(thisuser)
session.commit()
#send the URL for the group that was just created to the user, and send them to that page
url = ('/user/' + str(thisuser.logonid) + '/group/' + str(grouprequest.groupid) + '/')
debuglog('Sending user to URL: %s' % url)
session.close()
flash(u'Request Successful', 'success')
return jsonify(message = 'none', url = url)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
@app.route('/user/<logonid>/grouprequest/conductor/<periodid>/', methods=['GET', 'POST'])
def conductorgrouprequest(logonid,periodid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('CONDUCTORGROUPREQUEST: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
if thisuser.isconductor != 1:
log('CONDUCTORGROUPREQUEST: user %s %s is not allowed to view this page' % (thisuser.firstname, thisuser.lastname))
raise Exception('You are not a conductor and cannot visit this page.')
else:
session.close()
return grouprequest(logonid,periodid,None)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
@app.route('/user/<logonid>/grouprequest/music/<musicid>/', methods=['GET', 'POST'])
def musicgrouprequest(logonid,musicid):
return grouprequest(logonid,None,musicid)
#This page is used by an "announcer" to edit the announcement that users see when they open their homes
@app.route('/user/<logonid>/announcement/', methods=['GET', 'POST'])
def announcementpage(logonid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('ANNOUNCEMENT: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
if thisuser.isannouncer != 1:
session.close()
return 'You do not have permission to view this page'
else:
#if this is a user requesting the page
if request.method == 'GET':
#get the current announcement
currentannouncement = session.query(announcement).order_by(desc(announcement.creationtime)).first()
session.close()
return render_template('announcement.html', \
currentannouncement=currentannouncement, \
thisuser=thisuser, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
)
#if this is a user that just pressed submit
if request.method == 'POST':
#create a new announcement object with the submitted content, and send it
newannouncement = announcement(content = request.json['content'], creationtime = datetime.datetime.now())
session.add(newannouncement)
session.commit()
url = ('/user/' + str(thisuser.logonid) + '/')
session.close()
#send the user back to their home
flash(u'Changes Saved', 'success')
return jsonify(message = 'none', url = url)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#This page shows the queued groups; it is only accessible by the admin
@app.route('/user/<logonid>/groupscheduler/', methods=['GET', 'POST'])
def groupscheduler(logonid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('GROUPSCHEDULER: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
if request.method == 'GET':
if thisuser.isadmin != 1:
session.close()
return 'You do not have permission to view this page'
else:
groups = session.query(*[c for c in group.__table__.c]
).add_columns(
period.periodid,
period.periodname,
period.starttime,
period.endtime,
user.firstname,
user.lastname,
music.composer,
music.musicname
).outerjoin(period
).outerjoin(user
).outerjoin(music
).filter(
group.groupname != 'absent',
group.iseveryone != 1,
or_(
period.starttime == None,
period.starttime > datetime.datetime.now()
)
).order_by(
group.status.desc(),
group.periodid.nullslast(),
group.requesttime
).all()
debuglog("GROUPSCHEDULER: Found %s queued groups to show the user" % len(groups))
#find all periods after now so the admin can choose which they want to fill with groups
periods = session.query(period).filter(period.starttime > datetime.datetime.now()).all()
session.close()
return render_template('groupscheduler.html', \
groups=groups, \
periods=periods, \
thisuser=thisuser, \
now=datetime.datetime.now(), \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
)
if request.method == 'POST':
log('GROUPSCHEDULER: POST received with submittype %s' % request.json['submittype'])
if request.json['submittype'] == 'reset':
thisgroup = session.query(group).filter(group.groupid == request.json['groupid']).first()
if thisgroup is None:
session.rollback()
session.close()
return jsonify(message = 'Could not find this group in the database. Refresh your page and try again.', success = 'false')
else:
thisgroup.periodid = None
thisgroup.locationid = None
session.merge(thisgroup)
session.commit()
session.close()
return jsonify(message = 'none', success = 'true')
elif request.json['submittype'] == 'fillall':
debuglog('FILLALL: Admin has initiated a fill_all action for period id:%s' % request.json['periodid'])
#first, check if the period they selected is valid and in the future
if request.json['periodid'] is None or request.json['periodid'] == '':
raise Exception('You must choose a period before requesting the fill-all.')
else:
thisperiod = getperiod(session,request.json['periodid'])
debuglog('FILLALL: Filling period name:%s id:%s' % (thisperiod.periodname,thisperiod.periodid))
if thisperiod is None:
raise Exception('Could not find the requested period, or the selected period is in the past. Refresh the page and try again.')
#iterate through the groups that already have this period assigned. If we fail to allocate on just one of these, we need to break out and inform the admin
possiblegroups = session.query(group
).outerjoin(period
).filter(
or_(
group.periodid == thisperiod.periodid,
group.periodid == None
),
group.status == "Queued"
).order_by(
group.periodid.nullslast(),
group.requesttime
).all()
debuglog('FILLALL: Found %s groups to be filled' % len(possiblegroups))
for g in possiblegroups:
#first, purge any players that have since left the camp or are marked inactive
g.purgeoldplayers(session)
                        #then, if this group is empty, has no allocated period and was requested by a player who is now inactive or has already departed, delete it
requesteduser = getuser(session,g.requesteduserid)
if g.periodid is None and \
g.totalallocatedplayers == 0 and \
(requesteduser.isactive != 1 or requesteduser.departure < datetime.datetime.now()):
debuglog('FILLALL: Group name:%s id:%s is an orphan group and will now be deleted' % (g.groupname,g.groupid))
g.delete(session)
#then, check if any of the players in this group are already playing in this period
elif g.checkplayerclash(session,thisperiod):
                            debuglog('FILLALL: Found that group %s cannot be autofilled because one or more of its players are already playing in this period. Skipping this group.' % g.groupname)
else:
debuglog('FILLALL: Attempting autofill, location allocation and confirmation for group name:%s id:%s' % (g.groupname,g.groupid))
#see if we can assign the group a location for this period
if g.locationid is None or g.locationid == '':
#get the instruments in this group to check against the location restrictions
instruments = g.instruments
#get the locations already being used in this period
locations_used_query = session.query(location.locationid).join(group).join(period).filter(period.periodid == thisperiod.periodid)
#get the location with the minimum capacity that fits this group, is currently free, and does not void any instrument restrictions
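                                #note: g.instruments presumably holds the instrument names for this group, and the starred
                                #list comprehension below expands into one filter per instrument (getattr(location, name) > 0),
                                #so rooms whose column for a required instrument is 0 are treated as restricted and excluded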
thislocation = session.query(
location
).filter(
~location.locationid.in_(locations_used_query),
location.capacity >= g.totalinstrumentation,
*[getattr(location,i) > 0 for i in instruments]
).order_by(
location.capacity
).first()
else:
thislocation = getlocation(session,g.locationid)
#if we could not find a suitable location, break out and send the user a message informing them
if thislocation is None:
debuglog('FILLALL: No suitable location exists for group with name %s and id %s. Can not autofill this group.' % (g.groupname,g.groupid))
if g.periodid is not None:
message = 'Fill-All failed at group "%s". Could not find a suitable location for this group. Try editing other groups in the period to make room, or reduce the number of players in the group.' % g.groupname
url = 'refresh'
session.commit()
session.close()
flash(u'Period Partially Filled', 'message')
return jsonify(message = message, url = url )
else:
#find how many players can be autofilled into this group
players = autofill(session,g,thisperiod)
#if the autofill didn't completely fill the group and this group already has an assigned period, we need to break out and inform the admin that they need to do this group manually
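                                #worked example: a group with totalinstrumentation of 5 and 2 players already allocated
                                #needs autofill to return 3 players here; anything less trips one of the checks below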
if g.totalinstrumentation != g.totalallocatedplayers + len(players) and g.periodid is not None:
                                    message = 'Fill-All failed at group "%s". This is because the group requires %s players, but the autofill algorithm only found %s players. Select this group and fill it manually, then attempt to fill-all again.' % (g.groupname,g.totalinstrumentation,g.totalallocatedplayers+len(players))
url = 'refresh'
debuglog('FILLALL: Autofill failed to completely fill group name:%s id:%s totalinstrumentation:%s totalallocatedplayers:%s. Found only %s players to autofill. Sending an error to the admin.' % (g.groupname,g.groupid,g.totalinstrumentation,g.totalallocatedplayers,len(players)))
session.commit()
session.close()
flash(u'Period Partially Filled', 'message')
return jsonify(message = message, url = url)
#if this group doesn't have an assigned period and can't be assigned to the selected period - just skip it.
elif g.totalinstrumentation != g.totalallocatedplayers + len(players):
                                    debuglog('FILLALL: Found that group %s cannot be autofilled with current players in the pool. Skipping this group.' % g.groupname)
else:
#if we got here, this group is good to go
#assign the location as found in a previous query
g.locationid = thislocation.locationid
#assign the period as selected
g.periodid = thisperiod.periodid
#allocate the players
g.addplayers(session,players)
#confirm the group
g.status = "Confirmed"
session.merge(g)
session.commit()
debuglog('FILLALL: Group confirmed - Status:%s Name:%s Locationid:%s Periodid:%s TotalPlayers:%s TotalSetInstrumentation:%s' % (g.status,g.groupname,g.locationid,g.periodid,g.totalallocatedplayers,g.totalinstrumentation))
session.commit()
session.close()
                debuglog('FILLALL: Fill-all operation successful')
                flash(u'Period Successfully Filled', 'success')
return jsonify(message = 'none', url = 'refresh')
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#This page is for creating a public event. It comes up as an option for administrators on their homes
@app.route('/user/<logonid>/publicevent/<periodid>/', methods=['GET', 'POST'])
def publiceventpage(logonid,periodid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('PUBLICEVENT: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
if thisuser.isadmin != 1:
            raise Exception('You do not have permission to view this page')
else:
#if the user requested the public event page
if request.method == 'GET':
#get the locations that aren't being used yet for this period
locations_used_query = session.query(location.locationid).join(group).join(period).filter(period.periodid == periodid)
locations = session.query(location).filter(~location.locationid.in_(locations_used_query)).all()
#get the period details to display to the user on the page
thisperiod = session.query(period).filter(period.periodid == periodid).first()
session.close()
return render_template('publicevent.html', \
locations=locations, \
thisuser=thisuser, \
thisperiod=thisperiod, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
)
#if the user pressed "submit" on the public event page
if request.method == 'POST':
if request.json['locationid'] == '' or request.json['groupname'] == '':
raise Exception('Submission failed. You must submit both an event name and location.')
event = group(periodid = periodid, iseveryone = 1, groupname = request.json['groupname'], requesteduserid = thisuser.userid,\
ismusical = 0, locationid = request.json['locationid'], status = "Confirmed", requesttime = datetime.datetime.now())
if request.json['groupdescription'] and request.json['groupdescription'] != '':
event.groupdescription = request.json['groupdescription']
session.add(event)
session.commit()
url = ('/user/' + str(thisuser.logonid) + '/group/' + str(event.groupid) + '/')
session.close()
flash(u'Event Created', 'success')
return jsonify(message = 'none', url = url)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#this page is the full report for any given period
@app.route('/user/<logonid>/period/<periodid>/')
def periodpage(logonid,periodid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('PERIODPAGE: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
thisperiod = session.query(period).filter(period.periodid == periodid).first()
#find any public events on during this period
publicevents = session.query(
group.groupname,
group.groupid,
location.locationname,
group.groupdescription
).join(location
).filter(
group.iseveryone == 1,
group.periodid == periodid
).all()
#start with the players that are playing in groups in the period
players = session.query(
user.userid,
user.firstname,
user.lastname,
user.dietaryrequirements,
user.agecategory,
period.starttime,
period.endtime,
group.groupname,
groupassignment.instrumentname,
location.locationname,
groupassignment.groupid
).join(groupassignment, user.userid == groupassignment.userid
).join(group
).join(period
).outerjoin(location
).filter(
user.arrival <= thisperiod.starttime,
user.departure >= thisperiod.endtime,
group.periodid == thisperiod.periodid,
group.status == 'Confirmed',
group.groupname != 'absent'
).order_by(
group.groupid,
groupassignment.instrumentname
).all()
#grab just the userids of those players to be used in the next query
players_in_groups_query = session.query(user.userid).\
join(groupassignment).join(group).join(period).\
filter(user.isactive == 1, user.arrival <= thisperiod.starttime, user.departure >= thisperiod.starttime, group.periodid == thisperiod.periodid, group.status == 'Confirmed')
#find all other players to be displayed to the user
unallocatedplayers = session.query(user.userid, user.firstname, user.lastname, user.dietaryrequirements, user.agecategory,instrument.instrumentname).join(instrument).filter(~user.userid.in_(players_in_groups_query), user.isactive == 1, user.arrival <= thisperiod.starttime, user.departure >= thisperiod.starttime, instrument.isprimary == 1, instrument.isactive == 1).all()
unallocatedplayers_query = session.query(user.userid).join(instrument).filter(~user.userid.in_(players_in_groups_query), user.isactive == 1, user.arrival <= thisperiod.starttime, user.departure >= thisperiod.starttime, instrument.isprimary == 1, instrument.isactive == 1)
nonplayers = session.query(user.userid, user.firstname, user.lastname, user.dietaryrequirements,user.agecategory, sqlalchemy.sql.expression.literal_column("'Non Player'").label("instrumentname")).filter(~user.userid.in_(players_in_groups_query), ~user.userid.in_(unallocatedplayers_query), user.isactive == 1, user.arrival <= thisperiod.starttime, user.departure >= thisperiod.starttime).all()
thisperiod = session.query(period).filter(period.periodid == periodid).first()
nextperiod = session.query(period).filter(period.starttime > thisperiod.starttime).order_by(period.starttime).first()
previousperiod = session.query(period).filter(period.starttime < thisperiod.starttime).order_by(desc(period.starttime)).first()
mealstats = thisperiod.getmealstats(session)
session.close()
return render_template('periodpage.html', \
players=players, \
unallocatedplayers=unallocatedplayers, \
publicevents=publicevents, \
nonplayers=nonplayers, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=(getconfig('Instruments') + ',Non Player').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
thisuser=thisuser, \
thisperiod=thisperiod, \
mealstats=mealstats, \
previousperiod=previousperiod, \
nextperiod=nextperiod, \
)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#handles the admin page
@app.route('/user/<logonid>/useradmin/')
def useradmin(logonid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('USERADMIN: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
        #check if this user is an admin or the Administrator superuser, if they are not, deny them.
if thisuser.isadmin != 1 and thisuser.logonid != getconfig('AdminUUID'):
raise Exception('You are not allowed to view this page.')
else:
#get the userids and their associated primary instruments
primaryinstruments_subquery = session.query(
instrument.userid.label('primaryinstruments_userid'),
instrument.instrumentname.label('instrumentname')
).filter(
instrument.isactive == 1,
instrument.isprimary == 1
).subquery()
            #make a query that totals the number of times each potential user has played at camp.
playcounts_subquery = session.query(
user.userid.label('playcounts_userid'),
func.count(user.userid).label("playcount")
).group_by(
user.userid
).outerjoin(groupassignment, groupassignment.userid == user.userid
).outerjoin(group, group.groupid == groupassignment.groupid
).filter(
groupassignment.instrumentname != 'Conductor',
group.ismusical == 1,
group.periodid != None,
).subquery()
users = session.query(
user.userid,
user.logonid,
user.isactive,
user.firstname,
user.lastname,
user.isadmin,
user.isconductor,
user.isannouncer,
user.grouprequestcount,
primaryinstruments_subquery.c.instrumentname,
playcounts_subquery.c.playcount,
).outerjoin(primaryinstruments_subquery, primaryinstruments_subquery.c.primaryinstruments_userid == user.userid
).outerjoin(playcounts_subquery, playcounts_subquery.c.playcounts_userid == user.userid
).order_by(
user.firstname,
user.lastname
).all()
return render_template('useradmin.html', \
thisuser=thisuser, \
users=users, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#handles the useredit page
@app.route('/user/<logonid>/edituser/<targetuserid>/', methods=['GET', 'POST'])
def edituser(logonid, targetuserid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('EDITUSER: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
if thisuser.isadmin != 1 and targetuserid is None:
session.close()
return errorpage(thisuser,'You do not have permission to view this page.')
if targetuserid is not None:
if targetuserid == 'self':
targetuser = session.query(user).filter(user.userid == thisuser.userid).first()
else:
targetuser = session.query(user).filter(user.userid == targetuserid).first()
if targetuser is None:
session.close()
return errorpage(thisuser,'Could not find requested user in database.')
elif thisuser.isadmin != 1 and (thisuser.userid != targetuser.userid):
session.close()
return errorpage(thisuser,'You do not have permission to view this page.')
else:
targetuser = user()
targetuserinstruments = session.query(instrument).filter(instrument.userid == targetuser.userid).all()
periods = session.query(period).order_by(period.starttime).all()
#if this is a user requesting the page
if request.method == 'GET':
session.close()
return render_template('edituser.html', \
thisuser=thisuser, \
targetuser=targetuser, \
targetuserinstruments=targetuserinstruments, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
periods=periods, \
maximumlevel=int(getconfig('MaximumLevel')), \
)
#if this is a user that just pressed submit
if request.method == 'POST':
            #format the packet received from the browser as JSON
content = request.json
if thisuser.isadmin == 1:
                #if this user doesn't have an ID yet, they are new and we must set them up
if targetuser.userid is None:
#assign them a userid from a randomly generated uuid
targetuser.userid = str(uuid.uuid4())
targetuser.logonid = str(uuid.uuid4())
session.add(targetuser)
elif content['submittype'] == 'reset':
targetuser.logonid = str(uuid.uuid4())
session.merge(targetuser)
session.commit()
session.close()
return jsonify(message = 'User logon reset.', url = 'none', success='true')
if content['firstname'] == '' or content['firstname'] == 'null' or content['firstname'] == 'None' or \
content['lastname'] == '' or content['lastname'] == 'null' or content['lastname'] == 'None':
session.rollback()
session.close()
return jsonify(message = 'You cannot save a user without a firstname and lastname.', url = 'none')
if content['arrival'] >= content['departure']:
session.rollback()
session.close()
return jsonify(message = 'Your departure time must be after your arrival time.', url = 'none')
            #add the content in the packet to this user's attributes
for key,value in content.items():
if (thisuser.isadmin != 1 and thisuser.isadmin != '1') and key != 'arrival' and key != 'departure' and key != 'isactive' and key != 'submittype' and key != 'objects':
session.rollback()
session.close()
return jsonify(message = 'Users are not allowed to edit this attribute. The page should not have given you this option.', url = 'none')
elif not isinstance(value, list) and value is not None and value != 'null' and value != '' and value != 'None' and value != 'HIDDEN':
debuglog('Setting %s to be %s' % (key, value))
setattr(targetuser,key,value)
session.merge(targetuser)
session.commit()
newinstrument = False
            #for each instrument object in the received packet
for i in content['objects']:
if i['instrumentid'] == 'new' and thisuser.isadmin == 1:
thisinstrument = instrument(userid = targetuser.userid)
session.add(thisinstrument)
debuglog('Added new instrument for user %s' % targetuser.firstname)
newinstrument = True
elif i['instrumentid'] != 'new':
thisinstrument = session.query(instrument).filter(instrument.instrumentid == i['instrumentid']).first()
if thisinstrument is None:
session.rollback()
session.close()
return jsonify(message = 'Instrument listing not found, could not modify the listing.', url = 'none')
if thisuser.isadmin != 1 and thisinstrument.userid != thisuser.userid:
session.rollback()
session.close()
return jsonify(message = 'You cannot change an instrument listing for another user.', url = 'none')
else:
session.rollback()
session.close()
return jsonify(message = 'You have submitted illegal parameters for your instrument. No changes have been made to your instrument listings.', url = 'none')
for key,value in i.items():
if key != 'instrumentid':
if thisuser.isadmin != 1 and key != 'isactive' and key != 'isprimary':
session.rollback()
session.close()
return jsonify(message = 'You have submitted illegal parameters for your instrument. No changes have been made to your instrument listings.', url = 'none')
elif value != 'None':
setattr(thisinstrument,key,value)
session.merge(thisinstrument)
session.commit()
if content['submittype'] == 'submit':
url = '/user/' + str(thisuser.logonid) + '/'
message = 'none'
flash(u'Changes Saved', 'success')
elif content['submittype'] == 'save':
if targetuserid is None:
url = ('/user/' + str(thisuser.logonid) + '/edituser/' + str(targetuser.userid) + '/')
message = 'none'
flash(u'New User Successfully Created', 'success')
else:
if newinstrument == True:
url = 'refresh'
flash(u'New Instrument Successfully Added', 'success')
else:
url = 'none'
message = 'none'
else:
url = 'none'
message = 'Incomplete request. Request failed.'
session.close()
#send the user back to their home
return jsonify(message = message, url = url)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
@app.route('/user/<logonid>/newuser/', methods=['GET', 'POST'])
def newuser(logonid):
return edituser(logonid,None)
@app.route('/user/<logonid>/settings/', methods=['GET', 'POST'])
def settings(logonid):
return edituser(logonid,'self')
#sends bulk emails to an array of users sent with the request
@app.route('/user/<logonid>/email/', methods=['POST'])
def sendlinkemail(logonid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('SENDLINKEMAIL: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
if thisuser.isadmin != 1:
session.close()
return errorpage(thisuser,'You do not have permission to view this page')
content = request.json
log('SENDLINKEMAIL: Content received: %s' % content)
errors = ''
success = 'true'
for u in content['objects']:
targetuser = session.query(user).filter(user.userid == u['userid']).first()
if targetuser is None:
errors = errors + ('Could not find user with id %s in database\n' % u['userid'])
elif targetuser.email is None or targetuser.email == '':
errors = errors + ('Could not find email for user %s %s\n' % (targetuser.firstname, targetuser.lastname))
else:
subject = ('Your %s Daily Schedule' % getconfig('Name'))
body = """Hi %s, welcome to %s!\n
Your homepage, containing your daily schedule, is here:\n
%s/user/%s/ \n
WARNING: DO NOT GIVE THIS LINK, OR ANY LINK ON THIS WEBSITE TO ANYONE ELSE. It is yours, and yours alone and contains your connection credentials.\n
If you're new to camp please stay after the first dinner and we'll walk you through how to use this website, including how to make your first group request.\n
If you have any questions, please reply to this email or contact us on %s.\n
Thanks!\n
%s
%s""" % (targetuser.firstname, \
getconfig('Name'), \
getconfig('Website_URL'), \
targetuser.logonid, \
getconfig('SupportEmailAddress'), \
thisuser.firstname, \
getconfig('Name')\
)
message = send_email(targetuser.email, subject, body)
if message == 'Failed to Send Email':
errors = errors + ('Failed to send email to %s %s\n' % (targetuser.firstname, targetuser.lastname))
session.close()
if errors != '':
message = 'Completed with errors:\n' + errors
success = 'false'
return jsonify(message = message, url = 'none', success = success)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#Handles the conductor instrumentation page.
@app.route('/user/<logonid>/instrumentation/<periodid>/', methods=['GET', 'POST'])
def instrumentation(logonid,periodid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('INSTRUMENTATION: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
#check if this user is a conductor, if they are not, deny them.
if thisuser.isconductor != 1:
return ('You are not allowed to view this page.')
else:
#gets the data associated with this period
thisperiod = session.query(period).filter(period.periodid == periodid).first()
if thisperiod is None:
return ('Could not find the period requested.')
#The below runs when a user visits the instrumentation page
if request.method == 'GET':
#Get a list of the locations that are not being used in the period selected
locations_used_query = session.query(location.locationid).join(group).join(period).filter(period.periodid == periodid)
locations = session.query(location).filter(~location.locationid.in_(locations_used_query)).all()
#Get a list of the available music not being used in the period selected
musics_used_query = session.query(music.musicid).join(group).join(period).filter(period.periodid == periodid)
musics = session.query(music).filter(~music.musicid.in_(musics_used_query)).all()
musics_serialized = [i.serialize for i in musics]
#get a list of conductors to fill the dropdown on the page
everyone_playing_in_periodquery = session.query(user.userid).join(groupassignment).join(group).join(period).filter(period.periodid == thisperiod.periodid)
conductors = session.query(user).join(instrument, user.userid == instrument.userid).filter(instrument.instrumentname == 'Conductor', instrument.isactive == 1).filter(~user.userid.in_(everyone_playing_in_periodquery)).all()
#get the list of instruments from the config file
instrumentlist = getconfig('Instruments').split(",")
#find all large group templates and serialize them to prepare to inject into the javascript
grouptemplates = session.query(grouptemplate).filter(grouptemplate.size == 'L').all()
grouptemplates_serialized = [i.serialize for i in grouptemplates]
session.close()
return render_template('instrumentation.html', \
thisuser=thisuser, \
grouptemplates = grouptemplates, \
conductors=conductors, \
grouptemplates_serialized=grouptemplates_serialized, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
thisperiod=thisperiod, \
locations=locations, \
maximumlevel=int(getconfig('MaximumLevel')), \
musics=musics, \
musics_serialized=musics_serialized, \
instrumentlist_string=getconfig('Instruments'), \
)
            #The below runs when a user presses "Submit" on the instrumentation page. It creates a group object with the configuration selected by
#the user, and creates groupassignments if needed
if request.method == 'POST':
                #format the packet received from the browser as JSON
content = request.json
session = Session()
debuglog('Grouprequest received. Whole content of JSON returned is: %s' % content)
#establish the 'grouprequest' group object. This will be built up from the JSON packet, and then added to the database
grouprequest = group(ismusical = 1, requesteduserid = thisuser.userid, periodid = thisperiod.periodid, status = "Queued", requesttime = datetime.datetime.now())
#check if a group already exists for this period with the same name
namecheck = session.query(group).filter(group.groupname == content['groupname'], group.periodid == thisperiod.periodid).first()
if namecheck is not None:
raise Exception('Could not create instrumentation, there is already a group named %s in this period.' % namecheck.groupname)
                #for each key/value pair in the JSON packet, validate the required fields and copy the value onto the group request
for key, value in content.items():
if (key == 'groupname' or key == 'maximumlevel' or key == 'mininumlevel') and value == '':
raise Exception('Could not create instrumentation, you must enter a groupname, music, maximumlevel and mininumlevel')
if value != '' and value is not None and value != 'null' and value != 'None':
setattr(grouprequest, key, value)
else:
setattr(grouprequest, key, None)
                #create the group and the groupassignments configured above in the database
session.add(grouprequest)
#if the user plays the "conductor" instrument (i.e. they are actually a conductor) assign them to this group
if content['conductoruserid'] is not None and content['conductoruserid'] != 'null' and content['conductoruserid'] != '':
userconductor = session.query(user).join(instrument, user.userid == instrument.userid).filter(user.userid == content['conductoruserid'], instrument.instrumentname == "Conductor", instrument.isactive == 1, user.isactive == 1, user.arrival <= thisperiod.starttime, user.departure >= thisperiod.endtime).first()
if userconductor is not None:
userplayinginperiod = session.query(user.userid).join(groupassignment).join(group).join(period).filter(period.periodid == thisperiod.periodid, user.userid == content['conductoruserid']).first()
if userplayinginperiod is None:
grouprequest.addplayer(session,getuser(session,content['conductoruserid']),'Conductor')
else:
raise Exception('Could not create instrumentation, %s is already assigned to a group during this period.' % userconductor.firstname)
else:
raise Exception('The user requested as the conductor is not set up to be a conductor in the database.')
session.commit()
#send the URL for the group that was just created to the user, and send them to that page
url = ('/user/' + str(thisuser.logonid) + '/group/' + str(grouprequest.groupid) + '/')
log('Sending user to URL: %s' % url)
session.close()
flash(u'Instrumentation Accepted', 'success')
return jsonify(message = 'none', url = url)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#Application setup page. This needs to be run at the start, just after the app has been deployed. The user uploads config files and user lists to populate the database.
@app.route('/user/<logonid>/setup/', methods=["GET", "POST"])
def setup(logonid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('SETUP: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
#check if this user is an admin or the Administrator superuser, if they are not, deny them.
if thisuser.isadmin != 1 and thisuser.logonid != getconfig('AdminUUID'):
return errorpage(thisuser,'You are not allowed to view this page.')
else:
if request.method == 'GET':
session.close()
return render_template('setup.html', \
thisuser=thisuser, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
)
if request.method == 'POST':
session.close()
# Get the name of the uploaded file
file_bytes = request.files['file']
filename = secure_filename(file_bytes.filename)
if file_bytes and allowed_file(filename):
log('SETUP: File received named %s' % filename)
file_string = file_bytes.getvalue()
file_text = file_string.decode('utf-8')
csv = StringIO(file_text)
if filename == 'config.xml':
message = dbbuild(file_text)
if filename == 'campers.csv':
message = importusers(csv)
if filename == 'musiclibrary.csv':
message = importmusic(csv)
if message == 'Success':
flash(message,'success')
else:
flash(message,'error')
return redirect(request.url)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#This page is viewable by the admin only, it lets them edit different objects in the database - grouptemplates, locations, periods, etc.
@app.route('/user/<logonid>/objecteditor/<input>/', methods=["GET","POST","DELETE"])
def objecteditor(logonid, input, objectid=None):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('OBJECTEDITOR: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
try:
if thisuser.isadmin != 1:
session.close()
return render_template('errorpage.html', \
thisuser=thisuser, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
errormessage = 'You do not have permission to view this page.'
)
debuglog('User requested objects with type %s' % input)
if input == 'grouptemplate':
table = 'grouptemplate'
type = 'Group Template'
objects_query = session.query(grouptemplate)
elif input == 'location':
table = 'location'
type = 'Location'
objects_query = session.query(location)
elif input == 'period':
table = 'period'
type = 'Period'
objects_query = session.query(period)
elif input == 'music':
table = 'music'
type = 'Music'
objects_query = session.query(music)
elif input == 'group':
table = 'group'
type = 'Group'
objects_query = session.query(group)
else:
session.close()
return render_template('errorpage.html', \
thisuser=thisuser, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
errormessage = 'Invalid input.'
)
if request.method == 'GET':
object_dict = dict((col, getattr(objects_query.first(), col)) for col in objects_query.first().__table__.columns.keys())
objects = objects_query.all()
session.close()
return render_template('objecteditor.html', \
thisuser=thisuser, \
object_dict=object_dict, \
objects=objects, \
type=type, \
table=table, \
objectid=objectid, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress'), \
)
if request.method == 'POST':
try:
                #format the packet received from the browser as JSON
content = request.json
debuglog('Received packet for modifying %ss with content: %s' % (type, content))
for o in content['objects']:
if o[table + 'id'] != '' and o[table + 'id'] is not None:
if o[table + 'id'] == 'new':
debuglog('Found a new object to create')
if table == 'grouptemplate':
object = grouptemplate()
elif table == 'location':
object = location()
elif table == 'period':
object = period()
elif table == 'music':
object = music()
elif table == 'group':
object = group()
session.add(object)
else:
debuglog('Trying to find a %s object with id %s' % (table, o[table + 'id']))
object = objects_query.filter(getattr(globals()[table],(table + 'id')) == o[table + 'id']).first()
if object is None:
session.rollback()
session.close()
return jsonify(message = 'Could not find one of your requested objects. This is a malformed request packet.', url = 'none')
for key, value in o.items():
if key != table + 'id':
debuglog('Changing object %s key %s to %s' % (table, key, value))
setattr(object,key,value)
session.merge(object)
url = '/user/' + thisuser.logonid + '/objecteditor/' + table + '/'
session.commit()
session.close()
return jsonify(message = 'none', url = url)
except Exception as ex:
session.rollback()
session.close()
                return jsonify(message = 'Failed to post update with exception: %s' % ex, url = 'none')
if request.method == 'DELETE':
try:
session.delete(objects_query.filter(getattr(globals()[table],(table + 'id')) == request.json).first())
url = '/user/' + thisuser.logonid + '/objecteditor/' + table + '/'
session.commit()
session.close()
return jsonify(message = 'none', url = url)
except Exception as ex:
session.rollback()
session.close()
return jsonify(message = 'Failed to delete object with exception %s' % ex, url = 'none')
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
#The catering page displays all periods flagged as a meal, along with numbers and dietary requirements
@app.route('/user/<logonid>/catering/')
def cateringpage(logonid):
try:
session = Session()
thisuser = getuser(session,logonid,True)
log('CATERINGPAGE: user firstname:%s lastname:%s method:%s' % (thisuser.firstname, thisuser.lastname, request.method))
except Exception as ex:
session.close()
return str(ex)
start = datetime.datetime.strptime(getconfig('StartTime').split(' ')[0], '%Y-%m-%d')
end = datetime.datetime.strptime(getconfig('EndTime').split(' ')[0], '%Y-%m-%d')
campdays = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days + 1)]
try:
days = []
for day in campdays:
thisday = {}
thisday['date'] = day
meals = []
nextday = day + timedelta(days=1)
mealperiods = session.query(period).filter(period.meal == 1, period.starttime >= day, period.starttime < nextday).all()
for thisperiod in mealperiods:
meals.append(thisperiod.getmealstats(session))
thisday['meals'] = meals
days.append(thisday)
session.close()
return render_template('cateringpage.html', \
thisuser=thisuser, \
days=days, \
campname=getconfig('Name'), favicon=getconfig('Favicon_URL'), instrumentlist=getconfig('Instruments').split(","), supportemailaddress=getconfig('SupportEmailAddress')
)
except Exception as ex:
log('Failed to execute %s for user %s %s with exception: %s.' % (request.method, thisuser.firstname, thisuser.lastname, ex))
message = ('Failed to execute %s with exception: %s. Try refreshing the page and trying again or contact camp administration.' % (request.method, ex))
session.rollback()
session.close()
if request.method == 'GET':
return errorpage(thisuser,'Failed to display page. %s' % ex)
else:
return jsonify(message = message, url = 'none')
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('js', path)
@app.route('/css/<path:path>')
def send_css(path):
return send_from_directory('css', path)
@app.route('/img/<path:path>')
def send_png(path):
return send_from_directory('img', path) |
the-stack_106_26029 | from flask_mail import Message
from flask import render_template
from . import mail
subject_prefix = 'Phil Blog'
sender_email = '[email protected]'
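# Hedged usage sketch (the subject, template path and recipient below are illustrative, not from this repo):
#   mail_message('Welcome to Phil Blog', 'email/welcome', '[email protected]', name='Phil')
# renders the templates 'email/welcome.txt' and 'email/welcome.html' with the extra keyword
# arguments as context, then sends the message through the configured Flask-Mail instance.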
def mail_message(subject,template,to,**kwargs):
email = Message(subject, sender=sender_email, recipients=[to])
email.body= render_template(template + ".txt",**kwargs)
email.html = render_template(template + ".html",**kwargs)
mail.send(email) |
the-stack_106_26031 | import distutils.command.build_clib as orig
from distutils.errors import DistutilsSetupError
from distutils import log
from setuptools.dep_util import newer_pairwise_group
class build_clib(orig.build_clib):
"""
Override the default build_clib behaviour to do the following:
1. Implement a rudimentary timestamp-based dependency system
so 'compile()' doesn't run every time.
2. Add more keys to the 'build_info' dictionary:
* obj_deps - specify dependencies for each object compiled.
this should be a dictionary mapping a key
with the source filename to a list of
dependencies. Use an empty string for global
dependencies.
* cflags - specify a list of additional flags to pass to
the compiler.
"""
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources', )
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
sources = list(sources)
log.info("building '%s' library", lib_name)
# Make sure everything is the correct type.
# obj_deps should be a dictionary of keys as sources
# and a list/tuple of files that are its dependencies.
obj_deps = build_info.get('obj_deps', )
if not isinstance(obj_deps, dict):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
dependencies = []
# Get the global dependencies that are specified by the '' key.
# These will go into every source's dependency list.
global_deps = obj_deps.get('', list())
if not isinstance(global_deps, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
# Build the list to be used by newer_pairwise_group
# each source will be auto-added to its dependencies.
for source in sources:
src_deps = [source]
src_deps.extend(global_deps)
extra_deps = obj_deps.get(source, list())
if not isinstance(extra_deps, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
src_deps.extend(extra_deps)
dependencies.append(src_deps)
expected_objects = self.compiler.object_filenames(
sources,
output_dir=self.build_temp,
)
if (
newer_pairwise_group(dependencies, expected_objects)
!= ([], [])
):
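                # newer_pairwise_group() returns ([], []) when every object file is newer than
                # its whole dependency group, so a non-empty result means at least one source
                # (or one of its listed dependencies) changed and a recompile is needed.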
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros', )
include_dirs = build_info.get('include_dirs', )
cflags = build_info.get('cflags', )
self.compiler.compile(
sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
extra_postargs=cflags,
debug=self.debug
)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(
expected_objects,
lib_name,
output_dir=self.build_clib,
debug=self.debug
)
|
the-stack_106_26032 | # An Accessory for Adafruit NeoPixels attached to GPIO Pin18
# Tested using Python 3.6 Raspberry Pi
# This device uses all available services for the Homekit Lightbulb API
# Note: set your neopixels settings under the #NeoPixel constructor arguments
# Note: RPi GPIO must be PWM. Neopixels.py will warn if wrong GPIO is used
# at runtime
# Note: This Class requires the installation of rpi_ws281x lib
# Follow the instllation instructions;
# git clone https://github.com/jgarff/rpi_ws281x.git
# cd rpi_ws281x
# scons
#
# cd python
# sudo python3.6 setup.py install
# https://learn.adafruit.com/neopixels-on-raspberry-pi/software
# Apple Homekit API Call Order
# User changes light settings on iOS device
# Changing Brightness - State - Hue - Brightness
# Changing Color - Saturation - Hue
# Changing Temp/Sat - Saturation - Hue
# Changing State - State
# import logging
from neopixel import *
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_LIGHTBULB
class NeoPixelLightStrip(Accessory):
category = CATEGORY_LIGHTBULB
__accessoryState = 0 # State of the neo light On/Off
__hue = 0 # Hue Value 0 - 360 Homekit API
__saturation = 100 # Saturation Values 0 - 100 Homekit API
__brightness = 100 # Brightness value 0 - 100 Homekit API
# NeoPixel constructor arguments
LED_COUNT = 8
LED_PIN = 18
LED_FREQ_HZ = 800000
LED_DMA = 10
    LED_BRIGHTNESS = 255 # Note this is for the neopixel object constructor only
LED_INVERT = False
__neo_strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ,
LED_DMA, LED_INVERT, LED_BRIGHTNESS)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set our neopixel API services up using Lightbulb base
serv_light = self.add_preload_service(
'Lightbulb', chars=['On', 'Hue', 'Saturation', 'Brightness'])
# Configure our callbacks
self.char_hue = serv_light.configure_char(
'Hue', setter_callback=self.set_hue)
self.char_saturation = serv_light.configure_char(
'Saturation', setter_callback=self.set_saturation)
self.char_on = serv_light.configure_char(
'On', setter_callback=self.set_bulb)
self.char_on = serv_light.configure_char(
'Brightness', setter_callback=self.set_brightness)
# Must be called before any colors can be applied to neoPixels
self.__neo_strip.begin()
# def __setstate__(self, state):
# print("___ setstate ___")
# self.__dict__.update(state)
def set_bulb(self, value):
self.__accessoryState = value
if value == 1: # On
self.set_hue(self.__hue)
else:
self.Update_NeoPixel_With_Color(0, 0, 0) # Off
def set_hue(self, value):
# Lets only write the new RGB values if the power is on
# otherwise update the hue value only
if self.__accessoryState == 1:
self.__hue = value
rgb_tuple = self.hsv_to_rgb(
self.__hue, self.__saturation, self.__brightness)
if len(rgb_tuple) == 3:
self.Update_NeoPixel_With_Color(
rgb_tuple[0], rgb_tuple[1], rgb_tuple[2])
else:
self.__hue = value
def set_brightness(self, value):
self.__brightness = value
self.set_hue(self.__hue)
def set_saturation(self, value):
self.__saturation = value
self.set_hue(self.__hue)
def Update_NeoPixel_With_Color(self, red, green, blue):
# For some reason the neopixels I have are G-R-B
# or it could be the neopixel.py library
# Change the setPixelColor inputs for yourself below
for i in range(self.LED_COUNT):
self.__neo_strip.setPixelColor(
i, Color(int(green), int(red), int(blue)))
self.__neo_strip.show()
def stop(self):
super().stop()
def hsv_to_rgb(self, h, s, v):
# This function takes
# h - 0 - 360 Deg
# s - 0 - 100 %
# v - 0 - 100 %
hPri = h / 60
s = s / 100
v = v / 100
if s <= 0.0:
return int(0), int(0), int(0)
C = v * s # Chroma
X = C * (1 - abs(hPri % 2 - 1))
RGB_Pri = [0.0, 0.0, 0.0]
if 0 <= hPri <= 1:
RGB_Pri = [C, X, 0]
elif 1 <= hPri <= 2:
RGB_Pri = [X, C, 0]
elif 2 <= hPri <= 3:
RGB_Pri = [0, C, X]
elif 3 <= hPri <= 4:
RGB_Pri = [0, X, C]
elif 4 <= hPri <= 5:
RGB_Pri = [X, 0, C]
elif 5 <= hPri <= 6:
RGB_Pri = [C, 0, X]
else:
RGB_Pri = [0, 0, 0]
m = v - C
return int((RGB_Pri[0] + m) * 255), int((RGB_Pri[1] + m) * 255), int((RGB_Pri[2] + m) * 255)
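        # Worked examples (values follow from the formula above):
        #   hsv_to_rgb(0, 100, 100)  -> (255, 0, 0)   fully saturated red
        #   hsv_to_rgb(120, 100, 50) -> (0, 127, 0)   half-brightness green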
|
the-stack_106_26034 | import numpy as np
import os.path as osp
import sys
import pyrado
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from panda3d.core import *
from pyrado.environments.sim_base import SimEnv
# Configuration for panda3d-window
confVars = """
win-size 1280 720
framebuffer-multisample 1
multisamples 2
show-frame-rate-meter 1
sync-video 0
threading-model Cull/Cull
"""
loadPrcFileData("", confVars)
class PandaVis(ShowBase):
def __init__(self, rendering: bool):
"""
Constructor
:param rendering: boolean indicating whether to use RenderPipeline or default Panda3d as visualization-module.
"""
super().__init__(self)
self.dir = Filename.fromOsSpecific(pyrado.PANDA_ASSETS_DIR).getFullpath()
# Initialize RenderPipeline
if rendering:
sys.path.insert(0, pyrado.RENDER_PIPELINE_DIR)
from rpcore import RenderPipeline
self.render_pipeline = RenderPipeline()
self.render_pipeline.pre_showbase_init()
self.render_pipeline.set_loading_screen_image(osp.join(self.dir, "logo.png"))
self.render_pipeline.settings["pipeline.display_debugger"] = False
self.render_pipeline.create(self)
self.render_pipeline.daytime_mgr.time = "17:00"
# Activate antialiasing
self.render.setAntialias(AntialiasAttrib.MAuto)
# Set window properties
self.windowProperties = WindowProperties()
self.windowProperties.setForeground(True)
self.windowProperties.setTitle("Experiment")
# Set background color
self.setBackgroundColor(1, 1, 1)
# Configuration of the lighting
self.directionalLight1 = DirectionalLight("directionalLight")
self.directionalLightNP1 = self.render.attachNewNode(self.directionalLight1)
self.directionalLightNP1.setHpr(0, -8, 0)
self.render.setLight(self.directionalLightNP1)
self.directionalLight2 = DirectionalLight("directionalLight")
self.directionalLightNP2 = self.render.attachNewNode(self.directionalLight2)
self.directionalLightNP2.setHpr(180, -20, 0)
self.render.setLight(self.directionalLightNP2)
self.ambientLight = AmbientLight("ambientLight")
self.ambientLightNP = self.render.attachNewNode(self.ambientLight)
self.ambientLight.setColor((0.1, 0.1, 0.1, 1))
self.render.setLight(self.ambientLightNP)
# Create a text node displaying the physic parameters on the top left of the screen
self.text = TextNode("parameters")
self.textNodePath = aspect2d.attachNewNode(self.text)
self.text.setTextColor(0, 0, 0, 1) # black
self.textNodePath.setScale(0.07)
self.textNodePath.setPos(-1.9, 0, 0.9)
# Configure trace
self.trace = LineSegs()
self.trace.setThickness(3)
self.trace.setColor(0.8, 0.8, 0.8) # light grey
self.lines = self.render.attachNewNode("Lines")
self.last_pos = None
        # Add one instance of the update function to the task manager, which starts the animation loop
self.taskMgr.add(self.update, "update")
def update(self, task: Task):
"""
Updates the visualization with every call.
:param task: Needed by panda3d task manager.
:return Task.cont: indicates that task should be called again next frame.
"""
return Task.cont
def reset(self):
"""
        Resets the visualization to a certain state, so that it can be run again. Removes the trace.
"""
self.lines.getChildren().detach()
self.last_pos = None
def draw_trace(self, point):
"""
Draws a line from the last point to the current point
:param point: Current position of pen. Needs 3 value vector.
"""
# Check if trace initialized
if self.last_pos:
# Set starting point of new line
self.trace.moveTo(self.last_pos)
# Draw line to that point
self.trace.drawTo(point)
# Save last position of pen
self.last_pos = point
# Show drawing
self.trace_np = NodePath(self.trace.create())
self.trace_np.reparentTo(self.lines)
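# Hedged usage sketch (assumed driver loop, not taken from this file): an environment's render
# step would typically create the matching visualizer once and then advance the Panda3D task
# manager one frame per simulation step, e.g.
#   vis = BallOnBeamVis(env, rendering=False)
#   vis.taskMgr.step()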
class BallOnBeamVis(PandaVis):
def __init__(self, env: SimEnv, rendering: bool):
"""
Constructor
:param env: environment to visualize
        :param rendering: boolean indicating whether to use RenderPipeline or default Panda3d as visualization-module
"""
super().__init__(rendering)
# Accessing variables of environment class
self._env = env
r_ball = self._env.domain_param["r_ball"]
l_beam = self._env.domain_param["l_beam"]
d_beam = self._env.domain_param["d_beam"]
x = float(self._env.state[0]) # ball position along the beam axis [m]
a = float(self._env.state[1]) # angle [rad]
# Scaling of the animation so the camera can move smoothly
self._scale = 10 / l_beam
# Set window title
self.windowProperties.setTitle("Ball on Beam")
self.win.requestProperties(self.windowProperties)
# Set pov
self.cam.setY(-3.0 * self._scale)
# Ball
self.ball = self.loader.loadModel(osp.join(self.dir, "ball_red.egg"))
self.ball.setScale(r_ball * self._scale)
self.ball.setPos(x * self._scale, 0, (d_beam / 2.0 + r_ball) * self._scale)
self.ball.reparentTo(self.render)
# Beam
self.beam = self.loader.loadModel(osp.join(self.dir, "cube_green.egg"))
self.beam.setScale(l_beam / 2 * self._scale, d_beam * self._scale, d_beam / 2 * self._scale)
self.beam.setR(-a * 180 / np.pi)
self.beam.reparentTo(self.render)
def update(self, task: Task):
# Accessing the current parameter values
g = self._env.domain_param["g"]
m_ball = self._env.domain_param["m_ball"]
r_ball = self._env.domain_param["r_ball"]
m_beam = self._env.domain_param["m_beam"]
l_beam = self._env.domain_param["l_beam"]
d_beam = self._env.domain_param["d_beam"]
ang_offset = self._env.domain_param["ang_offset"]
c_frict = self._env.domain_param["c_frict"]
x = float(self._env.state[0]) # ball position along the beam axis [m]
a = float(self._env.state[1]) # angle [rad]
ball_pos = (
(np.cos(a) * x - np.sin(a) * (d_beam / 2.0 + r_ball)) * self._scale,
0,
(np.sin(a) * x + np.cos(a) * (d_beam / 2.0 + r_ball)) * self._scale,
)
# Update position of ball
self.ball.setPos(ball_pos)
# Draw trace
self.draw_trace(ball_pos)
# Update rotation of joint
self.beam.setR(-a * 180 / np.pi)
# Update displayed text
self.text.setText(
f"""
dt: {self._env._dt : 1.4f}
g: {g : 1.3f}
m_ball: {m_ball: 1.2f}
r_ball: {r_ball : 1.3f}
m_beam: {m_beam : 1.2f}
l_beam: {l_beam : 1.2f}
d_beam: {d_beam : 1.2f}
c_frict: {c_frict : 1.3f}
ang_offset: {ang_offset : 1.3f}
"""
)
return Task.cont
class OneMassOscillatorVis(PandaVis):
def __init__(self, env: SimEnv, rendering: bool):
"""
Constructor
        :param env: environment to visualize
        :param rendering: boolean indicating whether to use RenderPipeline or default Panda3d as visualization-module
"""
super().__init__(rendering)
# Accessing variables of environment class
self._env = env
c = 0.1 * self._env.obs_space.bound_up[0]
# Scaling of the animation so the camera can move smoothly
self._scale = 5 / c
# Set window title
self.windowProperties.setTitle("One Mass Oscillator")
self.win.requestProperties(self.windowProperties)
# Set pov
self.cam.setY(-5 * self._scale)
# Ground
self.ground = self.loader.loadModel(osp.join(self.dir, "cube_green.egg"))
self.ground.setPos(0, 0, -0.02 * self._scale)
self.ground.setScale(self._env.obs_space.bound_up[0] * self._scale, 1.5 * c * self._scale, 0.01 * self._scale)
self.ground.reparentTo(self.render)
# Object
self.mass = self.loader.loadModel(osp.join(self.dir, "cube_blue.egg"))
self.mass.setPos(self._env.state[0] * self._scale, 0, c / 2.0 * self._scale)
self.mass.setScale(c * 0.5 * self._scale, c * 0.5 * self._scale, c * 0.5 * self._scale)
self.mass.reparentTo(self.render)
# Desired state
self.des = self.loader.loadModel(osp.join(self.dir, "cube_green.egg"))
self.des.setPos(self._env._task.state_des[0] * self._scale, 0, 0.4 * c * self._scale)
self.des.setScale(0.4 * c * self._scale, 0.4 * c * self._scale, 0.4 * c * self._scale)
self.des.setTransparency(1)
self.des.setColorScale(1, 0, 0, 0.5)
self.des.reparentTo(self.render)
# Force
self.force = self.loader.loadModel(osp.join(self.dir, "arrow_red.egg"))
self.force.setPos(self._env.state[0] * self._scale, 0, c / 2.0 * self._scale)
self.force.setScale(
0.1 * self._env._curr_act / 10.0 * self._scale, 0.1 * c * self._scale, 0.1 * c * self._scale
)
self.force.reparentTo(self.render)
# Spring
self.spring = self.loader.loadModel(osp.join(self.dir, "spring_orange.egg"))
self.spring.setPos(0, 0, c / 2.0 * self._scale)
self.spring.setScale(
(self._env.state[0] - c / 2.0) / 7.3 * self._scale, c / 6.0 * self._scale, c / 6.0 * self._scale
)
self.spring.reparentTo(self.render)
def update(self, task: Task):
# Accessing the current parameter values
m = self._env.domain_param["m"]
k = self._env.domain_param["k"]
d = self._env.domain_param["d"]
c = 0.1 * self._env.obs_space.bound_up[0]
# Update position of mass
pos_mass = (self._env.state[0] * self._scale, 0, c / 2.0 * self._scale)
self.mass.setPos(pos_mass)
# And force
self.force.setPos(self._env.state[0] * self._scale, 0, c / 2.0 * self._scale)
# Update scale of force
capped_act = np.sign(self._env._curr_act) * max(0.1 * np.abs(self._env._curr_act), 0.3)
if capped_act == 0:
self.force.setSx(0.00001) # has_mat error if scale = 0
else:
self.force.setSx(capped_act / 10.0 * self._scale)
# Update scale of spring
self.spring.setSx((self._env.state[0] - c / 2.0) / 7.3 * self._scale)
# Update displayed text
self.text.setText(
f"""
mass_x: {np.round(self.mass.getX(), 3)}
spring_Sx: {np.round(self.spring.getSx(), 3)}
dt: {self._env.dt :1.4f}
m: {m : 1.3f}
k: {k : 2.2f}
d: {d : 1.3f}
"""
)
return Task.cont
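# Hedged usage sketch: a PandaVis subclass is driven by Panda3D's task manager, which keeps
# calling update() for as long as it returns Task.cont. The environment class below is
# hypothetical and only illustrates the expected SimEnv-style interface.
# env = OneMassOscillatorSim(dt=0.01)              # hypothetical SimEnv instance
# vis = OneMassOscillatorVis(env, rendering=False)
# vis.taskMgr.add(vis.update, "update")            # ShowBase task loop drives update()
# vis.run()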
class PendulumVis(PandaVis):
def __init__(self, env: SimEnv, rendering: bool):
"""
Constructor
:param env: environment to visualize
:param rendering: rendering mode flag forwarded to the PandaVis base class
"""
super().__init__(rendering)
# Accessing variables of environment class
self._env = env
th, _ = self._env.state
l_pole = float(self._env.domain_param["l_pole"])
r_pole = 0.05
# Scaling of the animation so the camera can move smoothly
self._scale = 10 / l_pole
# Set window title
self.windowProperties.setTitle("Pendulum")
self.win.requestProperties(self.windowProperties)
# Set pov
self.cam.setY(-20 * self._scale)
# Joint
self.joint = self.loader.loadModel(osp.join(self.dir, "ball_grey.egg"))
self.joint.setPos(0, r_pole * self._scale, 0)
self.joint.setScale(r_pole * self._scale, r_pole * self._scale, r_pole * self._scale)
self.joint.reparentTo(self.render)
# Pole
self.pole = self.loader.loadModel(osp.join(self.dir, "cylinder_top_red.egg"))
self.pole.setPos(0, r_pole * self._scale, 0)
self.pole.setScale(r_pole * self._scale, r_pole * self._scale, 2 * l_pole * self._scale)
self.pole.reparentTo(self.render)
def update(self, task: Task):
# Accessing the current parameter values
th, _ = self._env.state
g = self._env.domain_param["g"]
m_pole = self._env.domain_param["m_pole"]
l_pole = float(self._env.domain_param["l_pole"])
d_pole = self._env.domain_param["d_pole"]
tau_max = self._env.domain_param["tau_max"]
# Update position and rotation of pole
self.pole.setR(-th * 180 / np.pi)
# Get position of pole
pole_pos = self.pole.getPos(self.render)
# Calculate position of new point
current_pos = (
pole_pos[0] + 4 * l_pole * np.sin(th) * self._scale,
pole_pos[1],
pole_pos[2] - 4 * l_pole * np.cos(th) * self._scale,
)
# Update displayed text
self.text.setText(
f"""
dt: {self._env._dt :1.4f}
theta: {self._env.state[0] * 180 / np.pi : 2.3f}
sin theta: {np.sin(self._env.state[0]) : 1.3f}
cos theta: {np.cos(self._env.state[0]) : 1.3f}
theta_dot: {self._env.state[1] * 180 / np.pi : 2.3f}
tau: {self._env._curr_act[0] : 1.3f}
g: {g : 1.3f}
m_pole: {m_pole : 1.3f}
l_pole: {l_pole : 1.3f}
d_pole: {d_pole : 1.3f}
tau_max: {tau_max: 1.3f}
"""
)
return Task.cont
class QBallBalancerVis(PandaVis):
def __init__(self, env: SimEnv, rendering: bool):
"""
Constructor
:param env: environment to visualize
:param rendering: rendering mode flag forwarded to the PandaVis base class
"""
super().__init__(rendering)
# Accessing variables of environment class
self._env = env
l_plate = self._env.domain_param["l_plate"]
r_ball = self._env.domain_param["r_ball"]
# Only for animation
d_plate = 0.01
r_pole = 0.005
l_pole = 0.02
# Scaling of the animation so the camera can move smoothly
self._scale = 1 / r_pole
# Set window title
self.windowProperties.setTitle("Quanser Ball Balancer")
self.win.requestProperties(self.windowProperties)
# Set pov
self.cam.setY(-1.3 * self._scale)
# Ball
self.ball = self.loader.loadModel(osp.join(self.dir, "ball_red.egg"))
self.ball.setPos(
self._env.state[2] * self._scale, self._env.state[3] * self._scale, (r_ball + d_plate / 2.0) * self._scale
)
self.ball.setScale(r_ball * self._scale)
self.ball.reparentTo(self.render)
# Plate
self.plate = self.loader.loadModel(osp.join(self.dir, "cube_blue.egg"))
self.plate.setScale(l_plate * 0.5 * self._scale, l_plate * 0.5 * self._scale, d_plate * 0.5 * self._scale)
self.plate.reparentTo(self.render)
# Joint
self.joint = self.loader.loadModel(osp.join(self.dir, "ball_grey.egg"))
self.joint.setPos(0, 0, -d_plate * self._scale)
self.joint.setScale(r_pole * self._scale, r_pole * self._scale, r_pole * self._scale)
self.joint.reparentTo(self.render)
# Pole
self.pole = self.loader.loadModel(osp.join(self.dir, "cylinder_top_grey.egg"))
self.pole.setPos(0, 0, -d_plate * self._scale)
self.pole.setScale(r_pole * self._scale, r_pole * self._scale, l_pole * self._scale)
self.pole.reparentTo(self.render)
# Null_plate
self.null_plate = self.loader.loadModel(osp.join(self.dir, "cube_grey.egg"))
self.null_plate.setPos(0, 0, -2.5 * l_pole * self._scale)
self.null_plate.setScale(
l_plate * 1.5 * 0.5 * self._scale, l_plate * 1.5 * 0.5 * self._scale, d_plate / 20.0 * self._scale
)
self.null_plate.reparentTo(self.render)
def update(self, task: Task):
# Accessing the current parameter values
g = self._env.domain_param["g"]
l_plate = self._env.domain_param["l_plate"]
m_ball = self._env.domain_param["m_ball"]
r_ball = self._env.domain_param["r_ball"]
eta_g = self._env.domain_param["eta_g"]
eta_m = self._env.domain_param["eta_m"]
K_g = self._env.domain_param["K_g"]
J_m = self._env.domain_param["J_m"]
J_l = self._env.domain_param["J_l"]
r_arm = self._env.domain_param["r_arm"]
k_m = self._env.domain_param["k_m"]
R_m = self._env.domain_param["R_m"]
B_eq = self._env.domain_param["B_eq"]
c_frict = self._env.domain_param["c_frict"]
V_thold_x_neg = self._env.domain_param["V_thold_x_neg"]
V_thold_x_pos = self._env.domain_param["V_thold_x_pos"]
V_thold_y_neg = self._env.domain_param["V_thold_y_neg"]
V_thold_y_pos = self._env.domain_param["V_thold_y_pos"]
offset_th_x = self._env.domain_param["offset_th_x"]
offset_th_y = self._env.domain_param["offset_th_y"]
d_plate = 0.01 # only for animation
# Get ball position
x = self._env.state[2]
y = self._env.state[3]
# Compute plate orientation
a_vp = -self._env.plate_angs[0]
b_vp = self._env.plate_angs[1]
# Update rotation of plate
self.plate.setR(-a_vp * 180 / np.pi)
self.plate.setP(b_vp * 180 / np.pi)
# Update position of ball
ball_pos = (
x * np.cos(a_vp) * self._scale,
y * np.cos(b_vp) * self._scale,
(r_ball + x * np.sin(a_vp) + y * np.sin(b_vp) + np.cos(a_vp) * d_plate / 2.0) * self._scale,
)
self.ball.setPos(ball_pos)
# Draw line to that point
self.draw_trace(ball_pos)
# Update displayed text
self.text.setText(
f"""
x-axis is pos to the right, y-axis is pos up
Commanded voltage: x servo : {self._env._curr_act[0] : 1.2f}, y servo : {self._env._curr_act[1] : 1.2f}
Plate angle around x axis: {self._env.plate_angs[1] * 180 / np.pi : 2.2f}
Plate angle around y axis: {self._env.plate_angs[0] * 180 / np.pi : 2.2f}
Shaft angles: {self._env.state[0] * 180 / np.pi : 2.2f}, {self._env.state[1] * 180 / np.pi : 2.2f}
Ball position: {x : 1.3f}, {y : 1.3f}
g: {g : 1.3f}
m_ball: {m_ball : 1.3f}
r_ball: {r_ball : 1.3f}
r_arm: {r_arm : 1.3f}
l_plate: {l_plate : 1.3f}
K_g: {K_g : 2.2f}
J_m: {J_m : 1.7f}
J_l: {J_l : 1.6f}
eta_g: {eta_g : 1.3f}
eta_m: {eta_m : 1.3f}
k_mt: {k_m : 1.3f}
R_m: {R_m : 1.3f}
B_eq: {B_eq : 1.3f}
c_frict: {c_frict : 1.3f}
V_thold_x_pos: {V_thold_x_pos : 2.3f}
V_thold_x_neg: {V_thold_x_neg : 2.3f}
V_thold_y_pos: {V_thold_y_pos : 2.3f}
V_thold_y_neg: {V_thold_y_neg : 2.3f}
offset_th_x: {offset_th_x : 2.3f}
offset_th_y: {offset_th_y : 2.3f}
"""
)
return Task.cont
class QCartPoleVis(PandaVis):
"""
Visualization for QCartPoleSim
"""
def __init__(self, env: SimEnv, rendering: bool):
"""
Constructor
:param env: environment to visualize
:param rendering: rendering mode flag forwarded to the PandaVis base class
"""
super().__init__(rendering)
# Accessing variables of environment class
self._env = env
x, th, _, _ = self._env.state
l_pole = float(self._env.domain_param["l_pole"])
l_rail = float(self._env.domain_param["l_rail"])
# Only for animation
l_cart, h_cart = 0.08, 0.08
r_pole, r_rail = 0.01, 0.005
# Scaling of the animation so the camera can move smoothly
self._scale = 10 / l_pole
# Set window title
self.windowProperties.setTitle("Quanser Cartpole")
self.win.requestProperties(self.windowProperties)
# Set pov
self.cam.setY(-5 * self._scale)
# Rail
self.rail = self.loader.loadModel(osp.join(self.dir, "cylinder_middle_grey.egg"))
self.rail.setPos(0, 0, (-h_cart - r_rail) * self._scale)
self.rail.setScale(r_rail * self._scale, r_rail * self._scale, l_rail * self._scale)
self.rail.reparentTo(self.render)
self.rail.setR(90)
# Cart
self.cart = self.loader.loadModel(osp.join(self.dir, "cube_green.egg"))
self.cart.setX(x * self._scale)
self.cart.setScale(l_cart * self._scale, h_cart / 2 * self._scale, h_cart * self._scale)
self.cart.reparentTo(self.render)
# Joint
self.joint = self.loader.loadModel(osp.join(self.dir, "ball_grey.egg"))
self.joint.setPos(x * self._scale, (-r_pole - h_cart / 2) * self._scale, 0)
self.joint.setScale(r_pole * self._scale)
self.joint.reparentTo(self.render)
# Pole
self.pole = self.loader.loadModel(osp.join(self.dir, "cylinder_top_red.egg"))
self.pole.setPos(x * self._scale, (-r_pole - h_cart / 2) * self._scale, 0)
self.pole.setScale(r_pole * self._scale, r_pole * self._scale, 2 * l_pole * self._scale)
self.pole.reparentTo(self.render)
def update(self, task: Task):
# Accessing the current parameter values
x, th, _, _ = self._env.state
g = self._env.domain_param["g"]
m_cart = self._env.domain_param["m_cart"]
m_pole = self._env.domain_param["m_pole"]
l_pole = float(self._env.domain_param["l_pole"])
l_rail = float(self._env.domain_param["l_rail"])
eta_m = self._env.domain_param["eta_m"]
eta_g = self._env.domain_param["eta_g"]
K_g = self._env.domain_param["K_g"]
J_m = self._env.domain_param["J_m"]
R_m = self._env.domain_param["R_m"]
k_m = self._env.domain_param["k_m"]
r_mp = self._env.domain_param["r_mp"]
B_eq = self._env.domain_param["B_eq"]
B_pole = self._env.domain_param["B_pole"]
# Update position of Cart, Joint and Pole
self.cart.setX(x * self._scale)
self.joint.setX(x * self._scale)
self.pole.setX(x * self._scale)
# Update rotation of Pole
self.pole.setR(-th * 180 / np.pi)
# Get position of pole
pole_pos = self.pole.getPos(self.render)
# Calculate position of new point
current_pos = (
pole_pos[0] + 4 * l_pole * np.sin(th) * self._scale,
pole_pos[1],
pole_pos[2] - 4 * l_pole * np.cos(th) * self._scale,
)
# Draw line to that point
self.draw_trace(current_pos)
# Update displayed text
self.text.setText(
f"""
theta: {self._env.state[1] * 180 / np.pi : 2.3f}
dt: {self._env._dt :1.4f}
g: {g : 1.3f}
m_cart: {m_cart : 1.4f}
l_rail: {l_rail : 1.3f}
l_pole: {l_pole : 1.3f}
eta_m: {eta_m : 1.3f}
eta_g: {eta_g : 1.3f}
K_g: {K_g : 1.3f}
J_m: {J_m : 1.8f}
r_mp: {r_mp : 1.4f}
R_m: {R_m : 1.3f}
k_m: {k_m : 1.6f}
B_eq: {B_eq : 1.2f}
B_pole: {B_pole : 1.3f}
m_pole: {m_pole : 1.3f}
"""
)
return Task.cont
class QQubeVis(PandaVis):
def __init__(self, env: SimEnv, rendering: bool):
"""
Constructor
:param env: environment to visualize
:param rendering: rendering mode flag forwarded to the PandaVis base class
"""
super().__init__(rendering)
# Accessing variables of environment class
self._env = env
Lr = self._env.domain_param["Lr"]
Lp = self._env.domain_param["Lp"]
# Only for animation
arm_radius = 0.003
pole_radius = 0.0045
# Scaling of the animation so the camera can move smoothly
self._scale = 20 / Lp
# Set window title
self.windowProperties.setTitle("Quanser Qube")
self.win.requestProperties(self.windowProperties)
# Set pov
self.cam.setPos(-0.4 * self._scale, -1.3 * self._scale, 0.4 * self._scale)
self.cam.setHpr(-20, -10, 0)
# Box
self.box = self.loader.loadModel(osp.join(self.dir, "cube_green.egg"))
self.box.setPos(0, 0.07 * self._scale, 0)
self.box.setScale(0.09 * self._scale, 0.1 * self._scale, 0.09 * self._scale)
self.box.reparentTo(self.render)
# Cylinder
self.cylinder = self.loader.loadModel(osp.join(self.dir, "cylinder_middle_grey.egg"))
self.cylinder.setScale(0.005 * self._scale, 0.005 * self._scale, 0.03 * self._scale)
self.cylinder.setPos(0, 0.07 * self._scale, 0.12 * self._scale)
self.cylinder.reparentTo(self.render)
# Joint 1
self.joint1 = self.loader.loadModel(osp.join(self.dir, "ball_grey.egg"))
self.joint1.setScale(0.005 * self._scale)
self.joint1.setPos(0.0, 0.07 * self._scale, 0.15 * self._scale)
self.joint1.reparentTo(self.render)
# Arm
self.arm = self.loader.loadModel(osp.join(self.dir, "cylinder_top_blue.egg"))
self.arm.setScale(arm_radius * self._scale, arm_radius * self._scale, Lr * self._scale)
self.arm.setP(90)
self.arm.setPos(0, 0.07 * self._scale, 0.15 * self._scale)
self.arm.reparentTo(self.render)
# Joint 2
self.joint2 = self.loader.loadModel(osp.join(self.dir, "ball_grey.egg"))
self.joint2.setScale(pole_radius * self._scale)
self.joint2.setPos(0.0, (0.07 + 2 * Lr) * self._scale, 0.15 * self._scale)
self.joint2.wrtReparentTo(self.arm)
# Pole
self.pole = self.loader.loadModel(osp.join(self.dir, "cylinder_bottom_red.egg"))
self.pole.setScale(pole_radius * self._scale, pole_radius * self._scale, Lp * self._scale)
self.pole.setPos(0, (0.07 + 2 * Lr) * self._scale, 0.15 * self._scale)
self.pole.wrtReparentTo(self.arm)
def update(self, task: Task):
# Accessing the current parameter values
g = self._env.domain_param["g"]
Mr = self._env.domain_param["Mr"]
Mp = self._env.domain_param["Mp"]
Lr = float(self._env.domain_param["Lr"])
Lp = float(self._env.domain_param["Lp"])
km = self._env.domain_param["km"]
Rm = self._env.domain_param["Rm"]
Dr = self._env.domain_param["Dr"]
Dp = self._env.domain_param["Dp"]
th, al, _, _ = self._env.state
# Update rotation of arm
self.arm.setH(th * 180 / np.pi - 180)
# Update rotation of pole
self.pole.setR(al * 180 / np.pi)
# Get position of pole
pole_pos = self.pole.getPos(self.render)
# Calculate position of new point
current_pos = (
pole_pos[0] + 2 * Lp * np.sin(al) * np.cos(th) * self._scale,
pole_pos[1] + 2 * Lp * np.sin(al) * np.sin(th) * self._scale,
pole_pos[2] - 2 * Lp * np.cos(al) * self._scale,
)
# Draw line to that point
self.draw_trace(current_pos)
# Update displayed text
self.text.setText(
f"""
theta: {self._env.state[0] * 180 / np.pi : 3.1f}
alpha: {self._env.state[1] * 180 / np.pi : 3.1f}
dt: {self._env._dt :1.4f}
g: {g : 1.3f}
Mr: {Mr : 1.4f}
Mp: {Mp : 1.4f}
Lr: {Lr : 1.4f}
Lp: {Lp : 1.4f}
Dr: {Dr : 1.7f}
Dp: {Dp : 1.7f}
Rm: {Rm : 1.3f}
km: {km : 1.4f}
"""
)
return Task.cont
|
the-stack_106_26035 | import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
# Module for residual/skip connections
class FCResBlock(nn.Module):
def __init__(self, dim, n, nonlinearity, batch_norm=True):
"""
:param dim:
:param n:
:param nonlinearity:
"""
super(FCResBlock, self).__init__()
self.n = n
self.nonlinearity = nonlinearity
self.batch_norm = batch_norm
if self.batch_norm:
self.block = nn.ModuleList(
[nn.ModuleList([nn.Linear(dim, dim), nn.BatchNorm1d(num_features=dim)])
for _ in range(self.n)]
)
else:
self.block = nn.ModuleList([nn.Linear(dim, dim) for _ in range(self.n)])
def forward(self, x):
e = x + 0  # copy the input so the residual connection at the end adds the original x
if self.batch_norm:
for i, pair in enumerate(self.block):
fc, bn = pair
e = fc(e)
e = bn(e)
if i < (self.n - 1):
e = self.nonlinearity(e)
else:
for i, layer in enumerate(self.block):
e = layer(e)
if i < (self.n - 1):
e = self.nonlinearity(e)
return self.nonlinearity(e + x)
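# Hedged usage sketch for FCResBlock (the sizes below are illustrative, not taken from any
# experiment config); the helper is never called and only documents the expected shapes.
def _fc_res_block_example():
    block = FCResBlock(dim=128, n=2, nonlinearity=F.elu, batch_norm=False)
    x = torch.randn(16, 128)
    return block(x)  # residual output with the same shape as the input, (16, 128)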
# Building block for convolutional encoder with same padding
class Conv2d3x3(nn.Module):
def __init__(self, in_channels, out_channels, downsample=False):
super(Conv2d3x3, self).__init__()
stride = 2 if downsample else 1
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=1, stride=stride)
def forward(self, x):
return self.conv(x)
# SHARED CONVOLUTIONAL ENCODER
class SharedConvolutionalEncoder(nn.Module):
def __init__(self, nonlinearity):
super(SharedConvolutionalEncoder, self).__init__()
self.nonlinearity = nonlinearity
self.conv_layers = nn.ModuleList([
Conv2d3x3(in_channels=1, out_channels=64),
Conv2d3x3(in_channels=64, out_channels=64),
Conv2d3x3(in_channels=64, out_channels=64, downsample=True),
# shape is now (-1, 64, 14 , 14)
Conv2d3x3(in_channels=64, out_channels=128),
Conv2d3x3(in_channels=128, out_channels=128),
Conv2d3x3(in_channels=128, out_channels=128, downsample=True),
# shape is now (-1, 128, 7, 7)
Conv2d3x3(in_channels=128, out_channels=256),
Conv2d3x3(in_channels=256, out_channels=256),
Conv2d3x3(in_channels=256, out_channels=256, downsample=True)
# shape is now (-1, 256, 4, 4)
])
self.bn_layers = nn.ModuleList([
nn.BatchNorm2d(num_features=64),
nn.BatchNorm2d(num_features=64),
nn.BatchNorm2d(num_features=64),
nn.BatchNorm2d(num_features=128),
nn.BatchNorm2d(num_features=128),
nn.BatchNorm2d(num_features=128),
nn.BatchNorm2d(num_features=256),
nn.BatchNorm2d(num_features=256),
nn.BatchNorm2d(num_features=256),
])
def forward(self, x):
h = x.view(-1, 1, 28, 28)
for conv, bn in zip(self.conv_layers, self.bn_layers):
h = conv(h)
h = bn(h)
h = self.nonlinearity(h)
return h
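# Hedged shape sketch for the shared encoder (assumes 28x28 single-channel inputs, as
# implied by the view() call in forward()); never called, illustration only.
def _shared_encoder_example():
    encoder = SharedConvolutionalEncoder(nonlinearity=F.elu)
    x = torch.randn(8, 1, 28, 28)  # (batch * sample_size, channels, height, width)
    return encoder(x)              # -> shape (8, 256, 4, 4) after three stride-2 downsamples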
# PRE-POOLING FOR STATISTIC NETWORK
class PrePool(nn.Module):
"""
"""
def __init__(self, batch_size, n_features, n_hidden, hidden_dim, nonlinearity):
super(PrePool, self).__init__()
self.batch_size = batch_size
self.n_features = n_features
self.n_hidden = n_hidden
self.hidden_dim = hidden_dim
self.nonlinearity = nonlinearity
# modules
self.fc = nn.Linear(self.n_features, self.hidden_dim)
self.bn = nn.BatchNorm1d(self.hidden_dim)
def forward(self, h):
# reshape and affine
e = h.view(-1, self.n_features)
e = self.fc(e)
e = self.bn(e)
e = self.nonlinearity(e)
return e
# POST POOLING FOR STATISTIC NETWORK
class PostPool(nn.Module):
"""
"""
def __init__(self, n_hidden, hidden_dim, c_dim, nonlinearity):
super(PostPool, self).__init__()
self.n_hidden = n_hidden
self.hidden_dim = hidden_dim + 1  # +1 for the retained-sample-count feature appended before pooling
self.c_dim = c_dim
self.nonlinearity = nonlinearity
# modules
self.fc_layers = nn.ModuleList([nn.Linear(self.hidden_dim, self.hidden_dim),
nn.Linear(self.hidden_dim, self.hidden_dim)])
self.bn_layers = nn.ModuleList([nn.BatchNorm1d(self.hidden_dim),
nn.BatchNorm1d(self.hidden_dim)])
self.fc_params = nn.Linear(self.hidden_dim, 2 * self.c_dim)
self.bn_params = nn.BatchNorm1d(1, eps=1e-3, momentum=1e-2)
def forward(self, e):
for fc, bn in zip(self.fc_layers, self.bn_layers):
e = fc(e)
e = bn(e)
e = self.nonlinearity(e)
# affine transformation to parameters
e = self.fc_params(e)
# 'global' batch norm
e = e.view(-1, 1, 2 * self.c_dim)
e = self.bn_params(e)
e = e.view(-1, 2 * self.c_dim)
mean, logvar = e[:, :self.c_dim], e[:, self.c_dim:]
return mean, logvar
# STATISTIC NETWORK q(c|D)
class StatisticNetwork(nn.Module):
"""
"""
def __init__(self, batch_size, sample_size, n_features,
n_hidden, hidden_dim, c_dim, nonlinearity):
super(StatisticNetwork, self).__init__()
self.batch_size = batch_size
self.sample_size = sample_size
self.n_features = n_features
self.n_hidden = n_hidden
self.hidden_dim = hidden_dim
self.c_dim = c_dim
self.nonlinearity = nonlinearity
# modules
self.prepool = PrePool(self.batch_size, self.n_features,
self.n_hidden, self.hidden_dim, self.nonlinearity)
self.postpool = PostPool(self.n_hidden, self.hidden_dim,
self.c_dim, self.nonlinearity)
def forward(self, h, summarize=False, single_sample=False):
e = self.prepool(h)
if summarize or single_sample or not self.training:
ef = Variable(torch.ones(e.shape[0],1).cuda())
e = torch.cat([e, ef], -1)
else:
e = self.sample_dropout(e)
e = self.pool(e, summarize, single_sample)
e = self.postpool(e)
return e
def pool(self, e, summarize=False, single_sample=False):
if summarize:
e = e.view(1, -1, self.hidden_dim + 1)
else:
if single_sample:
e = e.view(-1, 1, self.hidden_dim + 1)
else:
e = e.view(-1, self.sample_size, self.hidden_dim + 1)
e = e.mean(1).view(-1, self.hidden_dim + 1)
return e
def sample_dropout(self, e):
# create mask
a = Variable(torch.ones((self.batch_size, 1, 1)).cuda())
p = 0.8 if self.training else 1
b = Variable(torch.bernoulli(p * torch.ones((self.batch_size,
self.sample_size - 1, 1)).cuda()))
mask = torch.cat([a, b], 1)
sample_size = self.sample_size
e = e.view(self.batch_size, sample_size, self.hidden_dim)
# zero out samples
e = e * mask.expand_as(e)
extra_feature = torch.sum(mask, 1)
extra_feature = extra_feature.repeat(1, sample_size).unsqueeze(2)
# add number of retained samples as extra feature
cc = torch.cat([e, extra_feature], 2)
cc = cc * mask.expand_as(cc)
return cc
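# Hedged usage sketch for the statistic network (sizes are illustrative; note that forward()
# creates CUDA tensors internally, so a GPU is required to actually run this helper).
def _statistic_network_example():
    stat_net = StatisticNetwork(batch_size=4, sample_size=5, n_features=256 * 4 * 4,
                                n_hidden=3, hidden_dim=256, c_dim=64,
                                nonlinearity=F.elu).cuda()
    h = torch.randn(4 * 5, 256 * 4 * 4).cuda()  # flattened encoder features, one row per sample
    c_mean, c_logvar = stat_net(h)              # parameters of q(c|D), each of shape (4, 64)
    return c_mean, c_logvar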
class InferenceNetwork(nn.Module):
"""
Inference network q(z_i | h, z_{i-1}, c): approximate posterior over the latent variables of one stochastic layer, conditioned on the encoded sample h, the previous layer's z (if any), and the context c.
"""
def __init__(self, batch_size, sample_size, n_features,
n_hidden, hidden_dim, c_dim, z_dim, nonlinearity):
super(InferenceNetwork, self).__init__()
self.batch_size = batch_size
self.sample_size = sample_size
self.n_features = n_features
self.n_hidden = n_hidden
self.hidden_dim = hidden_dim
self.c_dim = c_dim
self.z_dim = z_dim
self.nonlinearity = nonlinearity
# modules
self.fc_h = nn.Linear(self.n_features, self.hidden_dim)
self.fc_c = nn.Linear(self.c_dim, self.hidden_dim)
self.fc_z = nn.Linear(self.z_dim, self.hidden_dim)
self.fc_res_block = FCResBlock(dim=self.hidden_dim*3, n=self.n_hidden,
nonlinearity=self.nonlinearity, batch_norm=True)
self.fc_params = nn.Linear(3*self.hidden_dim, 2 * self.z_dim)
self.bn_params = nn.BatchNorm1d(1, eps=1e-3, momentum=1e-2)
def forward(self, h, z, c):
# combine h, z, and c
# embed h
eh = h.view(-1, self.n_features)
eh = self.fc_h(eh)
eh = eh.view(self.batch_size, self.sample_size, self.hidden_dim)
# embed z if we have more than one stochastic layer
if z is not None:
ez = z.view(-1, self.z_dim)
ez = self.fc_z(ez)
ez = ez.view(self.batch_size, self.sample_size, self.hidden_dim)
else:
ez = Variable(torch.zeros(eh.size()).cuda())
# embed c and expand for broadcast addition
ec = self.fc_c(c)
ec = ec.view(self.batch_size, 1, self.hidden_dim).expand_as(eh)
# sum and reshape
e = torch.cat([eh, ez, ec],2)
e = e.view(self.batch_size * self.sample_size, 3 * self.hidden_dim)
e = self.nonlinearity(e)
# for layer in self.fc_block:
e = self.fc_res_block(e)
# affine transformation to parameters
e = self.fc_params(e)
# 'global' batch norm
e = e.view(-1, 1, 2 * self.z_dim)
e = self.bn_params(e)
e = e.view(-1, 2 * self.z_dim)
mean, logvar = e[:, :self.z_dim].contiguous(), e[:, self.z_dim:].contiguous()
return mean, logvar
# LATENT DECODER p(z|z, c)
class LatentDecoder(nn.Module):
"""
"""
def __init__(self, batch_size, sample_size, n_features,
n_hidden, hidden_dim, c_dim, z_dim, nonlinearity):
super(LatentDecoder, self).__init__()
self.batch_size = batch_size
self.sample_size = sample_size
self.n_features = n_features
self.n_hidden = n_hidden
self.hidden_dim = hidden_dim
self.c_dim = c_dim
self.z_dim = z_dim
self.nonlinearity = nonlinearity
# modules
self.fc_c = nn.Linear(self.c_dim, self.hidden_dim)
self.fc_z = nn.Linear(self.z_dim, self.hidden_dim)
self.fc_res_block = FCResBlock(dim=2*self.hidden_dim, n=self.n_hidden,
nonlinearity=self.nonlinearity, batch_norm=False)
self.fc_params = nn.Linear(2*self.hidden_dim, 2 * self.z_dim)
self.bn_params = nn.BatchNorm1d(1, eps=1e-3, momentum=1e-2)
def forward(self, z, c):
# combine z and c
# embed z if we have more than one stochastic layer
if z is not None:
ez = z.view(-1, self.z_dim)
ez = self.fc_z(ez)
ez = ez.view(self.batch_size, self.sample_size, self.hidden_dim)
else:
ez = Variable(torch.zeros(self.batch_size, 1, self.hidden_dim).cuda())
# embed c and expand for broadcast addition
ec = self.fc_c(c)
ec = ec.view(self.batch_size, 1, self.hidden_dim).expand_as(ez)
# sum and reshape
e = torch.cat([ez, ec],2)
e = e.view(-1, 2*self.hidden_dim)
e = self.nonlinearity(e)
# for layer in self.fc_block:
e = self.fc_res_block(e)
# affine transformation to parameters
e = self.fc_params(e)
# 'global' batch norm
e = e.view(-1, 1, 2 * self.z_dim)
e = self.bn_params(e)
e = e.view(-1, 2 * self.z_dim)
mean, logvar = e[:, :self.z_dim].contiguous(), e[:, self.z_dim:].contiguous()
return mean, logvar
# Observation Decoder p(x|z, c)
class ObservationDecoder(nn.Module):
"""
"""
def __init__(self, batch_size, sample_size, n_features,
n_hidden, hidden_dim, c_dim, n_stochastic, z_dim,
nonlinearity):
super(ObservationDecoder, self).__init__()
self.batch_size = batch_size
self.sample_size = sample_size
self.n_features = n_features
self.n_hidden = n_hidden
self.hidden_dim = hidden_dim
self.c_dim = c_dim
self.n_stochastic = n_stochastic
self.z_dim = z_dim
self.nonlinearity = nonlinearity
# modules
self.fc_zs = nn.Linear(self.n_stochastic * self.z_dim, self.hidden_dim)
self.fc_c = nn.Linear(self.c_dim, self.hidden_dim)
self.fc_initial = nn.Linear(2*self.hidden_dim, 256 * 4 * 4)
self.conv_layers = nn.ModuleList([
Conv2d3x3(256, 256),
Conv2d3x3(256, 256),
nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2),
Conv2d3x3(256, 128),
Conv2d3x3(128, 128),
nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2),
Conv2d3x3(128, 64),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),
nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2)
])
self.bn_layers = nn.ModuleList([
nn.BatchNorm2d(256),
nn.BatchNorm2d(256),
nn.BatchNorm2d(256),
nn.BatchNorm2d(128),
nn.BatchNorm2d(128),
nn.BatchNorm2d(128),
nn.BatchNorm2d(64),
nn.BatchNorm2d(64),
nn.BatchNorm2d(64),
])
self.conv_final = nn.Conv2d(64, 1, kernel_size=1)
def forward(self, zs, c):
# concatenate zs and c
ezs = self.fc_zs(zs)
ezs = ezs.view(self.batch_size, self.sample_size, self.hidden_dim)
ec = self.fc_c(c)
ec = ec.view(self.batch_size, 1, self.hidden_dim).expand_as(ezs)
e = torch.cat([ezs, ec],2)
e = self.nonlinearity(e)
e = e.view(-1, 2*self.hidden_dim)
e = self.fc_initial(e)
e = e.view(-1, 256, 4, 4)  # fc_initial always outputs 256 * 4 * 4 features per sample
for conv, bn in zip(self.conv_layers, self.bn_layers):
e = conv(e)
e = bn(e)
e = self.nonlinearity(e)
e = self.conv_final(e)
e = torch.sigmoid(e)  # torch.sigmoid replaces the deprecated F.sigmoid
return e |
the-stack_106_26036 | import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy import wcs
from astropy.nddata import Cutout2D
import astropy.io.fits as fits
import numpy as np
import matplotlib.pyplot as plt
import os
from astropy.table import Table, join
from matplotlib.patches import Circle
from aspecs_catalog_builder import get_aspecs_radec
catalog_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst.v4.1.cats/Catalog/goodss_3dhst.v4.1.cat.FITS")
skelton_goodss = catalog_goodss[1].data
print(catalog_goodss[0].header)
print(skelton_goodss.columns)
print(skelton_goodss['id'])
def create_cutout(ax, wcs_header, image, ra, dec, aspecs_ra, aspecs_dec):
"""
Plot a cutout of the image centered on the catalog source and mark the ASPECS line position.
:param ax: Matplotlib ax to use
:param wcs_header: Image header with WCS info
:param image: Image data array
:param ra: RA coordinate of the catalog center in J2000 (deg)
:param dec: DEC coordinate of the catalog center in J2000 (deg)
:param aspecs_ra: RA coordinate of the ASPECS line in J2000 (deg)
:param aspecs_dec: DEC coordinate of the ASPECS line in J2000 (deg)
:return: the Matplotlib ax with the cutout and markers drawn
"""
w = wcs.WCS(wcs_header)
center = SkyCoord(ra * u.deg, dec * u.deg, frame='fk5')
aspecs_ra_dec_center = SkyCoord(aspecs_ra * u.deg, aspecs_dec * u.deg, frame='fk5')
size = np.ceil(center.separation(aspecs_ra_dec_center).arcsecond * 2.)
if size < 4.0:
size = 4
# then make an array cutout
aspecs_cutout = Cutout2D(image, aspecs_ra_dec_center, size=size * u.arcsec, wcs=w)
co = Cutout2D(image, center, size=size * u.arcsec, wcs=w)
ax.imshow(co.data, origin='lower', cmap='gray')
center_image = Circle((co.center_cutout[0], co.center_cutout[1]), 5, fill=False, color='r')
ax.add_patch(center_image)
aspecs_loc_x, aspecs_loc_y = co.to_cutout_position(aspecs_cutout.center_original)
ax.errorbar(aspecs_loc_x, aspecs_loc_y, yerr=5, xerr=5, color='r')
return ax
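# Hedged usage sketch for create_cutout (the FITS handle and coordinates below are
# placeholders, not values taken from this analysis):
# hdu = fits.open("some_image.fits")
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1, projection=wcs.WCS(hdu[0].header))
# ax = create_cutout(ax, hdu[0].header, hdu[0].data,
#                    ra=53.16, dec=-27.79, aspecs_ra=53.161, aspecs_dec=-27.791)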
def create_ax_cutout(ax, name, fit_data, aspecs_coordinate, catalog_coordinate):
ax = create_cutout(ax, fit_data[0].header, fit_data[0].data, catalog_coordinate.ra.deg, catalog_coordinate.dec.deg,
aspecs_coordinate.ra.deg, aspecs_coordinate.dec.deg)
ax.set_title(name)
ax.tick_params(direction='in', colors='w', bottom=True, top=True, left=True, right=True, labelbottom=True,
labeltop=False, labelleft=True, labelright=False)
return ax
def create_aspecs_cutouts(aspecs_coordinates, fits_files, fits_names, wcs_data, catalog_coordinates, id, aspecs_freqs):
# get size of file
shape_file = int(np.ceil(np.sqrt(len(fits_files))))
f = plt.figure(figsize=(20, 20))
f.suptitle('ID: ' + str(id) + " ALMA Freq: " + str(aspecs_freqs[id]))
for index, image in enumerate(fits_files):
ax = f.add_subplot(shape_file, shape_file, index + 1, projection=wcs_data)
create_ax_cutout(ax, fits_names[index], image, aspecs_coordinates, catalog_coordinates)
return f
hdu_list = fits.open("data/jacob_aspecs_catalog_fixed_magphys_jcb3.fits")
print(hdu_list[1].columns)
roberto_muse = hdu_list[1].data
from astropy.utils import data
from spectral_cube import SpectralCube
# ASPECS_Data Cubes
# aspecs_a1_chn = SpectralCube.read("/home/jacob/Research/gs_A1_2chn.fits")
# aspecs_a2_chn = SpectralCube.read("/home/jacob/Research/gs_A2_2chn.fits")
# plt.imshow(aspecs_a1_chn.unitless[1,:,:])
# plt.show()
# print(aspecs_a1_chn)
summed_cub = fits.open("/home/jacob/Research/Wide_ASPECS/Data/gs_C1_2chn.fits")
summed_cub = summed_cub[0].data
summed_cub = np.reshape(summed_cub, (480, 2048, 2048))
print(summed_cub.shape)
summed_cub = np.sum(summed_cub, axis=0)
plt.imshow(summed_cub)
plt.show()
f160w_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst_v4.0_f160w/goodss_3dhst.v4.0.F160W_orig_sci.fits")
w = wcs.WCS(f160w_goodss[0].header)
roberto_muse = Table.read("roberto_catalog_muse.fits", format='fits')
test_roberto = Table.read("/home/jacob/Development/Wide_ASPECS/mapghys_in_nov2018_all.fits", format='fits')
# Add in RA and Dec to test_roberto
roberto_muse = join(test_roberto, roberto_muse, keys='id')
muse_catalog = fits.open(os.path.join("data", "MW_44fields_main_table_v1.0.fits"))[1].data
roberto_ra_dec = SkyCoord(roberto_muse['ra'] * u.deg, roberto_muse['dc'] * u.deg, frame='fk5')
muse_ra_dec = SkyCoord(muse_catalog['RA'] * u.deg, muse_catalog['DEC'] * u.deg, frame='fk5')
indicies_to_use = []
muse_ids_to_use = []
# For ones with same ID
from matplotlib.text import OffsetFrom
# For ones with off Z
f125w_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst_v4.0_f125w/goodss_3dhst.v4.0.F125W_orig_sci.fits")
f140w_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst_v4.0_f140w/goodss_3dhst.v4.0.F140W_orig_sci.fits")
f160w_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst_v4.0_f160w/goodss_3dhst.v4.0.F160W_orig_sci.fits")
f435w_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst_v4.0_f435w/goodss_3dhst.v4.0.F435W_orig_sci.fits")
f606w_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst.v4.0.f606wcand/goodss_3dhst.v4.0.F606Wcand_orig_sci.fits")
f775w_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst_v4.0_f775w/goodss_3dhst.v4.0.F775W_orig_sci.fits")
f850lp_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst_v4.0_f850lp/goodss_3dhst.v4.0.F850LP_orig_sci.fits")
R_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_R/GOODS-S_R_sci.fits")
U38_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_WFI_U38/GOODS-S_WFI_U38_sci.fits")
V_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_WFI_V/GOODS-S_WFI_V_sci.fits")
B_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_WFI_B/GOODS-S_WFI_B_sci.fits")
# U_goodss = fits.open("/home/jacob/Research/GOODS-S_U/GOODS-S_U_sci.fits")
J_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_convJ/GOODS-S_convJ_sci.fits")
H_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_convH/GOODS-S_convH_sci.fits")
f814w_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst.v4.0.f814wcand/goodss_3dhst.v4.0.F814Wcand_orig_sci.fits")
I_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_WFI_I/GOODS-S_WFI_I_sci.fits")
# Tenis Ones
tKs_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_tenisK/GOODS-S_tenisK_sci.fits")
tJ_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_tenisJ/GOODS-S_tenisJ_sci.fits")
# MUSYC
# ia427_goodss = fits.open("/home/jacob/Research/GOODS-S_IA427/GOODS-S_IA427_sci.fits")
# ia445_goodss = fits.open("/home/jacob/Research/GOODS-S_IA445/GOODS-S_IA445_sci.fits")
# ia505_goodss = fits.open("/home/jacob/Research/GOODS-S_IA505/GOODS-S_IA505_sci.fits")
# ia527_goodss = fits.open("/home/jacob/Research/GOODS-S_IA527/GOODS-S_IA527_sci.fits")
# ia550_goodss = fits.open("/home/jacob/Research/GOODS-S_IA550/GOODS-S_IA550_sci.fits")
# ia574_goodss = fits.open("/home/jacob/Research/GOODS-S_IA574/GOODS-S_IA574_sci.fits")
# ia624_goodss = fits.open("/home/jacob/Research/GOODS-S_IA624/GOODS-S_IA624_sci.fits")
# ia651_goodss = fits.open("/home/jacob/Research/GOODS-S_IA651/GOODS-S_IA651_sci.fits")
# ia679_goodss = fits.open("/home/jacob/Research/GOODS-S_IA679/GOODS-S_IA679_sci.fits")
# ia738_foodss = fits.open("/home/jacob/Research/GOODS-S_IA738/GOODS-S_IA738_sci.fits")
# ia797_foodss = fits.open("/home/jacob/Research/GOODS-S_IA797/GOODS-S_IA797_sci.fits")
# IRAC
irac1_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_SEDS1/GOODS-S_SEDS1_sci_sub.fits")
irac2_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_SEDS2/GOODS-S_SEDS2_sci_sub.fits")
irac3_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_irac3/GOODS-S_irac3_s1_sci.fits")
irac4_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/GOODS-S_irac4/GOODS-S_irac4_s1_sci.fits")
# Add to the list with names
# ia_ones = [ia427_goodss, ia445_goodss, ia505_goodss, ia527_goodss, ia550_goodss, ia574_goodss,
# ia624_goodss, ia651_goodss, ia679_goodss, ia738_foodss, ia797_foodss, ]
fits_files = [f125w_goodss, f140w_goodss, f160w_goodss, f435w_goodss, f606w_goodss, f775w_goodss, f850lp_goodss,
f814w_goodss, R_goodss, U38_goodss, V_goodss, B_goodss, J_goodss, H_goodss, I_goodss, tKs_goodss,
tJ_goodss, irac1_goodss, irac2_goodss,
irac3_goodss, irac4_goodss]
ia_nmes = ["IA427", "IA445", "IA505", "IA527", "IA550", "IA574",
"IA624", "IA651", "IA679", "IA738", "IA797", ]
fits_names = ["F125W", "F140W", "F160W", "F435W", "F606W", "F775W", "F850LP",
"F814W", "R", "U38", "V", "B", "J", "H", "I", "tKs",
"tJ",
"IRAC1", "IRAC2", "IRAC3", "IRAC4"]
catalog_goodss = fits.open("/home/jacob/Research/Wide_ASPECS/Historical_Data/goodss_3dhst.v4.1.cats/Catalog/goodss_3dhst.v4.1.cat.FITS")
skelton_goodss = catalog_goodss[1].data
def create_overlap_cutout(ax, wcs_header, image, muse, row, other_row, index, other_index):
"""
Plot a cutout centered on the MUSE source and mark the positions of the two overlapping Roberto catalog entries.
:param ax: Matplotlib ax to use
:param wcs_header: Image header with WCS info
:param image: Image data array
:param muse: MUSE catalog table
:param row: first Roberto catalog row to mark
:param other_row: second Roberto catalog row to mark; its 'muse_id' selects the MUSE source used as the cutout center
:param index: index of `row` in the Roberto RA/Dec coordinate array
:param other_index: index of `other_row` in the Roberto RA/Dec coordinate array
:return: the Matplotlib ax with the cutout and markers drawn
"""
w = wcs.WCS(wcs_header)
muse_mask = (muse['UNIQUE_ID'] == int(other_row['muse_id']))
center = SkyCoord(muse[muse_mask]['RA'] * u.deg, muse[muse_mask]['DEC'] * u.deg, frame='fk5')
row_center = roberto_ra_dec[index]
other_row_center = roberto_ra_dec[other_index]
size = np.ceil(
np.max([center.separation(row_center).arcsecond * 2., center.separation(other_row_center).arcsecond * 2.]))
if size < 3.0:
size = 3
# then make an array cutout
row_cutout = Cutout2D(image, row_center, size=size * u.arcsec, wcs=w)
other_row_cutout = Cutout2D(image, other_row_center, size=size * u.arcsec, wcs=w)
co = Cutout2D(image, center, size=size * u.arcsec, wcs=w)
ax.imshow(co.data, origin='lower', cmap='gray')
center_image = Circle((co.center_cutout[0], co.center_cutout[1]), 3, fill=False, color='r')
ax.add_patch(center_image)
ax.annotate(str(int(row['muse_id'])), xy=(co.center_cutout[0], co.center_cutout[1]), textcoords='offset pixels',
xytext=(2, 1), color='r')
aspecs_loc_x, aspecs_loc_y = co.to_cutout_position(row_cutout.center_original)
aspecs_loc_x_other, aspecs_loc_y_other = co.to_cutout_position(other_row_cutout.center_original)
first_image = Circle((aspecs_loc_x, aspecs_loc_y), 3, fill=False, color='g')
ax.add_patch(first_image)
ax.annotate(row['id'], xy=(aspecs_loc_x, aspecs_loc_y), textcoords='offset pixels', xytext=(2, 1), color='g')
other_image = Circle((aspecs_loc_x_other, aspecs_loc_y_other), 3, fill=False, color='w')
ax.add_patch(other_image)
ax.annotate(other_row['id'], xy=(aspecs_loc_x_other, aspecs_loc_y_other), textcoords='offset pixels',
xytext=(2, 1), color='w')
return ax
def create_overlap_ax_cutout(ax, name, fit_data, aspecs_coordinate, catalog_coordinate, other_row, index, other_index):
ax = create_overlap_cutout(ax, fit_data[0].header, fit_data[0].data, catalog_coordinate,
aspecs_coordinate, other_row, index, other_index)
ax.set_title(name)
ax.tick_params(direction='in', colors='w', bottom=True, top=True, left=True, right=True, labelbottom=True,
labeltop=False, labelleft=True, labelright=False)
return ax
"""
row_mask = (roberto_muse['id'] == 51778) | (roberto_muse['id'] == 57545) |(roberto_muse['id'] == 62887) |(roberto_muse['id'] == 18816)
row_masked = roberto_muse[row_mask]
row_masked[0]['muse_id'] = 125009025
row_masked[1]['muse_id'] = 125009025
row_masked[2]['muse_id'] = 119002002
row_masked[3]['muse_id'] = 119002002
print(row_masked)
shape_file = int(np.ceil(np.sqrt(len(fits_files))))
f = plt.figure(figsize=(20, 20))
f.suptitle(
'MUSE ID: ' + str(int(row_masked[0]['muse_id'])) + " Roberto IDs: " + str(row_masked[0]['id']) + "/" + str(row_masked[1]['id']))
for third_index, image in enumerate(fits_files):
ax = f.add_subplot(shape_file, shape_file, third_index + 1, projection=w)
create_overlap_ax_cutout(ax, fits_names[third_index], image, catalog_coordinate=muse_catalog, aspecs_coordinate=row_masked[0],
other_row=row_masked[1], index=46881,
other_index=46963)
plt.show()
f.savefig(str("Overlap_2_MUSE_Cutout_" + str(46881) + "MUSE" + str(row_masked[0]['muse_id']) + ".png"), dpi=300)
f.clf()
shape_file = int(np.ceil(np.sqrt(len(fits_files))))
f = plt.figure(figsize=(20, 20))
f.suptitle(
'MUSE ID: ' + str(int(row_masked[0]['muse_id'])) + " Roberto IDs: " + str(row_masked[2]['id']) + "/" + str(row_masked[3]['id']))
for third_index, image in enumerate(fits_files):
ax = f.add_subplot(shape_file, shape_file, third_index + 1, projection=w)
create_overlap_ax_cutout(ax, fits_names[third_index], image, catalog_coordinate=muse_catalog, aspecs_coordinate=row_masked[2],
other_row=row_masked[3], index=50461,
other_index=50464)
plt.show()
f.savefig(str("Overlap_2_MUSE_Cutout_" + str(50461) + "MUSE" + str(row_masked[3]['muse_id']) + ".png"), dpi=300)
f.clf()
"""
def create_multi_overlap_cutout(ax, wcs_header, image, aspecs, matches, ra_dec=roberto_ra_dec, rob_z=0):
"""
Plot a cutout centered on the given position and mark all matched sources from `ra_dec`.
:param ax: Matplotlib ax to use
:param wcs_header: Image header with WCS info
:param image: Image data array
:param aspecs: SkyCoord used as the cutout center
:param matches: indices of the matched sources within `ra_dec`
:param ra_dec: SkyCoord array that the match indices refer to
:param rob_z: redshift annotated at the center (skipped if <= 0)
:return: the Matplotlib ax with the cutout and markers drawn
"""
w = wcs.WCS(wcs_header)
center = aspecs
other_centers = []
for coord in matches:
other_centers.append(ra_dec[coord])
size = 10
cutouts = []
for row_center in other_centers:
# then make an array cutout
cutouts.append(Cutout2D(image, row_center, size=size * u.arcsec, wcs=w))
co = Cutout2D(image, center, size=size * u.arcsec, wcs=w)
ax.imshow(co.data, origin='lower', cmap='gray_r')
center_image = Circle((co.center_cutout[0], co.center_cutout[1]), 3, fill=False, color='r')
ax.add_patch(center_image)
if rob_z > 0:
ax.annotate(str(np.round(rob_z,3)), xy=(co.center_cutout[0], co.center_cutout[1]), textcoords='offset pixels',
xytext=(2, 1), color='r')
for idx, cutout in enumerate(cutouts):
aspecs_loc_x, aspecs_loc_y = co.to_cutout_position(cutout.center_original)
first_image = Circle((aspecs_loc_x, aspecs_loc_y), 3, fill=False, color='g')
ax.add_patch(first_image)
#ax.annotate(freqs[matches[idx]], xy=(aspecs_loc_x, aspecs_loc_y), textcoords='offset pixels', xytext=(5, 20),
# color='g')
#ax.annotate(np.round(z_s[matches[idx]],3), xy=(aspecs_loc_x, aspecs_loc_y), textcoords='offset pixels', xytext=(5, -20),
# color='orange')
return ax
def create_multi_overlap_ax_cutout(ax, name, fit_data, catalog_coordinate, matches, ra_dec=roberto_ra_dec, rob_z=0):
ax = create_multi_overlap_cutout(ax, fit_data[0].header, fit_data[0].data, aspecs=catalog_coordinate,
matches=matches, ra_dec=ra_dec, rob_z=rob_z)
ax.set_title(name)
ax.tick_params(direction='in', colors='w', bottom=True, top=True, left=True, right=True, labelbottom=True,
labeltop=False, labelleft=True, labelright=False)
return ax
def create_multi_matches_ax_cutout(ax, name, fit_data, catalog_coordinates, matches, ra_dec=roberto_ra_dec):
"""
Intended to plot all of the matches on the same wavelength cutout; currently only sets the title and tick style of the given ax.
:param ax: Matplotlib ax to use
:param name: title for the ax (filter/image name)
:param fit_data: the FITS image to use
:param catalog_coordinates: coordinates for the catalog matches
:param matches: the coordinates of the matches
:param ra_dec: SkyCoord array that the matches refer to
:return: the Matplotlib ax
"""
ax.set_title(name)
ax.tick_params(direction='in', colors='w', bottom=True, top=True, left=True, right=True, labelbottom=True,
labeltop=False, labelleft=True, labelright=False)
return ax
def convert_to_rest_frame_ghz(z, ghz):
"""
Take a measured (observed) frequency in GHz and calculate the rest-frame frequency based on the redshift z of the matched galaxy.
:param z: redshift of the matched galaxy
:param ghz: observed frequency in GHz
:return: rest-frame frequency as an astropy Quantity in GHz
"""
# First step is to convert the observed GHz value to a wavelength in nm
nm = (ghz * u.GHz).to(u.nm, equivalencies=u.spectral())
# Second step is to convert from observed nm to rest-frame (emitted) nm
# observed / (z + 1) = emitted
nm_emitted = nm / (z + 1)
print("Nm Emitted: {}, Z: {}".format(nm_emitted, z))
# third step is to convert from restframe nm back to GHz
final_ghz = (nm_emitted).to(u.GHz, equivalencies=u.spectral())
return final_ghz
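# Hedged sanity check: the conversion above is equivalent to multiplying the observed
# frequency by (1 + z), so an observed 97.6 GHz line matched to a z ~ 2.54 galaxy maps
# to roughly 345.5 GHz rest frame, i.e. close to the CO(3-2) transition at 345.796 GHz.
# convert_to_rest_frame_ghz(2.54, 97.6)  # -> ~345.5 GHz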
#aspecs_lines = Table.read("ASPECS_Line_Candidates_Z44_Total_Z_Limit.txt", format="ascii", header_start=0, data_start=1)
"""
aspecs_lines = Table.read("/home/jacob/Development/Wide_ASPECS/independent/ASPECS_Line_Candidates_all_closest_Sep_1.5_SN_6.0.ecsv", format='ascii.ecsv')
transitions = {"1-0": [0.0030, 0.3694, 115.271],
"2-1": [1.0059, 1.7387, 230.538],
"3-2": [2.0088, 3.1080, 345.796],
"4-3": [3.0115, 4.4771, 461.041],
"5-4": [4.0142, 5.8460, 576.268],
"6-5": [5.0166, 7.2146, 691.473],
"7-6": [6.0188, 8.5829, 806.652],
"C1 1-0": [3.2823, 4.8468, 492.161],
"C1 2-1": [6.0422, 8.6148, 809.342]}
coords = SkyCoord(aspecs_lines['RA (J2000)'] * u.deg, aspecs_lines['DEC (J2000)'] * u.deg, frame='fk5')
freqs = aspecs_lines['Observed CO (GHz)']
z_s = aspecs_lines['Z (CO)']
rob_z = aspecs_lines['Z (Matched)']
# Now plot all Radio Sources and see what is around them for all ones without a match
for index, row in enumerate(aspecs_lines):
if row['Roberto ID'] < 0:
# Make the cutouts
shape_file = int(np.ceil(np.sqrt(len(fits_files))))
f = plt.figure(figsize=(20, 20))
# no counterpart
distances = [0]
freq_valus = [np.round(row['Observed CO (GHz)'], 3)]
rest_frame_ghz = [np.round(row['Restframe CO (GHz)'], 3)]
f.suptitle(
" Z: " + str(row['Z (CO)']) + " Delta_Z: " + str(
row['Delta Z']) +
" Observed: " + str(freq_valus) + "\n Spec Z: " + str(
row['Spec Z'])
+ "\n Rest Frame GHz: " + str(rest_frame_ghz))
for third_index, image in enumerate(fits_files):
ax = f.add_subplot(shape_file, shape_file, third_index + 1, projection=w)
create_multi_overlap_ax_cutout(ax, fits_names[third_index], image,
catalog_coordinate=coords[index],
matches=[index], ra_dec=coords)
f.savefig(str("March_Output/ASPECS_Cutout_NoCounter_Sep1.5_SN6.0_" + str(index) + ".png"), dpi=300)
f.clf()
plt.close()
idx, d2d, d3d = coords.match_to_catalog_sky(roberto_ra_dec)
"""
from astropy.coordinates import match_coordinates_sky, search_around_sky
# idxc, idxcatalog, d2d, d3d = search_around_sky(coords, roberto_ra_dec, 2.0 * u.arcsecond)
def make_skycoords(source, ra='ra', dec='dec', distance=None):
"""
Makes and returns a SkyCoord array from given source
:param source: Source with information
:param ra: Key for RA
:param dec: Key for Dec
:param distance: Key for distance (currently unused)
:return: SkyCoord list
"""
try:
skycoords = SkyCoord(source[ra] * u.deg, source[dec] * u.deg, frame='icrs')
except:
skycoords = SkyCoord(source[ra], source[dec], unit=(u.hour, u.deg), frame='icrs')
return skycoords
def load_table(ascii_table, header=0, start=1):
ascii_table_data = Table.read(ascii_table, format="ascii", header_start=header, data_start=start)
return ascii_table_data
aspecs_lines = load_table("ASPECS_Pilot_C_Matches.txt")
coords = make_skycoords(aspecs_lines, ra='rra', dec='rdc')
idxc, idxcatalog, d2d, d3d = search_around_sky(coords, roberto_ra_dec, 1.0 * u.arcsecond)
aspecs_matches = [[] for _ in range(len(aspecs_lines))]
back_match = {}
z_specs = {}
for index, id in enumerate(idxc):
if coords[idxc[index]].separation(roberto_ra_dec[idxcatalog[index]]).arcsecond < 1.0:# and np.abs(aspecs_lines[idxc[index]]['Z (CO)'] - roberto_muse[idxcatalog[index]]['z_1']) < 0.3:
test_mask = (roberto_muse['id'] == roberto_muse[idxcatalog[index]]['id'])
test_rob = roberto_muse[test_mask]
spec_z_mask = (test_rob["z_spec_3dh"] > 0.001) | (test_rob["zm_vds"] > 0.001) | (
test_rob["zm_coeS"] > 0.001) | (test_rob['muse_wide_z'] > 0.0001) \
| (test_rob["zs_mor"] > 0.001) | (test_rob["zm_ina"] > 0.001) | (test_rob["zm_her"] > 0.001)
#if int(aspecs_lines[idxc[index]]["Roberto ID"]) == int(roberto_muse[idxcatalog[index]]['id']):
aspecs_matches[idxc[index]].append(roberto_muse[idxcatalog[index]]['id'])
if roberto_muse[idxcatalog[index]]['id'] in back_match.keys():
back_match[roberto_muse[idxcatalog[index]]['id']].append(idxc[index])
z_specs[roberto_muse[idxcatalog[index]]['id']].append(len(test_rob[spec_z_mask]))
else:
back_match[roberto_muse[idxcatalog[index]]['id']] = [idxc[index]]
z_specs[roberto_muse[idxcatalog[index]]['id']] = [len(test_rob[spec_z_mask])]
for fits_index in range(len(fits_files)):
f = plt.figure(figsize=(20, 20))
f.suptitle(
'Continuum Lines Matched To Galaxies')
image_index = 0
for key, values in back_match.items():
if len(values) > 0:
# Make the cutouts
shape_file = int(np.ceil(np.sqrt(len(fits_files))))
test_mask = (roberto_muse['id'] == key)
roberto_ra_dec_index = 1e30
for index, i in enumerate(roberto_muse):
if i['id'] == key:
roberto_ra_dec_index = index
for third_index, image in enumerate([fits_files[fits_index]]):
ax = f.add_subplot(shape_file, shape_file, image_index + 1, projection=w)
create_multi_overlap_ax_cutout(ax, fits_names[fits_index], image,
catalog_coordinate=roberto_ra_dec[roberto_ra_dec_index],
matches=values, ra_dec=coords, rob_z=0)
ax.set_title(aspecs_lines[values]['name'])
image_index += 1
# plt.show()
f.savefig(str("Continuum/ASPECS_Continuum_{}.png".format(fits_names[fits_index])), dpi=300)
f.clf()
plt.close()
exit()
# exit()
# Now have the matches, plot them on the sky
all_restframe_ghz = {}
# Since all of them have a single match, just check if each one has a value > 0 and go with those
for fits_index in range(len(fits_files)):
f = plt.figure(figsize=(20, 20))
f.suptitle(
'CO Lines Matched To Galaxies')
image_index = 0
for key, values in back_match.items():
if len(values) > 0:
# Make the cutouts
shape_file = int(np.ceil(np.sqrt(len(fits_files))))
test_mask = (roberto_muse['id'] == key)
roberto_ra_dec_index = 1e30
for index, i in enumerate(roberto_muse):
if i['id'] == key:
roberto_ra_dec_index = index
for third_index, image in enumerate([fits_files[fits_index]]):
ax = f.add_subplot(shape_file, shape_file, image_index + 1, projection=w)
create_multi_overlap_ax_cutout(ax, fits_names[fits_index], image,
catalog_coordinate=roberto_ra_dec[roberto_ra_dec_index],
matches=values, ra_dec=coords, rob_z=rob_z[values[0]])
image_index += 1
# plt.show()
f.savefig(str("March_Output/ASPECS_Cutout_Matched_Galaxies_Sep1.5_SN6.0_{}.png".format(fits_names[fits_index])), dpi=300)
f.clf()
plt.close()
all_restframe_ghz = {}
for key, values in back_match.items():
if len(values) > 0:
# Make the cutouts
shape_file = int(np.ceil(np.sqrt(len(fits_files))))
f = plt.figure(figsize=(20, 20))
test_mask = (roberto_muse['id'] == key)
roberto_ra_dec_index = 1e30
for index, i in enumerate(roberto_muse):
if i['id'] == key:
roberto_ra_dec_index = index
distances = []
freq_valus = []
rest_frame_ghz = []
for value in values:
print("Value: ", value)
print("Freq: ", freqs[value])
freq_valus.append(freqs[value])
print("Rest Frame GHz: " + str(convert_to_rest_frame_ghz(roberto_muse[test_mask]['z_1'][0], freqs[value])))
rest_frame_ghz.append(
np.round(convert_to_rest_frame_ghz(np.round(roberto_muse[test_mask]['z_1'][0], 3), freqs[value]), 3))
for index, id in enumerate(idx):
if index == value:
distances.append(np.round(coords[index].separation(roberto_ra_dec[id]).arcsecond, 4))
all_restframe_ghz[value] = (np.round(roberto_muse[test_mask]['z_1'][0], 3), np.round(
convert_to_rest_frame_ghz(np.round(roberto_muse[test_mask]['z_1'][0], 3), freqs[value]), 3),
key)
f.suptitle(
'Roberto ID: ' + str(key) + " Z_1: " + str(roberto_muse[test_mask]['z_1'][0]) + " Z_2: " + str(
roberto_muse[test_mask]['z_2'][0]) +
" Matches: " + str(freq_valus) + " \nDistance: " + str(distances) + "\n Spec Z: " + str(
z_specs[roberto_muse[test_mask]['id'][0]])
+ "\n Rest Frame GHz: " + str(rest_frame_ghz))
for third_index, image in enumerate(fits_files):
ax = f.add_subplot(shape_file, shape_file, third_index + 1, projection=w)
create_multi_overlap_ax_cutout(ax, fits_names[third_index], image,
catalog_coordinate=roberto_ra_dec[roberto_ra_dec_index],
matches=values, index=idx, ra_dec=coords)
# plt.show()
f.savefig(str("March_Output/ASPECS_Cutout_" + str(key) + ".png"), dpi=300)
f.clf()
plt.close()
for key, values in enumerate(aspecs_matches):
if len(values) > 0:
# Make the cutouts
shape_file = int(np.ceil(np.sqrt(len(fits_files))))
f = plt.figure(figsize=(20, 20))
test_mask = (roberto_muse['id'] == key)
roberto_ra_dec_index = 1e30
for index, i in enumerate(roberto_muse):
if i['id'] == key:
roberto_ra_dec_index = index
distances = []
freq_valus = []
rest_frame_ghz = []
for value in values:
print("Value: ", value)
print("Freq: ", freqs[value])
freq_valus.append(freqs[value])
print("Rest Frame GHz: " + str(convert_to_rest_frame_ghz(roberto_muse[test_mask]['z_1'][0], freqs[value])))
rest_frame_ghz.append(
np.round(convert_to_rest_frame_ghz(np.round(roberto_muse[test_mask]['z_1'][0], 3), freqs[value]), 3))
for index, id in enumerate(idx):
if index == value:
distances.append(np.round(coords[index].separation(roberto_ra_dec[id]).arcsecond, 4))
all_restframe_ghz[value] = (np.round(roberto_muse[test_mask]['z_1'][0], 3), np.round(
convert_to_rest_frame_ghz(np.round(roberto_muse[test_mask]['z_1'][0], 3), freqs[value]), 3),
key)
f.suptitle(
'Roberto ID: ' + str(key) + " Z_1: " + str(roberto_muse[test_mask]['z_1'][0]) + " Z_2: " + str(
roberto_muse[test_mask]['z_2'][0]) +
" Matches: " + str(freq_valus) + " \nDistance: " + str(distances) + "\n Spec Z: " + str(
z_specs[roberto_muse[test_mask]['id'][0]])
+ "\n Rest Frame GHz: " + str(rest_frame_ghz))
for third_index, image in enumerate(fits_files):
ax = f.add_subplot(shape_file, shape_file, third_index + 1, projection=w)
create_multi_overlap_ax_cutout(ax, fits_names[third_index], image,
catalog_coordinate=roberto_ra_dec[roberto_ra_dec_index],
matches=values, index=idx, ra_dec=coords)
# plt.show()
f.savefig(str("March_Output/ASPECS_Cutout_" + str(key) + ".png"), dpi=300)
f.clf()
plt.close()
exit() |
the-stack_106_26040 | import pprint
import json
import pandas as pd
import numpy as np
from json_shot_scraper import flatten_shot, flatten_corner #flatten_goal, flatten_complete_pass, flatten_incomplete_pass
from player_scraper import flatten_player, flatten_sub
from dataframe_cleaner import (pass_to_shot, corner_to_shot, transpose_coordinates, coord_to_yards,
shot_distance_angle, dummy_columns, drop_own_goals, goal_dummy, minutes_played)
def game_to_player_df(game):
"""input game from db and output pandas dataframe with plaer information + minutes played"""
game_id = game['match']['matchId']
players = list(game['players'].items())
player_list_dicts = [flatten_player(player, game_id) for player in players]
player_df = pd.DataFrame(player_list_dicts)
subs = list(game['incidences']['substitutions'].items())
subs_dicts = [flatten_sub(sub, game_id) for sub in subs]
subs_df = pd.DataFrame(subs_dicts)
minutes_player_df = minutes_played(subs_df, player_df)
return minutes_player_df
def afa_player_dict(games):
"""
Turn each game from MongoDB into a dataframe, then aggregate the results back into a dictionary.
parameters
----------------
games: iterable of games from MongoDB
returns:
dictionary keyed by player_id with player info and total minutes played
"""
player_dict = {}
for game in games:
temp_df = game_to_player_df(game)
for player in temp_df['player_id'].unique():
if player not in player_dict:
player_dict[player] = {'name': temp_df[temp_df['player_id'] == player]['name'].values[0] , 'position_id': temp_df[temp_df['player_id'] == player]['position_id'].values[0],
'squad_num': temp_df[temp_df['player_id'] == player]['squad_number'].values[0], 'team_id': temp_df[temp_df['player_id'] == player]['team_id'].values[0],
'minutes_played': temp_df[temp_df['player_id'] == player]['minutes_played'].values[0], 'player_id': temp_df[temp_df['player_id'] == player]['player_id'].values[0] }
else:
player_dict[player]['minutes_played'] += temp_df.loc[temp_df['player_id']==player, 'minutes_played'].values[0]
return player_dict
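# Hedged usage sketch (the MongoDB handle `db` is assumed to come from the scraping module,
# mirroring the commented-out import further down in this file):
# games = db.games.find()
# player_dict = afa_player_dict(games)
# player_totals_df = pd.DataFrame.from_dict(player_dict, orient='index')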
def create_player_min_frame():
"""return a dataframe to concat to for player_sub_information"""
attach_to_df = pd.DataFrame(columns = ['game_id', 'name', 'player_id', 'position_id', 'squad_number',
'substitute', 'team_id', 'minutes_played'])
return attach_to_df
# def create_master_player_min_df(games):
# """input games from mongodb by db.games.find() and return a cleaned dataframe with club
# abbreviation instead of club_id"""
# columns = ['game_id', 'name', 'player_id', 'position_id', 'squad_number',
# 'substitute', 'team_id', 'minutes_played']
# afa_team_dict = {20: 'VEL', 13: 'NOB', 136: 'TIG', 19: 'SLO', 8: 'GIM', 2: 'ARG',
# 137: 'UNI', 122: 'ALD', 869: 'PA', 6: 'COL', 124: 'BEL', 134: 'SMS',
# 5: 'BOC', 135: 'TAL', 132: 'GOD', 7: 'EST', 12: 'LAN', 129: 'DEF',
# 18: 'ROS', 4: 'BAN', 100: 'HUR', 17: 'RIV', 815: 'ATT', 16: 'RAC', 10: 'IND',
# 490: 'SMT'}
# attach_df = create_player_min_frame()
# for game in games:
# df = game_to_player_df(game)
# merged_df = pd.concat([attach_df, df], axis=0, ignore_index=True)
# attach_df = merged_df.copy()
# merged_df['club_brev'] = merged_df['team_id'].map(afa_team_dict)
# return merged_df
# from ..scraping_tools.html_scraper import db
# def game_to_cleaned_df(game):
# """input game pulled from mongoDB 'db' and run through cleaning functions
# output: pandas dataframe"""
# shots = list(game['incidences']['shots'].items())
# game_id = game['match']['matchId']
# shot_list_dicts = [flatten_shot(shot, game_id) for shot in shots]
# shot_df = pd.DataFrame(shot_list_dicts)
# completed_passes = list(game['incidences']['correctPasses'].items())
# completed_list_dicts = [flatten_complete_pass(apass, game_id) for apass in completed_passes]
# completed_passes_df = pd.DataFrame(completed_list_dicts)
# shot_pass_df = pass_to_shot(shot_df, completed_passes_df)
# corners = list(game['incidences']['cornerKicks'].items())
# if len(corners) > 0:
# corner_dicts = [flatten_corner(kick, game_id) for kick in corners]
# corner_df = pd.DataFrame(corner_dicts)
# shot_pass_corner = corner_to_shot(shot_pass_df, corner_df)
# transposed_df = transpose_coordinates(shot_pass_corner)
# else:
# transposed_df = transpose_coordinates(shot_pass_df)
# yard_df = coord_to_yards(transposed_df)
# shot_distance_df = shot_distance_angle(yard_df)
# df = dummy_columns(shot_distance_df)
# df_no_own = drop_own_goals(df)
# df_final = goal_dummy(df_no_own)
# return df_final
# def create_frame():
# """create dataframe to concat new dataframes to"""
# attach_to_df = pd.DataFrame(columns=['game_id', 'player_id', 'shot_coord_x1', 'shot_coord_x2',
# 'shot_coord_y1', 'shot_coord_y2', 'shot_coord_z1', 'shot_coord_z2',
# 'shot_id', 'shot_type', 'team_id', 'time_of_event(min)',
# 'passed_from_id', 'pass_coord_x1', 'pass_coord_x2', 'pass_coord_y1',
# 'pass_coord_y2', 'pass_coord_z1', 'pass_coord_z2', 'corner_kick',
# 'shot_distance', 'shot_angle', 'assisted_shot', 'is_penalty_attempt',
# 'is_goal'])
# return attach_to_df
# def create_master_df(games):
# """input games from mongodb by db.games.find() and return a cleaned dataframe
# """
# columns = ['game_id', 'player_id', 'shot_coord_x1', 'shot_coord_x2',
# 'shot_coord_y1', 'shot_coord_y2', 'shot_coord_z1', 'shot_coord_z2',
# 'shot_id', 'shot_type', 'team_id', 'time_of_event(min)',
# 'passed_from_id', 'pass_coord_x1', 'pass_coord_x2', 'pass_coord_y1',
# 'pass_coord_y2', 'pass_coord_z1', 'pass_coord_z2', 'corner_kick',
# 'shot_distance', 'shot_angle', 'assisted_shot', 'is_penalty_attempt',
# 'is_goal']
# attach_to_df = create_frame()
# for game in games:
# df = game_to_cleaned_df(game)
# final_df = pd.concat([attach_to_df, df], axis=0, ignore_index=True, sort=True)
# ### may get a warning because a game doesn't have corner_kick values ###
# attach_to_df = final_df[columns].copy()
# return final_df[columns].copy()
###below is for players
|
the-stack_106_26041 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in WideNDeep model classes."""
from tensorflow.python.eager import backprop
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.experimental.WideDeepModel')
class WideDeepModel(keras_training.Model):
r"""Wide & Deep Model for regression and classification problems.
  This model jointly trains a linear and a dnn model.
Example:
```python
linear_model = LinearModel()
dnn_model = keras.Sequential([keras.layers.Dense(units=64),
keras.layers.Dense(units=1)])
combined_model = WideDeepModel(linear_model, dnn_model)
  combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
  # define dnn_inputs and linear_inputs as separate numpy arrays or
  # a single numpy array if dnn_inputs is same as linear_inputs.
  combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs)
  # or define a single `tf.data.Dataset` that contains a single tensor or
  # separate tensors for dnn_inputs and linear_inputs.
  dataset = tf.data.Dataset.from_tensors(([linear_inputs, dnn_inputs], y))
  combined_model.fit(dataset, epochs=epochs)
```
  Both the linear and the dnn model can be pre-compiled and trained separately
  before training them jointly:
Example:
```python
linear_model = LinearModel()
linear_model.compile('adagrad', 'mse')
  linear_model.fit(linear_inputs, y, epochs=epochs)
  dnn_model = keras.Sequential([keras.layers.Dense(units=1)])
  dnn_model.compile('rmsprop', 'mse')
  dnn_model.fit(dnn_inputs, y, epochs=epochs)
  combined_model = WideDeepModel(linear_model, dnn_model)
  combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
  combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs)
```
"""
def __init__(self, linear_model, dnn_model, activation=None, **kwargs):
"""Create a Wide & Deep Model.
Args:
linear_model: a premade LinearModel, its output must match the output of
the dnn model.
dnn_model: a `tf.keras.Model`, its output must match the output of the
linear model.
activation: Activation function. Set it to None to maintain a linear
activation.
**kwargs: The keyword arguments that are passed on to BaseLayer.__init__.
Allowed keyword arguments include `name`.
"""
super(WideDeepModel, self).__init__(**kwargs)
base_layer.keras_premade_model_gauge.get_cell('WideDeep').set(True)
self.linear_model = linear_model
self.dnn_model = dnn_model
self.activation = activations.get(activation)
def call(self, inputs, training=None):
if not isinstance(inputs, (tuple, list)) or len(inputs) != 2:
linear_inputs = dnn_inputs = inputs
else:
linear_inputs, dnn_inputs = inputs
linear_output = self.linear_model(linear_inputs)
# pylint: disable=protected-access
if self.dnn_model._expects_training_arg:
if training is None:
training = backend.learning_phase()
dnn_output = self.dnn_model(dnn_inputs, training=training)
else:
dnn_output = self.dnn_model(dnn_inputs)
output = nest.map_structure(lambda x, y: (x + y), linear_output, dnn_output)
if self.activation:
return nest.map_structure(self.activation, output)
return output
# This does not support gradient scaling and LossScaleOptimizer.
def train_step(self, data):
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
x, y, sample_weight = data_adapter.expand_1d((x, y, sample_weight))
with backprop.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
if isinstance(self.optimizer, (list, tuple)):
linear_vars = self.linear_model.trainable_variables
dnn_vars = self.dnn_model.trainable_variables
linear_grads, dnn_grads = tape.gradient(loss, (linear_vars, dnn_vars))
linear_optimizer = self.optimizer[0]
dnn_optimizer = self.optimizer[1]
linear_optimizer.apply_gradients(zip(linear_grads, linear_vars))
dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))
else:
trainable_variables = self.trainable_variables
grads = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(grads, trainable_variables))
return {m.name: m.result() for m in self.metrics}
def _make_train_function(self):
# Only needed for graph mode and model_to_estimator.
has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
self._check_trainable_weights_consistency()
# If we have re-compiled the loss/weighted metric sub-graphs then create
# train function even if one exists already. This is because
# `_feed_sample_weights` list has been updated on re-compile.
if getattr(self, 'train_function', None) is None or has_recompiled:
# Restore the compiled trainable state.
current_trainable_state = self._get_trainable_state()
self._set_trainable_state(self._compiled_trainable_state)
inputs = (
self._feed_inputs + self._feed_targets + self._feed_sample_weights)
if not isinstance(backend.symbolic_learning_phase(), int):
inputs += [backend.symbolic_learning_phase()]
if isinstance(self.optimizer, (list, tuple)):
linear_optimizer = self.optimizer[0]
dnn_optimizer = self.optimizer[1]
else:
linear_optimizer = self.optimizer
dnn_optimizer = self.optimizer
with backend.get_graph().as_default():
with backend.name_scope('training'):
# Training updates
updates = []
linear_updates = linear_optimizer.get_updates(
params=self.linear_model.trainable_weights, # pylint: disable=protected-access
loss=self.total_loss)
updates += linear_updates
dnn_updates = dnn_optimizer.get_updates(
params=self.dnn_model.trainable_weights, # pylint: disable=protected-access
loss=self.total_loss)
updates += dnn_updates
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
metrics = self._get_training_eval_metrics()
metrics_tensors = [
m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access
]
with backend.name_scope('training'):
# Gets loss and metrics. Updates weights at each call.
fn = backend.function(
inputs, [self.total_loss] + metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
setattr(self, 'train_function', fn)
# Restore the current trainable state
self._set_trainable_state(current_trainable_state)
def get_config(self):
linear_config = generic_utils.serialize_keras_object(self.linear_model)
dnn_config = generic_utils.serialize_keras_object(self.dnn_model)
config = {
'linear_model': linear_config,
'dnn_model': dnn_config,
'activation': activations.serialize(self.activation),
}
base_config = base_layer.Layer.get_config(self)
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
linear_config = config.pop('linear_model')
linear_model = layer_module.deserialize(linear_config, custom_objects)
dnn_config = config.pop('dnn_model')
dnn_model = layer_module.deserialize(dnn_config, custom_objects)
activation = activations.deserialize(
config.pop('activation', None), custom_objects=custom_objects)
return cls(
linear_model=linear_model,
dnn_model=dnn_model,
activation=activation,
**config)
|
the-stack_106_26042 | import discord
import os
from lxml import etree
from discord.ext import commands
import asyncio, json
import youtube_dl
import youtube_search
playlists = {} # playlists loaded from each guild will be stored here
musics = {} # musics from each guild will be stored here.
now_playing = {} # music actually playing will be stored here
ytdl_opts = { # Youtube-dl options to improve the quality
'format' : 'bestaudio/best',
'quiet' : True,
'postprocessors' : [{
'key' : 'FFmpegExtractAudio',
'preferredcodec' : 'wav',
'preferredquality' : '192'
}]
}
ytdl = youtube_dl.YoutubeDL(ytdl_opts)
bot = commands.Bot(command_prefix=">", case_insensitive=True, help_command=None)
async def sendMsg(ctx, str_to_send, title=None, color=None, old_msg=None):
''' Function used to send discord message.
return: (discord.Message)
ctx: context from discord.py
str_to_send: (str) --> message content
title: (str) --> title of the embed. if None, message will not be a discord.Embed but a discord.Message
color: (hexa) --> value of the embed color in hexa. If none, embed will not have a color.
old_msg: (discord.Message) --> message that will be suppressed before sending the new one
'''
if title == None:
await ctx.send(str_to_send)
else:
if color == None:
embed = discord.Embed()
else:
embed = discord.Embed(color=color)
embed.add_field(name=title, value=str_to_send, inline=False)
if old_msg != None:
await old_msg.delete()
msg = await ctx.send(embed=embed)
return msg
async def addReactions(msg, tab):
''' Function used to add reactions to a message.
msg: (discord.Message) --> message to add reactions to.
tab: (list) --> list of reaction to add
'''
for reaction in tab:
await msg.add_reaction(reaction)
def isPlaylist(ctx, name):
""" Return True if the named playlist exists.
False if not.
"""
### CREATING/OPENING STORAGE FILE ###
if not os.path.exists('commands/playlists'):
os.makedirs('commands/playlists')
STORAGE_FOLDER = os.path.dirname('commands/playlists/')
REL_PATH = f'{ctx.guild}.xml' # name of the output file
abs_file_path = os.path.join(STORAGE_FOLDER, REL_PATH)
try:
f = open(abs_file_path, "r+") # open the file in read+write
except:
f = open(abs_file_path, "w+")
f.close()
#####################################
# Parse xml file
try:
tree = etree.parse(f'commands/playlists/{ctx.guild}.xml')
root = tree.getroot()
noms = []
for playlist in root:
noms.append(playlist.values()[0])
if name not in noms:
return False
else:
return True
except:
return False
class Video:
def __init__(self, ctx, link=None, video=None):
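        ''' Build a playable track either from an already extracted youtube_dl
            `video` dict or by resolving a youtube `link` with youtube_dl.
            ctx: context from discord.py (used to report extraction errors)
        '''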
if video == None and link == None:
raise ValueError
else:
if video:
video_format = video["formats"][0]
self.url = video["webpage_url"]
self.stream_url = video_format["url"]
self.title = video["title"]
self.duration = video["duration"]
if link:
try:
video = ytdl.extract_info(link, download=False)
video_format = video["formats"][0]
self.url = video["webpage_url"]
self.stream_url = video_format["url"]
self.title = video["title"]
self.duration = video["duration"]
except Exception as e:
to_print = """Impossible de charger la vidéo
à cause des erreurs suivantes :\n"""
to_print += e.args[0][e.args[0].index(' '):]
msg = asyncio.run_coroutine_threadsafe(sendMsg(ctx,
str_to_send=to_print, title="Error", color=0x00ffb7), bot.loop)
raise e
class Music:
def __init__(self, title, url, stream_url, duration):
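        ''' Lightweight track container used for playlists loaded from XML.
            Mirrors the title/url/stream_url/duration attributes of Video.
        '''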
self.title = title
self.url = url
self.stream_url = stream_url
self.duration = duration
class Playlist:
def __init__(self, ctx, name, video_list, load=False):
""" Init a playlist
ctx: context from discord.py
name: (str)
video_list: (list) of Video object
"""
if load == False:
if isPlaylist(ctx, name) == True:
asyncio.run_coroutine_threadsafe(sendMsg(ctx,
f"Playlist {name} already exists.", "Playlist",
0x00ffb7), bot.loop)
raise ValueError
else:
self.name = name
self.author = ctx.author
self.lenght = len(video_list)
self.list = video_list
        else:
            self.name = name
            self.author = ctx.author  # keep the loader as author (the original author only lives in the XML)
            self.lenght = len(video_list)
            self.list = video_list
asyncio.run_coroutine_threadsafe(sendMsg(ctx,
f"Playlist {name} created.", "Playlist",
0x00ffb7), bot.loop)
@staticmethod
def load(ctx):
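        """ Load every playlist saved for this server from its XML file
            into the module-level `playlists` dict.
            ctx: context from discord.py
        """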
try:
tree = etree.parse(f'commands/playlists/{ctx.guild}.xml')
except:
asyncio.run_coroutine_threadsafe(sendMsg(ctx,
"No playlist to load from this server.",
"Playlist", 0x00ffb7), bot.loop)
return
if ctx.guild not in playlists:
playlists[ctx.guild] = []
root = tree.getroot()
for playlist in root:
p_name = playlist.values()[0]
music_list = []
for music in playlist:
elems = []
for elem in music:
elems.append(elem.text)
music_list.append(Music(elems[0], elems[1], elems[2],
elems[3]))
            playlists[ctx.guild].append(Playlist(ctx, p_name, music_list,
                                                load=True))
def save(self, ctx):
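        """ Persist this playlist to the server's XML file, adding only
            musics that are not already stored.
            ctx: context from discord.py
        """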
try:
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse(f'commands/playlists/{ctx.guild}.xml', parser)
root = tree.getroot()
playlist = root.find(f"Playlist[@name='{self.name}']")
            if playlist is None:
playlist = etree.SubElement(root, "Playlist", name=f"{self.name}",
lenght=f"{self.lenght}", author=f"{self.author}")
existing_music = []
for music in playlist:
for elem in music:
if elem.tag == "title":
existing_music.append(elem.text)
        except:
            root = etree.Element("data")
            playlist = etree.SubElement(root, "Playlist", name=f"{self.name}",
                        lenght=f"{self.lenght}", author=f"{self.author}")
            existing_music = []
for music in self.list:
if music.title not in existing_music:
e = etree.Element("Music")
etree.SubElement(e, "title").text = f"{music.title}"
etree.SubElement(e, "url").text = f"{music.url}"
etree.SubElement(e, "stream_url").text = f"{music.stream_url}"
etree.SubElement(e, "duration").text = f"{music.duration}"
playlist.append(e)
tree = etree.ElementTree(element=root)
tree.write("tests/playlists/864029702708264960.xml", pretty_print=True,
xml_declaration=True, encoding="utf-8")
def addMusic(self, ctx, name, video):
        if not isPlaylist(ctx, name):
            asyncio.run_coroutine_threadsafe(sendMsg(ctx,
                                "Specified playlist does not exist.",
                                "Error", 0x00ffb7), bot.loop)
        else:
            if video.title not in [music.title for music in self.list]:
                # store the track as a Music entry of this playlist
                self.list.append(Music(video.title, video.url,
                            video.stream_url, video.duration))
                self.lenght = len(self.list)
            else:
                asyncio.run_coroutine_threadsafe(sendMsg(ctx,
                                    f"{video.title} is already in the playlist.",
                                    "Error", 0x00ffb7), bot.loop)
def buildStrFromArgs(args):
''' Function used to transform a tab of str into one str with spaces.
return: (str)
args: (tab)[(str)]
'''
string = ""
for mot in args:
string += mot + " "
return string
async def add_to_queue(ctx, selected):
''' Function used to add the selected music in the queue.
selected: (dict) of the video from youtub_dl
'''
client = ctx.guild.voice_client
title = selected['title']
id = selected['id']
url = 'https://www.youtube.com/watch?v=' + id
to_print = f"**[{title}]({url})**"
if client and client.channel: # if bot already connected to a channel
video = Video(ctx, link=url)
musics[ctx.guild].append(video)
to_print += f"\n\n`Position in queue : {len(musics[ctx.guild])}`"
msg = await sendMsg(ctx, str_to_send=to_print, title="Queued", color=0x00ffb7)
else:
try: # try to retrieve the channel of the author of the msg
channel = ctx.author.voice.channel
except:
msg = await sendMsg(ctx, str_to_send="**You have to be connected to a channel to play music.**",
title="Error", color=0x00ffb7)
return
video = Video(ctx, link=url)
musics[ctx.guild] = []
now_playing[ctx.guild] = [video]
msg = await sendMsg(ctx, str_to_send=to_print, title="Now playing", color=0x00ffb7)
try:
client = await channel.connect()
await play_song(client, musics[ctx.guild], video, [ctx, msg])
except:
msg = await sendMsg(ctx, str_to_send="**Unable to connect to the channel.**",
title="Error", color=0x00ffb7)
async def Search(ctx, args, robot):
''' Function used to search musics with youtube_search.
ctx: context from discord.py
args: (list)[(str)]
robot: (object) --> commands.Bot from bot.py
'''
search = buildStrFromArgs(args) # Parsing args
nb_results = 20 # Define how many musics will be fetched from youtube_search api
try: # Retrieving data
yt = youtube_search.YoutubeSearch(search, max_results=nb_results).to_json()
results = json.loads(yt)['videos']
except:
msg = await sendMsg(ctx, str_to_send="No results", title="Error", color=0x00ffb7)
return
i = 0
def to_send(results, i):
''' Function to build a string from the data retrieved earlier.
return: (str)
results: (list) --> list of videos from json
i: (int) --> starting index of the "results" list
'''
k = 1
max = i+4
to_print = "```\n"
while i <= max and i < len(results):
music = results[i]
title = music['title']
duration = music['duration']
to_print += f"{k}. {title} ({duration})\n"
i += 1
k += 1
to_print += "```"
return to_print
# Send first results
msg = await sendMsg(ctx, str_to_send=to_send(results, i), title="Search", color=0x00ffb7)
await addReactions(msg, ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '➡️']) # Add reactions to first msg
# Handler of reaction events of first message
def check1(reaction):
if reaction.user_id == ctx.author.id and msg.id == reaction.message_id:
if reaction.emoji.name in ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '➡️']:
return reaction
# Handler of reaction events of next messages
def check2(reaction):
if reaction.user_id == ctx.author.id and msg.id == reaction.message_id:
if reaction.emoji.name in ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '⬅️', '➡️']:
return reaction
try:
choice = await robot.wait_for("raw_reaction_add", check=check1, timeout=60) # Handling events
choice = choice.emoji.name # retrieving emote
# In case user does not react, handle TimeoutError and exit the function.
except asyncio.TimeoutError:
return
dictio = { # Emote-to-value matching dictionary
"1️⃣" : 1,
"2️⃣" : 2,
"3️⃣" : 3,
"4️⃣" : 4,
"5️⃣" : 5
}
start = 0
async def next(msg, start, results, nb_results):
''' Function used to define what will be send when the RIGHT_ARROW emote is pressed,
and which emote should be added.
return: (discord.Message)
msg: (discord.Message) --> old message
start: (int) --> starting index of the results list
results: (list) --> list of videos from json
nb_results: (int) --> len(results)
'''
new_msg = await sendMsg(ctx, str_to_send=to_send(results, start),
title="Search", color=0x00ffb7, old_msg=msg)
k = 0
i = start
tab = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣']
emote_to_add = []
while i < len(results) and k < len(tab):
emote_to_add += [tab[k]]
i += 1
k += 1
emote_to_add += ['⬅️']
if start < nb_results-5:
emote_to_add += ['➡️']
await addReactions(new_msg, emote_to_add)
return new_msg
async def prev(msg, start, results):
''' Function used to define what will be send when the LEFT_ARROW emote is pressed,
and which emote should be added.
return: (discord.Message)
msg: (discord.Message) --> old message
start: (int) --> starting index of the results list
results: (list) --> list of videos from json
'''
new_msg = await sendMsg(ctx, str_to_send=to_send(results, start),
title="Search", color=0x00ffb7, old_msg=msg)
tab = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣']
if start > 4:
tab += ['⬅️']
tab += ['➡️']
await addReactions(new_msg, tab)
return new_msg
async def decision(choice, start, msg, results, nb_results):
''' Function used to decide what to do according to the "choice" variable.
return: (discord.Message) --> actual msg,
(int) --> actual index of "results" list,
(bool) --> True if a music got added to the queue
choice: (str) --> emote from the reaction of the user
msg: (discord.Message) --> last message sent
results: (list) --> list of videos from json
nb_results: (int) --> len(results)
'''
added_to_queue = False
if choice in dictio:
nb = dictio[choice] - 1
nb += start
selected = results[nb]
await add_to_queue(ctx, selected)
added_to_queue = True
elif choice == '➡️':
if start < len(results):
start += 5
msg = await next(msg, start, results, nb_results)
elif choice == '⬅️':
if start >= 5:
start -= 5
msg = await prev(msg, start, results)
return msg, start, added_to_queue
# Choose what to do
msg, start, added_to_queue = await decision(choice, start, msg, results, nb_results)
# Continue this until user select a music to add to the queue
while added_to_queue == False:
try: # Handling events
choice = await robot.wait_for("raw_reaction_add", check=check2, timeout=60)
            choice = choice.emoji.name # retrieving emote
except asyncio.TimeoutError:
return
# Choose what to do
msg, start, added_to_queue = await decision(choice, start, msg, results, nb_results)
async def Delete(ctx, nb):
''' Function used to delete a music from the queue.
ctx: context from discord.py
nb: (str) --> index of the music to delete
'''
nb = int(nb) # casting str to int
if len(musics[ctx.guild]) >= nb:
title = musics[ctx.guild][nb-1].title
url = musics[ctx.guild][nb-1].url
del musics[ctx.guild][nb-1]
msg = await sendMsg(ctx, str_to_send=f"**[{title}]({url}) is deleted from the queue.**",
title="Queue update", color=0x00ffb7)
else:
msg = await sendMsg(ctx, str_to_send="**There isn't as much musics in the queue or the queue is empty.**",
title="Error", color=0x00ffb7)
async def Leave(ctx):
''' Function used to make the bot quit the audio channel. (Empty the queue at the same time)
ctx: context from discord.py
'''
client = ctx.guild.voice_client
if client:
await client.disconnect()
musics[ctx.guild] = []
async def Resume(ctx):
''' Function used to resume the music on the bot.
ctx: context from discord.py
'''
client = ctx.guild.voice_client
if client.is_paused():
client.resume()
async def Pause(ctx):
''' Function used to pause the music on the bot.
ctx: context from discord.py
'''
client = ctx.guild.voice_client
if not client.is_paused():
client.pause()
async def Skip(ctx):
''' Function used to skip the music currently playing on the bot.
ctx: context from discord.py
'''
client = ctx.guild.voice_client
if client:
client.stop()
async def Queue(ctx, robot):
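    ''' Function used to print the current queue, split into pages of 10 musics.
        ctx: context from discord.py
        robot: (object) --> commands.Bot from bot.py
    '''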
    def getTime(duration):
        ''' Function used to transform a duration(int) in sec into a duration(str) like h:mm:ss.
            return: (str)
            duration: (int)
        '''
        total_sec = int(duration)
        h, remainder = divmod(total_sec, 3600)
        minutes, seconds = divmod(remainder, 60)
        return '{}:{:02d}:{:02d}'.format(h, minutes, seconds)
# Check if the bot is connected to a vocal channel
client = ctx.guild.voice_client
pages = [] # if the queue is split into pages, each page will be inside this list
def check(reaction):
if reaction.user_id == ctx.author.id and msg.id == reaction.message_id:
if reaction.emoji.name in ['⬅️', '➡️']:
return reaction
if client: # if connected
# retrieve duration in desired format
time = getTime(now_playing[ctx.guild][0].duration)
# starting to build the string to send
to_print = "```\n" + f"Now playing:\n\t{now_playing[ctx.guild][0].title} ({time})\n\n"
i = 1
queue = musics[ctx.guild]
to_print += f"Total queued: {len(queue)} song(s)\n\n"
if len(queue) > 10: # if queue is too long
y = 1
actual_page = to_print
for music in queue:
time = getTime(music.duration) # retrieve duration
actual_page += f"{i}. {music.title} ({time})\n" # build string to send
if y == 10 or music == queue[-1]: # each 10 music, or at the end of the queue, we end the page
actual_page += "```" # ending actual page
pages += [actual_page] # adding the page to the list of pages
actual_page = "```\n" # starting a new page
y = 1
else:
y += 1
i += 1
i = 0
nb_page = 1
msg = await sendMsg(ctx, str_to_send=pages[i],
title=f"Queue (Page {nb_page})", color=0x00ffb7)
while True:
old = msg
msg = await sendMsg(ctx, str_to_send=pages[i],
title=f"Queue (Page {nb_page})", color=0x00ffb7,
old_msg=old)
if nb_page > 1 and nb_page < len(pages):
emotes = ['⬅️', '➡️']
elif nb_page >= len(pages):
emotes = ['⬅️']
else:
emotes = ['➡️']
await addReactions(msg, emotes)
try: # handling events
react = await robot.wait_for("raw_reaction_add", check=check, timeout=60)
except asyncio.TimeoutError:
return # exit the function if user stop reacting
emoji = react.emoji.name
if emoji == '⬅️':
nb_page -= 1
i -= 1
if emoji == '➡️':
nb_page += 1
i += 1
        else: # if queue isn't too long
for music in queue:
time = getTime(music.duration) # retrieve duration
to_print += f"{i}. {music.title} ({time})\n" # build string to send
i += 1
to_print += "```" # end of the string
msg = await sendMsg(ctx, str_to_send=to_print, title="Music(s) in queue :",
color=0x00ffb7)
else: # if bot not connected
msg = await sendMsg(ctx, str_to_send="**Bot should be connected to your channel to print the queue.**",
title="Error", color=0x00ffb7)
async def play_song(client, queue, song, tab_ctx):
''' Function used to play a music on the bot.
client: (ctx.author.voice.channel.connect())
queue: (list) --> list of musics from youtube_dl
song: ((class)Video) --> Video object
tab_ctx: (list)[ctx, old_msg]
'''
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(song.stream_url,
before_options= "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5"))
ctx = tab_ctx[0]
msg = await sendMsg(ctx, str_to_send=f"**[{song.title}]({song.url})**",
title="Now playing", color=0x00ffb7, old_msg=tab_ctx[1])
def next(_):
if len(queue) > 0:
new_song = queue[0]
now_playing[ctx.guild] = [queue[0]]
del queue[0]
asyncio.run_coroutine_threadsafe(play_song(client, queue, new_song, [ctx, msg]), bot.loop)
else:
asyncio.run_coroutine_threadsafe(client.disconnect(), bot.loop)
try:
client.play(source, after=next)
except:
pass
async def playlist(ctx, url):
''' Function used to add a playlist to the queue.
ctx: context from discord.py
url: (str) --> link of a youtube playlist
'''
client = ctx.guild.voice_client
status = await sendMsg(ctx, "***downloading playlist...***",
"Status", 0x00ffb7)
playlist = ytdl.extract_info(url, download=False)
if client and client.channel: # if bot connected
for video in playlist['entries']: # for each music of the playlist
to_append = Video(ctx, video=video)
musics[ctx.guild].append(to_append) # add each music of the playlist inside the queue
msg = await sendMsg(ctx, f"**[{playlist['title']}]({playlist['webpage_url']})**",
"Playlist queued", 0x00ffb7, status)
else: # if bot not connected
try: # try to connect
channel = ctx.author.voice.channel
except: # if error
msg = await sendMsg(ctx, str_to_send="***You must join a channel for that !***",
old_msg=status)
return
musics[ctx.guild] = [] # creating the queue
now_playing[ctx.guild] = [] # creating now playing
i = 0
for video in playlist['entries']: # for each video of the playlist
if i == 0:
to_play = Video(ctx, video=video)
now_playing[ctx.guild] = [to_play] # currently playing music is stored in there
else:
to_append = Video(ctx, video=video)
musics[ctx.guild].append(to_append) # add each music to queue
i+=1
try: # try to connect to the channel of the user
client = await channel.connect()
except: # if error
msg = await sendMsg(ctx, "**Unable to connect to voice channel**",
"Error", 0x00ffb7)
return
msg = await sendMsg(ctx, f"**[{playlist['title']}]({playlist['webpage_url']})**",
"Now playing playlist", 0x00ffb7)
tab_ctx = [ctx, msg]
# start to play the song
await play_song(client, musics[ctx.guild], to_play, tab_ctx)
async def Play(ctx, args):
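    ''' Function used to play a music (or a whole playlist) on the bot, or add it
        to the queue if something is already playing.
        ctx: context from discord.py
        args: (tab)[(str)] --> search words or a youtube link
    '''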
client = ctx.guild.voice_client
search = ""
for mot in args:
search += mot + " "
if "https://youtube.com/playlist" in search:
await playlist(ctx, search)
return
elif "https://" in search:
url = search
else:
try:
yt = youtube_search.YoutubeSearch(search, max_results=1).to_json()
except Exception as e:
to_print = "Impossible de charger la vidéo à cause des erreurs suivantes :\n"
to_print += e.args[0][e.args[0].index(' '):]
msg = await sendMsg(ctx, to_print, "Error", 0x00ffb7)
return
try:
yt_id = str(json.loads(yt)['videos'][0]['id'])
url = 'https://www.youtube.com/watch?v=' + yt_id
except:
msg = await sendMsg(ctx, "No results", "Error", 0x00ffb7)
return
if client and client.channel:
video = Video(ctx, url)
musics[ctx.guild].append(video)
msg = await sendMsg(ctx, f"**[{video.title}]({video.url})**\n\n`Position in queue : {len(musics[ctx.guild])}`",
"Queued", 0x00ffb7)
else:
try:
video = Video(ctx, url)
except:
return
try:
channel = ctx.author.voice.channel
except:
msg = await sendMsg(ctx, "***You must be connected to a channel to do that.***",
"Error", 0x00ffb7)
return
musics[ctx.guild] = []
now_playing[ctx.guild] = [video]
client = await channel.connect()
msg = await sendMsg(ctx, f"**[{video.title}]({video.url})**",
"Now playing", 0x00ffb7)
await play_song(client, musics[ctx.guild], video, [ctx, msg])
async def Playtop(ctx, args):
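    ''' Function used to add a music at the top of the queue so it is played next.
        ctx: context from discord.py
        args: (tab)[(str)] --> search words or a youtube link
    '''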
client = ctx.guild.voice_client
search = ""
for mot in args:
search += mot + " "
if "https://" in search:
url = search
else:
try:
yt = youtube_search.YoutubeSearch(search, max_results=1).to_json()
except Exception as e:
to_print = "Impossible de charger la vidéo à cause des erreurs suivantes :\n"
to_print += e.args[0][e.args[0].index(' '):]
msg = await sendMsg(ctx, to_print, "Error", 0x00ffb7)
return
try:
yt_id = str(json.loads(yt)['videos'][0]['id'])
url = 'https://www.youtube.com/watch?v=' + yt_id
except:
msg = await sendMsg(ctx, "No results", "Error", 0x00ffb7)
return
if client and client.channel:
try:
            video = Video(ctx, url)
except:
return
musics[ctx.guild].insert(0, video)
msg = await sendMsg(ctx, f"**[{video.title}]({video.url})**",
"Queued", 0x00ffb7)
else:
try:
            video = Video(ctx, url)
except:
return
try:
channel = ctx.author.voice.channel
except:
msg = await sendMsg(ctx, "***You must be connected to a channel to do that.***",
"Error", 0x00ffb7)
return
        musics[ctx.guild] = []
        now_playing[ctx.guild] = [video]
client = await channel.connect()
msg = await sendMsg(ctx, f"**[{video.title}]({video.url})**",
"Now playing", 0x00ffb7)
await play_song(client, musics[ctx.guild], video, [ctx, msg])
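# Note: none of the helpers above are bound to bot commands in this excerpt. A
# minimal, hypothetical wiring is sketched below for illustration only; the
# command names and the DISCORD_TOKEN environment variable are assumptions,
# not part of the original module, so the sketch is left commented out.
#
# @bot.command(name="play")
# async def _play(ctx, *args):
#     await Play(ctx, args)
#
# @bot.command(name="skip")
# async def _skip(ctx):
#     await Skip(ctx)
#
# @bot.command(name="queue")
# async def _queue(ctx):
#     await Queue(ctx, bot)
#
# bot.run(os.environ["DISCORD_TOKEN"])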
|
the-stack_106_26044 | # Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from iris_relay.app import read_config_from_argv
from iris_relay.gmail import Gmail
config = read_config_from_argv()
gmclient = Gmail(config.get('gmail'), config.get('proxy'))
print('Fetching unread messages...')
for msg_id_gmail, headers, body in gmclient.list_unread_message():
print({'body': body, 'headers': headers})
|
the-stack_106_26045 | import os
import re
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.core.management import call_command
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
from ecs.utils.testcases import LoginTestCase
from ecs.documents.models import DocumentType
from ecs.notifications.models import NotificationType, Notification, ProgressReportNotification
from ecs.votes.models import Vote
from ecs.core.tests.test_submissions import create_submission_form
class NotificationFormTest(LoginTestCase):
'''Tests for the Notification and NotificationType module
Tests for creating Notifications, upload of Notification documents, PDF document generation and Notification type selection.
'''
def test_creation_type_selection(self):
'''Tests if a notification type can be created and if the view for its selection is accessible.
'''
NotificationType.objects.create(name='foo notif')
response = self.client.get(reverse('ecs.notifications.views.select_notification_creation_type'))
self.assertEqual(response.status_code, 200)
self.assertTrue(b'foo notif' in response.content)
def _create_POST_data(self, **extra):
data = {
'comments': 'foo comment',
}
data.update(extra)
return data
def test_notification_form(self):
'''Tests notification creation and autosave mode. Further tests if notification can be saved,
        submitted with incomplete data, and finally if the correct redirect happens if submitted with complete data.
'''
notification_type = NotificationType.objects.create(name='foo notif')
# GET the form and expect a docstash transactions redirect, then follow this redirect
response = self.client.get(reverse('ecs.notifications.views.create_notification', kwargs={'notification_type_pk': notification_type.pk}))
self.assertEqual(response.status_code, 302)
url = response['Location']
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue(b'foo notif' in response.content)
self.assertTrue(b'<form' in response.content)
# POST the form in `autosave` mode
response = self.client.post(url, self._create_POST_data(autosave='autosave'))
self.assertEqual(response.status_code, 200)
self.assertFalse(b'<form' in response.content)
# POST the form in `save` mode
response = self.client.post(url, self._create_POST_data(save='save', comments='bar comment'))
self.assertEqual(response.status_code, 200)
self.assertFalse(b'<form' in response.content)
# POST the form in `submit` mode (incomplete data)
response = self.client.post(url, self._create_POST_data(submit='submit'))
self.assertEqual(response.status_code, 200)
self.assertTrue(b'<form' in response.content)
form = response.context['form']
self.assertEqual(form['comments'].data, 'foo comment')
# POST the form in `submit` mode (complete data) and follow the redirect
submission_form = create_submission_form()
response = self.client.post(url, self._create_POST_data(submit='submit', submission_forms=submission_form.pk))
self.assertEqual(response.status_code, 302)
view_url = response['Location']
response = self.client.get(view_url)
obj = response.context['notification']
self.assertEqual(obj.comments, 'foo comment')
self.assertEqual(obj.submission_forms.all()[0], submission_form)
def test_submission_data_for_notification(self):
'''Tests if the submission_data_for_notification view is accessible for a created notification for a submission.
'''
notification_type, _ = NotificationType.objects.get_or_create(name='foo notif')
notification = Notification.objects.create(type=notification_type)
submission_form = create_submission_form()
response = self.client.get(reverse('ecs.notifications.views.submission_data_for_notification'), {'submission_form': submission_form.pk})
self.assertEqual(response.status_code, 200)
def test_notification_pdf(self):
'''Tests if a pdf is produced if a Notification is created.
'''
notification_type, _ = NotificationType.objects.get_or_create(name='foo notif')
notification = Notification.objects.create(type=notification_type)
notification.render_pdf_document()
response = self.client.get(reverse('ecs.notifications.views.notification_pdf', kwargs={'notification_pk': notification.pk}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(next(response.streaming_content)[:5], b'%PDF-')
def _setup_POST_url(self):
notification_type = NotificationType.objects.create(name='foo notif')
response = self.client.get(reverse('ecs.notifications.views.create_notification', kwargs={'notification_type_pk': notification_type.pk}))
return response['Location']
def test_document_upload(self):
'''Tests if a pdf file can be uploaded. Further tests if meta data of the document is stored correctly and
        if the resulting download link contains a PDF file.
'''
url = self._setup_POST_url()
upload_url = re.sub(r'new/\d+/', 'doc/upload/', url) # XXX: ugly
data = self._create_POST_data()
doctype = DocumentType.objects.create(name='foo doctype')
f = open(os.path.join(os.path.dirname(__file__), '..', 'core', 'tests', 'data', 'menschenrechtserklaerung.pdf'), 'rb')
data.update({
'document-file': f,
'document-doctype': doctype.pk,
'document-name': 'menschenrechtserklärung',
'document-version': '3.1415',
'document-date': '17.03.2010',
})
response = self.client.post(upload_url, data)
f.close()
self.assertTrue(b'<form' in response.content)
documents = response.context['documents']
self.assertEqual(len(documents), 1)
doc = documents[0]
self.assertEqual(doc.version, '3.1415')
response = self.client.get(
reverse('ecs.docstash.views.download_document', kwargs={
'docstash_key': response.context['request'].docstash.key,
'document_pk': doc.pk
})
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(next(response.streaming_content)[:5], b'%PDF-')
def test_incomplete_upload(self):
'''Tests an incomplete document upload. Regression test for the KeyError bug fixed in r729:b022598f8e55
'''
url = self._setup_POST_url()
upload_url = re.sub(r'new/\d+/', 'doc/upload/', url) # XXX: ugly
data = self._create_POST_data()
doctype = DocumentType.objects.create(name='regression doctype')
data.update({
'document-0-doctype': doctype.pk,
'document-0-version': '3.1415',
'document-0-date': '30.03.2010',
})
response = self.client.post(upload_url, data)
self.assertEqual(response.status_code, 200)
self.assertTrue(b'<form' in response.content)
def test_vote_extension_workflow(self):
from django.contrib.auth.models import Group
call_command('bootstrap')
now = timezone.now()
nt = NotificationType.objects.get(form='ecs.notifications.forms.ProgressReportNotificationForm')
presenter = self.create_user('test_presenter')
office = self.create_user('test_office', profile_extra={'is_internal': True})
office.groups.add(Group.objects.get(name='EC-Office'))
executive = self.create_user('text_executive', profile_extra={'is_internal': True, 'is_executive': True})
executive.groups.add(
Group.objects.get(name='EC-Executive'),
Group.objects.get(name='EC-Office'),
)
sf = create_submission_form(presenter=presenter)
with self.login('test_presenter'):
response = self.client.get(reverse('ecs.notifications.views.create_notification', kwargs={'notification_type_pk': nt.pk}))
url = response['Location'] # docstash redirect
# no vote yet => we cannot select the submission form
response = self.client.get(url)
self.assertFalse(response.context['form'].fields['submission_form'].queryset.filter(pk=sf.pk).exists())
# create a permanent final postive vote
vote = sf.votes.create(result='1', is_final_version=True, signed_at=now, published_at=now, valid_until=now.replace(year=now.year + 1))
# now we have a vote => submission form is selectable
response = self.client.get(url)
self.assertTrue(response.context['form'].fields['submission_form'].queryset.filter(pk=sf.pk).exists())
# create a notification, request a vote extension
response = self.client.post(url, {
'submission_form': sf.pk,
'extension_of_vote_requested': 'on',
'runs_till': '12.12.2012',
'submit': 'on',
'SAE_count': '0',
'SUSAR_count': '0',
'study_started': 'on',
'comments': 'foo',
})
self.assertEqual(response.status_code, 302)
notification = self.client.get(response['Location']).context['notification']
def do_review(user, action='complete'):
response = self.client.get(reverse('ecs.tasks.views.my_tasks', kwargs={'submission_pk': sf.submission.pk}))
task = response.context['open_tasks'].get(
data_id=notification.pk,
content_type=ContentType.objects.get_for_model(ProgressReportNotification),
)
task.accept(user)
response = self.client.get(task.url)
self.assertEqual(response.status_code, 200)
response = self.client.post(task.url, {
'task_management-submit': 'Abschicken',
'task_management-action': action,
'task_management-post_data': 'text=Test.',
})
self.assertEqual(response.status_code, 302)
# office review
with self.login('test_office'):
do_review(office)
# executive review
with self.login('text_executive'):
do_review(executive, 'complete_0')
notification = ProgressReportNotification.objects.get(pk=notification.pk)
old_valid_until = vote.valid_until
vote = Vote.objects.get(pk=vote.pk)
self.assertEqual(vote.valid_until, old_valid_until + timedelta(365))
|