the-stack_106_22532 | """Speech to text."""
import io
import os
import subprocess
import tempfile
import time
import wave
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type
from urllib.parse import urljoin
import requests
from rhasspy.actor import RhasspyActor
from rhasspy.utils import convert_wav, hass_request_kwargs, maybe_convert_wav
# -----------------------------------------------------------------------------
class TranscribeWav:
"""Request to transcribe text from WAV buffer."""
def __init__(
self,
wav_data: bytes,
receiver: Optional[RhasspyActor] = None,
handle: bool = True,
) -> None:
self.wav_data = wav_data
self.receiver = receiver
self.handle = handle
class WavTranscription:
"""Response to TranscribeWav."""
def __init__(self, text: str, handle: bool = True, confidence: float = 1) -> None:
self.text = text
self.confidence = confidence
self.handle = handle
# -----------------------------------------------------------------------------
def get_decoder_class(system: str) -> Type[RhasspyActor]:
"""Get type for profile speech to text decoder."""
assert system in [
"dummy",
"pocketsphinx",
"kaldi",
"remote",
"hass_stt",
"command",
], ("Invalid speech to text system: %s" % system)
if system == "pocketsphinx":
# Use pocketsphinx locally
return PocketsphinxDecoder
if system == "kaldi":
# Use kaldi locally
return KaldiDecoder
if system == "remote":
# Use remote Rhasspy server
return RemoteDecoder
if system == "hass_stt":
# Use Home Assistant STT platform
return HomeAssistantSTTIntegration
if system == "command":
# Use external program
return CommandDecoder
# Use dummy decoder as a fallback
return DummyDecoder
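# Illustrative usage sketch (editor's note, not part of the original module).
# The profile key below is an assumption for demonstration only; the function
# itself just maps a system name to one of the decoder actor classes defined
# later in this module.
#
#   system = profile.get("speech_to_text.system", "dummy")   # hypothetical lookup
#   decoder_class = get_decoder_class(system)                # e.g. PocketsphinxDecoder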
# -----------------------------------------------------------------------------
class DummyDecoder(RhasspyActor):
"""Always returns an emptry transcription"""
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, TranscribeWav):
self.send(message.receiver or sender, WavTranscription(""))
# -----------------------------------------------------------------------------
# Pocketsphinx based WAV to text decoder
# https://github.com/cmusphinx/pocketsphinx
# -----------------------------------------------------------------------------
class PocketsphinxDecoder(RhasspyActor):
"""Pocketsphinx based WAV to text decoder."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.decoder = None
self.min_confidence: float = 0
self.preload: bool = False
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.min_confidence = self.profile.get(
"speech_to_text.pocketsphinx.min_confidence", 0.0
)
self.open_transcription = self.profile.get(
"speech_to_text.pocketsphinx.open_transcription", False
)
self.preload = self.config.get("preload", False)
if self.preload:
with self._lock:
try:
self.load_decoder()
except Exception as e:
self._logger.warning("preload: %s", e)
self.transition("loaded")
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, TranscribeWav):
try:
self.load_decoder()
text, confidence = self.transcribe_wav(message.wav_data)
self.send(
message.receiver or sender,
WavTranscription(
text, confidence=confidence, handle=message.handle
),
)
except Exception:
self._logger.exception("transcribing wav")
# Send empty transcription back
self.send(
message.receiver or sender,
WavTranscription("", handle=message.handle),
)
# -------------------------------------------------------------------------
def load_decoder(self) -> None:
"""Load Pocketsphinx HMM/LM/Dictionary."""
if self.decoder is None:
# Load decoder
import pocketsphinx
ps_config = self.profile.get("speech_to_text.pocketsphinx", {})
# Load decoder settings
hmm_path = self.profile.read_path(
ps_config.get("acoustic_model", "acoustic_model")
)
if self.open_transcription:
self._logger.debug("Open transcription mode")
# Use base language model/dictionary
dict_path = self.profile.read_path(
ps_config.get("base_dictionary", "base_dictionary.txt")
)
lm_path = self.profile.read_path(
ps_config.get("base_language_model", "base_language_model.txt")
)
else:
# Custom voice commands
dict_path = self.profile.read_path(
ps_config.get("dictionary", "dictionary.txt")
)
lm_path = self.profile.read_path(
ps_config.get("language_model", "language_model.txt")
)
self._logger.debug(
"Loading decoder with hmm=%s, dict=%s, lm=%s",
hmm_path,
dict_path,
lm_path,
)
decoder_config = pocketsphinx.Decoder.default_config()
decoder_config.set_string("-hmm", hmm_path)
decoder_config.set_string("-dict", dict_path)
decoder_config.set_string("-lm", lm_path)
decoder_config.set_string("-logfn", "/dev/null")
mllr_path = self.profile.read_path(ps_config["mllr_matrix"])
if os.path.exists(mllr_path):
self._logger.debug(
"Using tuned MLLR matrix for acoustic model: %s", mllr_path
)
decoder_config.set_string("-mllr", mllr_path)
self.decoder = pocketsphinx.Decoder(decoder_config)
def transcribe_wav(self, wav_data: bytes) -> Tuple[str, float]:
"""Get text from WAV buffer."""
# Ensure 16-bit 16 kHz mono
assert self.decoder is not None
with io.BytesIO(wav_data) as wav_io:
with wave.open(wav_io, "rb") as wav_file:
rate, width, channels = (
wav_file.getframerate(),
wav_file.getsampwidth(),
wav_file.getnchannels(),
)
self._logger.debug(
"rate=%s, width=%s, channels=%s.", rate, width, channels
)
if (rate != 16000) or (width != 2) or (channels != 1):
self._logger.info("Need to convert to 16-bit 16Khz mono.")
# Use converted data
audio_data = convert_wav(wav_data)
else:
# Use original data
audio_data = wav_file.readframes(wav_file.getnframes())
# Process data as an entire utterance
start_time = time.time()
self.decoder.start_utt()
self.decoder.process_raw(audio_data, False, True)
self.decoder.end_utt()
end_time = time.time()
self._logger.debug("Decoded WAV in %s second(s)", end_time - start_time)
hyp = self.decoder.hyp()
if hyp is not None:
confidence = self.decoder.get_logmath().exp(hyp.prob)
self._logger.debug("Transcription confidence: %s", confidence)
if confidence >= self.min_confidence:
# Return best transcription
self._logger.debug(hyp.hypstr)
return hyp.hypstr, confidence
self._logger.warning(
"Transcription did not meet confidence threshold: %s < %s",
confidence,
self.min_confidence,
)
# No transcription
return "", 0
# -------------------------------------------------------------------------
def get_problems(self) -> Dict[str, Any]:
"""Get problems at startup."""
problems: Dict[str, Any] = {}
try:
import pocketsphinx # noqa: F401
except Exception:
problems[
"Missing pocketsphinx"
] = "pocketsphinx Python library not installed. Try pip3 install pocketsphinx"
ps_config = self.profile.get("speech_to_text.pocketsphinx", {})
hmm_path = self.profile.read_path(
ps_config.get("acoustic_model", "acoustic_model")
)
if not os.path.exists(hmm_path):
problems[
"Missing acoustic model"
] = f"Acoustic model directory not found at {hmm_path}. Did you download your profile?"
base_dict_path = self.profile.read_path(
ps_config.get("base_dictionary", "base_dictionary.txt")
)
if not os.path.exists(base_dict_path):
problems[
"Missing base dictionary"
] = f"Base dictionary not found at {base_dict_path}. Did you download your profile?"
dict_path = self.profile.read_path(
ps_config.get("dictionary", "dictionary.txt")
)
if not os.path.exists(dict_path):
problems[
"Missing dictionary"
] = f"Dictionary not found at {dict_path}. Did you train your profile?"
lm_path = self.profile.read_path(
ps_config.get("language_model", "language_model.txt")
)
if not os.path.exists(lm_path):
problems[
"Missing language model"
] = f"Language model not found at {lm_path}. Did you train your profile?"
return problems
# -----------------------------------------------------------------------------
# HTTP based decoder on remote Rhasspy server
# -----------------------------------------------------------------------------
class RemoteDecoder(RhasspyActor):
"""Forwards speech to text request to a rmemote Rhasspy server"""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.remote_url = ""
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.remote_url = self.profile.get("speech_to_text.remote.url")
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, TranscribeWav):
text = self.transcribe_wav(message.wav_data)
self.send(message.receiver or sender, WavTranscription(text))
def transcribe_wav(self, wav_data: bytes) -> str:
"""POST to remote server and return response."""
import requests
headers = {"Content-Type": "audio/wav"}
self._logger.debug(
"POSTing %d byte(s) of WAV data to %s", len(wav_data), self.remote_url
)
# Pass profile name through
params = {"profile": self.profile.name}
response = requests.post(
self.remote_url, headers=headers, data=wav_data, params=params
)
try:
response.raise_for_status()
except Exception:
self._logger.exception("transcribe_wav")
return ""
return response.text
# -----------------------------------------------------------------------------
# Kaldi Decoder
# http://kaldi-asr.org
# -----------------------------------------------------------------------------
class KaldiDecoder(RhasspyActor):
"""Kaldi based decoder"""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.kaldi_dir: Optional[Path] = None
self.model_dir: Optional[Path] = None
self.graph_dir: Optional[Path] = None
self.decode_path: Optional[Path] = None
self.decode_command: List[str] = []
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.kaldi_dir = Path(
os.path.expandvars(
self.profile.get("speech_to_text.kaldi.kaldi_dir", "/opt/kaldi")
)
)
model_dir_name = self.profile.get(
"training.speech_to_text.kaldi.model_dir",
self.profile.get("speech_to_text.kaldi.model_dir", "model"),
)
self.model_dir = Path(self.profile.read_path(model_dir_name))
self.open_transcription = self.profile.get(
"speech_to_text.kaldi.open_transcription", False
)
if self.open_transcription:
self.graph_dir = self.model_dir / self.profile.get(
"speech_to_text.kaldi.base_graph", "base_graph"
)
else:
self.graph_dir = self.model_dir / self.profile.get(
"speech_to_text.kaldi.graph", "graph"
)
self.decode_path = Path(self.profile.read_path(model_dir_name, "decode.sh"))
self.decode_command = [
"bash",
str(self.decode_path),
str(self.kaldi_dir),
str(self.model_dir),
str(self.graph_dir),
]
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, TranscribeWav):
text = self.transcribe_wav(message.wav_data)
self.send(message.receiver or sender, WavTranscription(text))
def transcribe_wav(self, wav_data: bytes) -> str:
"""Get text from WAV by calling external Kaldi script."""
try:
with tempfile.NamedTemporaryFile(suffix=".wav", mode="wb+") as wav_file:
# Ensure 16-bit 16 kHz mono
subprocess.run(
[
"sox",
"-t",
"wav",
"-",
"-r",
"16000",
"-e",
"signed-integer",
"-b",
"16",
"-c",
"1",
"-t",
"wav",
wav_file.name,
],
check=True,
input=wav_data,
)
wav_file.seek(0)
command = self.decode_command + [wav_file.name]
self._logger.debug(command)
try:
return (
subprocess.check_output(command, stderr=subprocess.STDOUT)
.decode()
.strip()
)
except subprocess.CalledProcessError as e:
output = e.output.decode()
self._logger.error(output)
raise Exception(output)
except Exception:
self._logger.exception("transcribe_wav")
return ""
def get_problems(self) -> Dict[str, Any]:
"""Get problems at startup."""
problems: Dict[str, Any] = {}
assert self.kaldi_dir is not None
if not self.kaldi_dir.is_dir():
problems[
"Missing Kaldi"
] = f"Kaldi not found at {self.kaldi_dir}. See http://kaldi-asr.org"
assert self.graph_dir is not None
hclg_path = self.graph_dir / "HCLG.fst"
if not hclg_path.is_file():
problems[
"Missing HCLG.fst"
] = f"Graph not found at {hclg_path}. Did you train your profile?"
assert self.model_dir is not None
conf_path = self.model_dir / "online" / "conf" / "online.conf"
if not conf_path.is_file():
problems[
"Missing online.conf"
] = f"Configuration file not found at {conf_path}. Did you train your profile?"
return problems
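# Editor's note (illustrative, not from the original file): with default
# profile settings the external call made by transcribe_wav above is roughly
#   bash <profile>/model/decode.sh /opt/kaldi <profile>/model <profile>/model/graph /tmp/tmpXXXX.wav
# where decode.sh is read from the profile's model directory and the last
# argument is the converted temporary WAV file. Paths are placeholders.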
# -----------------------------------------------------------------------------
# Home Assistant STT Integration
# https://www.home-assistant.io/integrations/stt
# -----------------------------------------------------------------------------
class HomeAssistantSTTIntegration(RhasspyActor):
"""Use STT integration to Home Assistant"""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.hass_config: Dict[str, Any] = {}
self.pem_file: Optional[str] = ""
self.platform: Optional[str] = None
self.chunk_size: int = 2048
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.hass_config = self.profile.get("home_assistant", {})
# PEM file for self-signed HA certificates
self.pem_file = self.hass_config.get("pem_file", "")
if self.pem_file:
self.pem_file = os.path.expandvars(self.pem_file)
self._logger.debug("Using PEM file at %s", self.pem_file)
else:
self.pem_file = None # disabled
self.platform = self.profile.get("speech_to_text.hass_stt.platform")
self.chunk_size = int(
self.profile.get("speech_to_text.hass_stt.chunk_size", 2048)
)
self.sample_rate = int(
self.profile.get("speech_to_text.hass_stt.sample_rate", 16000)
)
self.bit_rate = int(self.profile.get("speech_to_text.hass_stt.bit_rate", 16))
self.channels = int(self.profile.get("speech_to_text.hass_stt.channels", 1))
self.language = str(
self.profile.get("speech_to_text.hass_stt.language", "en-US")
)
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, TranscribeWav):
text = self.transcribe_wav(message.wav_data)
self.send(message.receiver or sender, WavTranscription(text))
def transcribe_wav(self, wav_data: bytes) -> str:
"""Get text Home Assistant STT platform."""
try:
assert self.platform, "Missing platform name"
# Convert WAV to desired format
wav_data = maybe_convert_wav(
wav_data,
rate=self.sample_rate,
width=self.bit_rate,
channels=self.channels,
)
stt_url = urljoin(self.hass_config["url"], f"api/stt/{self.platform}")
# Send to Home Assistant
kwargs = hass_request_kwargs(self.hass_config, self.pem_file)
if self.pem_file is not None:
kwargs["verify"] = self.pem_file
headers = kwargs.get("headers", {})
headers["X-Speech-Content"] = "; ".join(
[
"format=wav",
"codec=pcm",
f"sample_rate={self.sample_rate}",
f"bit_rate={self.bit_rate}",
f"channel={self.channels}",
f"language={self.language}",
]
)
def generate_chunks() -> Iterable[bytes]:
with io.BytesIO(wav_data) as wav_buffer:
with wave.open(wav_buffer, "rb") as wav_file:
# Send empty WAV as initial chunk (header only)
with io.BytesIO() as empty_wav_buffer:
with wave.open(empty_wav_buffer, "wb") as empty_wav_file:
empty_wav_file.setframerate(wav_file.getframerate())
empty_wav_file.setsampwidth(wav_file.getsampwidth())
empty_wav_file.setnchannels(wav_file.getnchannels())
yield empty_wav_buffer.getvalue()
# Stream chunks
audio_data = wav_file.readframes(wav_file.getnframes())
while audio_data:
chunk = audio_data[: self.chunk_size]
yield chunk
audio_data = audio_data[self.chunk_size :]
# POST WAV data to STT
response = requests.post(stt_url, data=generate_chunks(), **kwargs)
response.raise_for_status()
response_json = response.json()
self._logger.debug(response_json)
assert response_json["result"] == "success"
return response_json["text"]
except Exception:
self._logger.exception("transcribe_wav")
return ""
def get_problems(self) -> Dict[str, Any]:
"""Get problems at startup."""
problems: Dict[str, Any] = {}
if not self.platform:
problems[
"Missing platform name"
] = "Expected Home Assistant STT platform name in speech_to_text.hass_stt.platform"
stt_url = urljoin(self.hass_config["url"], f"api/stt/{self.platform}")
try:
kwargs = hass_request_kwargs(self.hass_config, self.pem_file)
requests.get(stt_url, **kwargs)
except Exception:
problems[
"Can't contact server"
] = f"Unable to reach your Home Assistant STT platform at {stt_url}. Is the platform configured?"
return problems
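# Editor's note (illustrative): with the default profile values the streaming
# POST made in transcribe_wav above carries a header of the form
#   X-Speech-Content: format=wav; codec=pcm; sample_rate=16000; bit_rate=16; channel=1; language=en-US
# and is sent to <home_assistant_url>/api/stt/<platform>, matching the URL
# built from hass_config["url"].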
# -----------------------------------------------------------------------------
# Command Decoder
# -----------------------------------------------------------------------------
class CommandDecoder(RhasspyActor):
"""Command-line based decoder"""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.command: List[str] = []
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
program = os.path.expandvars(self.profile.get("speech_to_text.command.program"))
arguments = [
os.path.expandvars(str(a))
for a in self.profile.get("speech_to_text.command.arguments", [])
]
self.command = [program] + arguments
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, TranscribeWav):
text = self.transcribe_wav(message.wav_data)
self.send(message.receiver or sender, WavTranscription(text))
def transcribe_wav(self, wav_data: bytes) -> str:
"""Get text from WAV using external program."""
try:
self._logger.debug(self.command)
# WAV -> STDIN -> STDOUT -> text
return (
subprocess.run(
self.command, check=True, input=wav_data, stdout=subprocess.PIPE
)
.stdout.decode()
.strip()
)
except Exception:
self._logger.exception("transcribe_wav")
return ""
|
the-stack_106_22533 | #
# Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from build_definitions import *
class GPerfToolsDependency(Dependency):
def __init__(self):
super(GPerfToolsDependency, self).__init__(
'gperftools', '2.7',
'https://github.com/gperftools/gperftools/releases/download/gperftools-{0}/'
'gperftools-{0}.tar.gz',
BUILD_GROUP_INSTRUMENTED)
self.copy_sources = True
self.patch_version = 0
self.patch_strip = 1
self.post_patch = ['autoreconf', '-fvi']
def build(self, builder):
log_prefix = builder.log_prefix(self)
os.environ["YB_REMOTE_COMPILATION"] = "0"
log_output(log_prefix, ['./configure', '--prefix={}'.format(builder.prefix),
'--enable-frame-pointers', '--enable-heap-checker', '--with-pic'])
log_output(log_prefix, ['make', 'clean'])
log_output(log_prefix, ['make', 'install', '-j', '1'])
def should_build(self, instrumented):
return not instrumented
|
the-stack_106_22534 | import random
import numpy as np
def downsample(data_numpy, step, random_sample=True):
# input: C,T,V,M
begin = np.random.randint(step) if random_sample else 0
return data_numpy[:, begin::step, :, :]
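# Editor's example (illustrative): for data_numpy of shape (C, T, V, M) =
# (3, 300, 18, 2) and step=2, downsample returns shape (3, 150, 18, 2); the
# random start only shifts which frames are kept, not the output length.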
def temporal_slice(data_numpy, step):
# input: C,T,V,M
C, T, V, M = data_numpy.shape
return data_numpy.reshape(C, T // step, step, V, M).transpose(
(0, 1, 3, 2, 4)).reshape(C, T // step, V, step * M)
def mean_subtractor(data_numpy, mean):
# input: C,T,V,M
# naive version
if mean == 0:
return
C, T, V, M = data_numpy.shape
valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
begin = valid_frame.argmax()
end = len(valid_frame) - valid_frame[::-1].argmax()
data_numpy[:, :end, :, :] = data_numpy[:, :end, :, :] - mean
return data_numpy
def auto_pading(data_numpy, size, random_pad=False):
C, T, V, M = data_numpy.shape
if T < size:
begin = random.randint(0, size - T) if random_pad else 0
data_numpy_paded = np.zeros((C, size, V, M))
data_numpy_paded[:, begin:begin + T, :, :] = data_numpy
return data_numpy_paded
else:
return data_numpy
def random_choose(data_numpy, size, auto_pad=True):
C, T, V, M = data_numpy.shape
if T == size:
return data_numpy
elif T < size:
if auto_pad:
return auto_pading(data_numpy, size, random_pad=True)
else:
return data_numpy
else:
begin = random.randint(0, T - size)
return data_numpy[:, begin:begin + size, :, :]
def random_move(data_numpy,
angle_candidate=[-10., -5., 0., 5., 10.],
scale_candidate=[0.9, 1.0, 1.1],
transform_candidate=[-0.2, -0.1, 0.0, 0.1, 0.2],
move_time_candidate=[1]):
# input: C,T,V,M
C, T, V, M = data_numpy.shape
move_time = random.choice(move_time_candidate)
node = np.arange(0, T, T * 1.0 / move_time).round().astype(int)
node = np.append(node, T)
num_node = len(node)
A = np.random.choice(angle_candidate, num_node)
S = np.random.choice(scale_candidate, num_node)
T_x = np.random.choice(transform_candidate, num_node)
T_y = np.random.choice(transform_candidate, num_node)
a = np.zeros(T)
s = np.zeros(T)
t_x = np.zeros(T)
t_y = np.zeros(T)
# linspace
for i in range(num_node - 1):
a[node[i]:node[i + 1]] = np.linspace(
A[i], A[i + 1], node[i + 1] - node[i]) * np.pi / 180
s[node[i]:node[i + 1]] = np.linspace(S[i], S[i + 1],
node[i + 1] - node[i])
t_x[node[i]:node[i + 1]] = np.linspace(T_x[i], T_x[i + 1],
node[i + 1] - node[i])
t_y[node[i]:node[i + 1]] = np.linspace(T_y[i], T_y[i + 1],
node[i + 1] - node[i])
theta = np.array([[np.cos(a) * s, -np.sin(a) * s],
[np.sin(a) * s, np.cos(a) * s]])
# perform transformation
for i_frame in range(T):
xy = data_numpy[0:2, i_frame, :, :]
new_xy = np.dot(theta[:, :, i_frame], xy.reshape(2, -1))
new_xy[0] += t_x[i_frame]
new_xy[1] += t_y[i_frame]
data_numpy[0:2, i_frame, :, :] = new_xy.reshape(2, V, M)
return data_numpy
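# Editor's example (illustrative): random_move applies a per-frame rotation,
# scale and translation to the (x, y) channels only, so
#   moved = random_move(data_numpy.copy())
# keeps the input shape (C, T, V, M) and leaves channels beyond index 1
# (e.g. confidence scores) untouched; note it modifies its argument in place.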
def random_shift(data_numpy):
C, T, V, M = data_numpy.shape
data_shift = np.zeros(data_numpy.shape)
valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
begin = valid_frame.argmax()
end = len(valid_frame) - valid_frame[::-1].argmax()
size = end - begin
bias = random.randint(0, T - size)
data_shift[:, bias:bias + size, :, :] = data_numpy[:, begin:end, :, :]
return data_shift
def openpose_match(data_numpy):
C, T, V, M = data_numpy.shape
assert (C == 3)
score = data_numpy[2, :, :, :].sum(axis=1)
# the rank of body confidence in each frame (shape: T-1, M)
rank = (-score[0:T - 1]).argsort(axis=1).reshape(T - 1, M)
# data of frame 1
xy1 = data_numpy[0:2, 0:T - 1, :, :].reshape(2, T - 1, V, M, 1)
# data of frame 2
xy2 = data_numpy[0:2, 1:T, :, :].reshape(2, T - 1, V, 1, M)
# square of distance between frame 1&2 (shape: T-1, M, M)
distance = ((xy2 - xy1) ** 2).sum(axis=2).sum(axis=0)
# match pose
forward_map = np.zeros((T, M), dtype=int) - 1
forward_map[0] = range(M)
for m in range(M):
choose = (rank == m)
forward = distance[choose].argmin(axis=1)
for t in range(T - 1):
distance[t, :, forward[t]] = np.inf
forward_map[1:][choose] = forward
assert (np.all(forward_map >= 0))
# string data
for t in range(T - 1):
forward_map[t + 1] = forward_map[t + 1][forward_map[t]]
# generate data
new_data_numpy = np.zeros(data_numpy.shape)
for t in range(T):
new_data_numpy[:, t, :, :] = data_numpy[:, t, :, forward_map[
t]].transpose(1, 2, 0)
data_numpy = new_data_numpy
# score sort
trace_score = data_numpy[2, :, :, :].sum(axis=1).sum(axis=0)
rank = (-trace_score).argsort()
data_numpy = data_numpy[:, :, :, rank]
return data_numpy
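if __name__ == "__main__":
    # Editor's demo (not part of the original file): exercise the augmentation
    # helpers above on random skeleton data of shape (C, T, V, M).
    demo = np.random.randn(3, 64, 18, 2)
    print(downsample(demo, step=2).shape)      # (3, 32, 18, 2)
    print(auto_pading(demo, size=128).shape)   # (3, 128, 18, 2)
    print(random_choose(demo, size=32).shape)  # (3, 32, 18, 2)
    print(random_move(demo.copy()).shape)      # (3, 64, 18, 2)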
|
the-stack_106_22535 | import logging
import os
from collections import OrderedDict
from typing import List
from dateutil.parser import parse
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.exceptions import ClassInstantiationError
from great_expectations.render.util import num_to_str
from ...core.expectation_validation_result import ExpectationSuiteValidationResult
from ...core.run_identifier import RunIdentifier
from ...validation_operators.types.validation_operator_result import (
ValidationOperatorResult,
)
from ..types import (
CollapseContent,
RenderedDocumentContent,
RenderedHeaderContent,
RenderedMarkdownContent,
RenderedSectionContent,
RenderedStringTemplateContent,
RenderedTableContent,
TextContent,
)
from .renderer import Renderer
logger = logging.getLogger(__name__)
class ValidationResultsPageRenderer(Renderer):
def __init__(self, column_section_renderer=None, run_info_at_end: bool = False):
"""
Args:
column_section_renderer:
run_info_at_end: Move the run info (Info, Batch Markers, Batch Kwargs) to the end
of the rendered output rather than after Statistics.
"""
super().__init__()
if column_section_renderer is None:
column_section_renderer = {
"class_name": "ValidationResultsColumnSectionRenderer"
}
module_name = "great_expectations.render.renderer.column_section_renderer"
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_environment={},
config_defaults={
"module_name": column_section_renderer.get("module_name", module_name)
},
)
if not self._column_section_renderer:
raise ClassInstantiationError(
module_name=module_name,
package_name=None,
class_name=column_section_renderer["class_name"],
)
self.run_info_at_end = run_info_at_end
def render_validation_operator_result(
self, validation_operator_result: ValidationOperatorResult
) -> List[RenderedDocumentContent]:
"""
Render a ValidationOperatorResult which can have multiple ExpectationSuiteValidationResult
Args:
validation_operator_result: ValidationOperatorResult
Returns:
List[RenderedDocumentContent]
"""
return [
self.render(validation_result)
for validation_result in validation_operator_result.list_validation_results()
]
# TODO: deprecate dual batch api support in 0.14
def render(self, validation_results: ExpectationSuiteValidationResult):
run_id = validation_results.meta["run_id"]
if isinstance(run_id, str):
try:
run_time = parse(run_id).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
except (ValueError, TypeError):
run_time = "__none__"
run_name = run_id
elif isinstance(run_id, dict):
run_name = run_id.get("run_name") or "__none__"
run_time = run_id.get("run_time") or "__none__"
elif isinstance(run_id, RunIdentifier):
run_name = run_id.run_name or "__none__"
run_time = run_id.run_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
expectation_suite_name = validation_results.meta["expectation_suite_name"]
batch_kwargs = (
validation_results.meta.get("batch_kwargs", {})
or validation_results.meta.get("batch_spec", {})
or {}
)
# add datasource key to batch_kwargs if missing
if "datasource" not in batch_kwargs and "datasource" not in batch_kwargs:
# check if expectation_suite_name follows datasource.batch_kwargs_generator.data_asset_name.suite_name pattern
if len(expectation_suite_name.split(".")) == 4:
if "batch_kwargs" in validation_results.meta:
batch_kwargs["datasource"] = expectation_suite_name.split(".")[0]
else:
batch_kwargs["datasource"] = expectation_suite_name.split(".")[0]
# Group EVRs by column
columns = {}
for evr in validation_results.results:
if "column" in evr.expectation_config.kwargs:
column = evr.expectation_config.kwargs["column"]
else:
column = "Table-Level Expectations"
if column not in columns:
columns[column] = []
columns[column].append(evr)
ordered_columns = Renderer._get_column_list_from_evrs(validation_results)
overview_content_blocks = [
self._render_validation_header(validation_results),
self._render_validation_statistics(validation_results=validation_results),
]
collapse_content_blocks = [
self._render_validation_info(validation_results=validation_results)
]
if validation_results.meta.get("batch_markers"):
collapse_content_blocks.append(
self._render_nested_table_from_dict(
input_dict=validation_results["meta"].get("batch_markers"),
header="Batch Markers",
)
)
if validation_results.meta.get("batch_kwargs"):
collapse_content_blocks.append(
self._render_nested_table_from_dict(
input_dict=validation_results.meta.get("batch_kwargs"),
header="Batch Kwargs",
)
)
if validation_results.meta.get("batch_parameters"):
collapse_content_blocks.append(
self._render_nested_table_from_dict(
input_dict=validation_results.meta.get("batch_parameters"),
header="Batch Parameters",
)
)
if validation_results.meta.get("batch_spec"):
collapse_content_blocks.append(
self._render_nested_table_from_dict(
input_dict=validation_results.meta.get("batch_spec"),
header="Batch Spec",
)
)
if validation_results.meta.get("batch_request"):
collapse_content_blocks.append(
self._render_nested_table_from_dict(
input_dict=validation_results.meta.get("batch_request"),
header="Batch Definition",
)
)
collapse_content_block = CollapseContent(
**{
"collapse_toggle_link": "Show more info...",
"collapse": collapse_content_blocks,
"styling": {
"body": {"classes": ["card", "card-body"]},
"classes": ["col-12", "p-1"],
},
}
)
if not self.run_info_at_end:
overview_content_blocks.append(collapse_content_block)
sections = [
RenderedSectionContent(
**{
"section_name": "Overview",
"content_blocks": overview_content_blocks,
}
)
]
if "Table-Level Expectations" in columns:
sections += [
self._column_section_renderer.render(
validation_results=columns["Table-Level Expectations"]
)
]
sections += [
self._column_section_renderer.render(
validation_results=columns[column],
)
for column in ordered_columns
]
if self.run_info_at_end:
sections += [
RenderedSectionContent(
**{
"section_name": "Run Info",
"content_blocks": collapse_content_blocks,
}
)
]
data_asset_name = batch_kwargs.get("data_asset_name")
# Determine whether we have a custom run_name
try:
run_name_as_time = parse(run_name)
except ValueError:
run_name_as_time = None
try:
run_time_datetime = parse(run_time)
except ValueError:
run_time_datetime = None
include_run_name: bool = False
if run_name_as_time != run_time_datetime and run_name_as_time != "__none__":
include_run_name = True
page_title = "Validations / " + str(expectation_suite_name)
if data_asset_name:
page_title += " / " + str(data_asset_name)
if include_run_name:
page_title += " / " + str(run_name)
page_title += " / " + str(run_time)
return RenderedDocumentContent(
**{
"renderer_type": "ValidationResultsPageRenderer",
"page_title": page_title,
"batch_kwargs": batch_kwargs
if "batch_kwargs" in validation_results.meta
else None,
"batch_spec": batch_kwargs
if "batch_spec" in validation_results.meta
else None,
"expectation_suite_name": expectation_suite_name,
"sections": sections,
"utm_medium": "validation-results-page",
}
)
@classmethod
def _render_validation_header(cls, validation_results):
success = validation_results.success
expectation_suite_name = validation_results.meta["expectation_suite_name"]
expectation_suite_path_components = (
[".." for _ in range(len(expectation_suite_name.split(".")) + 3)]
+ ["expectations"]
+ str(expectation_suite_name).split(".")
)
expectation_suite_path = (
os.path.join(*expectation_suite_path_components) + ".html"
)
# TODO: deprecate dual batch api support in 0.14
batch_kwargs = validation_results.meta.get(
"batch_kwargs", {}
) or validation_results.meta.get("batch_spec", {})
data_asset_name = batch_kwargs.get("data_asset_name")
if success:
success = "Succeeded"
html_success_icon = (
'<i class="fas fa-check-circle text-success" aria-hidden="true"></i>'
)
else:
success = "Failed"
html_success_icon = (
'<i class="fas fa-times text-danger" aria-hidden="true"></i>'
)
return RenderedHeaderContent(
**{
"content_block_type": "header",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Overview",
"tag": "h5",
"styling": {"classes": ["m-0"]},
},
}
),
"subheader": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "${suite_title} ${expectation_suite_name}\n ${data_asset} ${data_asset_name}\n ${status_title} ${html_success_icon} ${success}",
"params": {
"suite_title": "Expectation Suite:",
"data_asset": "Data asset:",
"data_asset_name": data_asset_name,
"status_title": "Status:",
"expectation_suite_name": expectation_suite_name,
"success": success,
"html_success_icon": html_success_icon,
},
"styling": {
"params": {
"suite_title": {"classes": ["h6"]},
"status_title": {"classes": ["h6"]},
"expectation_suite_name": {
"tag": "a",
"attributes": {"href": expectation_suite_path},
},
},
"classes": ["mb-0", "mt-1"],
},
},
}
),
"styling": {
"classes": ["col-12", "p-0"],
"header": {"classes": ["alert", "alert-secondary"]},
},
}
)
@classmethod
def _render_validation_info(cls, validation_results):
run_id = validation_results.meta["run_id"]
if isinstance(run_id, str):
try:
run_time = parse(run_id).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
except (ValueError, TypeError):
run_time = "__none__"
run_name = run_id
elif isinstance(run_id, dict):
run_name = run_id.get("run_name") or "__none__"
run_time = run_id.get("run_time") or "__none__"
elif isinstance(run_id, RunIdentifier):
run_name = run_id.run_name or "__none__"
run_time = run_id.run_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
# TODO: Deprecate "great_expectations.__version__"
ge_version = validation_results.meta.get(
"great_expectations_version"
) or validation_results.meta.get("great_expectations.__version__")
return RenderedTableContent(
**{
"content_block_type": "table",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Info",
"tag": "h6",
"styling": {"classes": ["m-0"]},
},
}
),
"table": [
["Great Expectations Version", ge_version],
["Run Name", run_name],
["Run Time", run_time],
],
"styling": {
"classes": ["col-12", "table-responsive", "mt-1"],
"body": {
"classes": ["table", "table-sm"],
"styles": {
"margin-bottom": "0.5rem !important",
"margin-top": "0.5rem !important",
},
},
},
}
)
@classmethod
def _render_nested_table_from_dict(cls, input_dict, header=None, sub_table=False):
table_rows = []
for kwarg, value in input_dict.items():
if not isinstance(value, (dict, OrderedDict)):
table_row = [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$value",
"params": {"value": str(kwarg)},
"styling": {
"default": {"styles": {"word-break": "break-all"}},
},
},
"styling": {
"parent": {
"classes": ["pr-3"],
}
},
}
),
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$value",
"params": {"value": str(value)},
"styling": {
"default": {"styles": {"word-break": "break-all"}},
},
},
"styling": {
"parent": {
"classes": [],
}
},
}
),
]
else:
table_row = [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$value",
"params": {"value": str(kwarg)},
"styling": {
"default": {"styles": {"word-break": "break-all"}},
},
},
"styling": {
"parent": {
"classes": ["pr-3"],
}
},
}
),
cls._render_nested_table_from_dict(value, sub_table=True),
]
table_rows.append(table_row)
table_rows.sort(key=lambda row: row[0].string_template["params"]["value"])
if sub_table:
return RenderedTableContent(
**{
"content_block_type": "table",
"table": table_rows,
"styling": {
"classes": ["col-6", "table-responsive"],
"body": {"classes": ["table", "table-sm", "m-0"]},
"parent": {"classes": ["pt-0", "pl-0", "border-top-0"]},
},
}
)
else:
return RenderedTableContent(
**{
"content_block_type": "table",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": header,
"tag": "h6",
"styling": {"classes": ["m-0"]},
},
}
),
"table": table_rows,
"styling": {
"body": {
"classes": ["table", "table-sm"],
"styles": {
"margin-bottom": "0.5rem !important",
"margin-top": "0.5rem !important",
},
}
},
}
)
@classmethod
def _render_validation_statistics(cls, validation_results):
statistics = validation_results.statistics
statistics_dict = OrderedDict(
[
("evaluated_expectations", "Evaluated Expectations"),
("successful_expectations", "Successful Expectations"),
("unsuccessful_expectations", "Unsuccessful Expectations"),
("success_percent", "Success Percent"),
]
)
table_rows = []
for key, value in statistics_dict.items():
if statistics.get(key) is not None:
if key == "success_percent":
# table_rows.append([value, "{0:.2f}%".format(statistics[key])])
table_rows.append(
[value, num_to_str(statistics[key], precision=4) + "%"]
)
else:
table_rows.append([value, statistics[key]])
return RenderedTableContent(
**{
"content_block_type": "table",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Statistics",
"tag": "h6",
"styling": {"classes": ["m-0"]},
},
}
),
"table": table_rows,
"styling": {
"classes": ["col-6", "table-responsive", "mt-1", "p-1"],
"body": {
"classes": ["table", "table-sm"],
"styles": {
"margin-bottom": "0.5rem !important",
"margin-top": "0.5rem !important",
},
},
},
}
)
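# Editor's note (illustrative, values made up): for a run with 10 evaluated
# expectations of which 9 succeeded, the table built above would contain rows
# such as
#   [["Evaluated Expectations", 10], ["Successful Expectations", 9],
#    ["Unsuccessful Expectations", 1], ["Success Percent", "90%"]]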
class ExpectationSuitePageRenderer(Renderer):
def __init__(self, column_section_renderer=None):
super().__init__()
if column_section_renderer is None:
column_section_renderer = {
"class_name": "ExpectationSuiteColumnSectionRenderer"
}
module_name = "great_expectations.render.renderer.column_section_renderer"
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_environment={},
config_defaults={
"module_name": column_section_renderer.get("module_name", module_name)
},
)
if not self._column_section_renderer:
raise ClassInstantiationError(
module_name=column_section_renderer,
package_name=None,
class_name=column_section_renderer["class_name"],
)
def render(self, expectations):
columns, ordered_columns = self._group_and_order_expectations_by_column(
expectations
)
expectation_suite_name = expectations.expectation_suite_name
overview_content_blocks = [
self._render_expectation_suite_header(),
self._render_expectation_suite_info(expectations),
]
table_level_expectations_content_block = self._render_table_level_expectations(
columns
)
if table_level_expectations_content_block is not None:
overview_content_blocks.append(table_level_expectations_content_block)
asset_notes_content_block = self._render_expectation_suite_notes(expectations)
if asset_notes_content_block is not None:
overview_content_blocks.append(asset_notes_content_block)
sections = [
RenderedSectionContent(
**{
"section_name": "Overview",
"content_blocks": overview_content_blocks,
}
)
]
sections += [
self._column_section_renderer.render(expectations=columns[column])
for column in ordered_columns
if column != "_nocolumn"
]
return RenderedDocumentContent(
**{
"renderer_type": "ExpectationSuitePageRenderer",
"page_title": "Expectations / " + str(expectation_suite_name),
"expectation_suite_name": expectation_suite_name,
"utm_medium": "expectation-suite-page",
"sections": sections,
}
)
def _render_table_level_expectations(self, columns):
table_level_expectations = columns.get("_nocolumn")
if not table_level_expectations:
return None
else:
expectation_bullet_list = self._column_section_renderer.render(
expectations=table_level_expectations
).content_blocks[1]
expectation_bullet_list.header = RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Table-Level Expectations",
"tag": "h6",
"styling": {"classes": ["m-0"]},
},
}
)
return expectation_bullet_list
@classmethod
def _render_expectation_suite_header(cls):
return RenderedHeaderContent(
**{
"content_block_type": "header",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Overview",
"tag": "h5",
"styling": {"classes": ["m-0"]},
},
}
),
"styling": {
"classes": ["col-12"],
"header": {"classes": ["alert", "alert-secondary"]},
},
}
)
@classmethod
def _render_expectation_suite_info(cls, expectations):
expectation_suite_name = expectations.expectation_suite_name
# TODO: Deprecate "great_expectations.__version__"
ge_version = expectations.meta.get(
"great_expectations_version"
) or expectations.meta.get("great_expectations.__version__")
return RenderedTableContent(
**{
"content_block_type": "table",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Info",
"tag": "h6",
"styling": {"classes": ["m-0"]},
},
}
),
"table": [
["Expectation Suite Name", expectation_suite_name],
["Great Expectations Version", ge_version],
],
"styling": {
"classes": ["col-12", "table-responsive", "mt-1"],
"body": {
"classes": ["table", "table-sm"],
"styles": {
"margin-bottom": "0.5rem !important",
"margin-top": "0.5rem !important",
},
},
},
}
)
# TODO: Update tests
@classmethod
def _render_expectation_suite_notes(cls, expectations):
content = []
total_expectations = len(expectations.expectations)
columns = []
for exp in expectations.expectations:
if "column" in exp.kwargs:
columns.append(exp.kwargs["column"])
total_columns = len(set(columns))
content += [
# TODO: Leaving these two paragraphs as placeholders for later development.
# "This Expectation suite was first generated by {BasicDatasetProfiler} on {date}, using version {xxx} of Great Expectations.",
# "{name}, {name}, and {name} have also contributed additions and revisions.",
"This Expectation suite currently contains %d total Expectations across %d columns."
% (
total_expectations,
total_columns,
),
]
if "notes" in expectations.meta:
notes = expectations.meta["notes"]
note_content = None
if isinstance(notes, str):
note_content = [notes]
elif isinstance(notes, list):
note_content = notes
elif isinstance(notes, dict):
if "format" in notes:
if notes["format"] == "string":
if isinstance(notes["content"], str):
note_content = [notes["content"]]
elif isinstance(notes["content"], list):
note_content = notes["content"]
else:
logger.warning(
"Unrecognized Expectation suite notes format. Skipping rendering."
)
elif notes["format"] == "markdown":
if isinstance(notes["content"], str):
note_content = [
RenderedMarkdownContent(
**{
"content_block_type": "markdown",
"markdown": notes["content"],
"styling": {"parent": {}},
}
)
]
elif isinstance(notes["content"], list):
note_content = [
RenderedMarkdownContent(
**{
"content_block_type": "markdown",
"markdown": note,
"styling": {"parent": {}},
}
)
for note in notes["content"]
]
else:
logger.warning(
"Unrecognized Expectation suite notes format. Skipping rendering."
)
else:
logger.warning(
"Unrecognized Expectation suite notes format. Skipping rendering."
)
if note_content is not None:
content += note_content
return TextContent(
**{
"content_block_type": "text",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Notes",
"tag": "h6",
"styling": {"classes": ["m-0"]},
},
}
),
"text": content,
"styling": {
"classes": ["col-12", "table-responsive", "mt-1"],
"body": {"classes": ["table", "table-sm"]},
},
}
)
class ProfilingResultsPageRenderer(Renderer):
def __init__(self, overview_section_renderer=None, column_section_renderer=None):
super().__init__()
if overview_section_renderer is None:
overview_section_renderer = {
"class_name": "ProfilingResultsOverviewSectionRenderer"
}
if column_section_renderer is None:
column_section_renderer = {
"class_name": "ProfilingResultsColumnSectionRenderer"
}
module_name = "great_expectations.render.renderer.profiling_results_overview_section_renderer"
self._overview_section_renderer = instantiate_class_from_config(
config=overview_section_renderer,
runtime_environment={},
config_defaults={
"module_name": overview_section_renderer.get("module_name", module_name)
},
)
if not self._overview_section_renderer:
raise ClassInstantiationError(
module_name=module_name,
package_name=None,
class_name=overview_section_renderer["class_name"],
)
module_name = "great_expectations.render.renderer.column_section_renderer"
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_environment={},
config_defaults={
"module_name": column_section_renderer.get("module_name", module_name)
},
)
if not self._column_section_renderer:
raise ClassInstantiationError(
module_name=module_name,
package_name=None,
class_name=column_section_renderer["class_name"],
)
def render(self, validation_results):
run_id = validation_results.meta["run_id"]
if isinstance(run_id, str):
try:
run_time = parse(run_id).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
except (ValueError, TypeError):
run_time = "__none__"
run_name = run_id
elif isinstance(run_id, dict):
run_name = run_id.get("run_name") or "__none__"
run_time = run_id.get("run_time") or "__none__"
elif isinstance(run_id, RunIdentifier):
run_name = run_id.run_name or "__none__"
run_time = run_id.run_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
expectation_suite_name = validation_results.meta["expectation_suite_name"]
batch_kwargs = validation_results.meta.get(
"batch_kwargs", {}
) or validation_results.meta.get("batch_spec", {})
# add datasource key to batch_kwargs if missing
if "datasource" not in batch_kwargs and "datasource" not in batch_kwargs:
# check if expectation_suite_name follows datasource.batch_kwargs_generator.data_asset_name.suite_name pattern
if len(expectation_suite_name.split(".")) == 4:
if "batch_kwargs" in validation_results.meta:
batch_kwargs["datasource"] = expectation_suite_name.split(".")[0]
else:
batch_kwargs["datasource"] = expectation_suite_name.split(".")[0]
# Group EVRs by column
# TODO: When we implement a ValidationResultSuite class, this method will move there.
columns = self._group_evrs_by_column(validation_results)
ordered_columns = Renderer._get_column_list_from_evrs(validation_results)
column_types = self._overview_section_renderer._get_column_types(
validation_results
)
data_asset_name = batch_kwargs.get("data_asset_name")
# Determine whether we have a custom run_name
try:
run_name_as_time = parse(run_name)
except ValueError:
run_name_as_time = None
try:
run_time_datetime = parse(run_time)
except ValueError:
run_time_datetime = None
include_run_name: bool = False
if run_name_as_time != run_time_datetime and run_name_as_time != "__none__":
include_run_name = True
page_title = "Profiling Results / " + str(expectation_suite_name)
if data_asset_name:
page_title += " / " + str(data_asset_name)
if include_run_name:
page_title += " / " + str(run_name)
page_title += " / " + str(run_time)
return RenderedDocumentContent(
**{
"renderer_type": "ProfilingResultsPageRenderer",
"page_title": page_title,
"expectation_suite_name": expectation_suite_name,
"utm_medium": "profiling-results-page",
"batch_kwargs": batch_kwargs
if "batch_kwargs" in validation_results.meta
else None,
"batch_spec": batch_kwargs
if "batch_spec" in validation_results.meta
else None,
"sections": [
self._overview_section_renderer.render(
validation_results, section_name="Overview"
)
]
+ [
self._column_section_renderer.render(
columns[column],
section_name=column,
column_type=column_types.get(column),
)
for column in ordered_columns
],
}
)
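# Illustrative usage sketch (editor's addition, not part of the original
# module). The validation_result object is assumed to come from a Great
# Expectations validation run and to carry the meta fields read in render().
#
#   renderer = ValidationResultsPageRenderer(run_info_at_end=True)
#   document = renderer.render(validation_result)   # -> RenderedDocumentContent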
|
the-stack_106_22536 | import json
from unittest.mock import ANY
import pytest
import requests
import schemathesis
from schemathesis.models import APIOperation, Case, OperationDefinition
from schemathesis.parameters import ParameterSet, PayloadAlternatives
from schemathesis.specs.openapi.links import Link, get_container, get_links
from schemathesis.specs.openapi.parameters import OpenAPI20Body, OpenAPI30Body, OpenAPI30Parameter
from schemathesis.stateful import ParsedData, Stateful
API_OPERATION = APIOperation(
path="/users/{user_id}",
method="get",
verbose_name="GET /users/{user_id}",
definition=ANY,
schema=ANY,
base_url=ANY,
path_parameters=ParameterSet(
[
OpenAPI30Parameter({"in": "path", "name": "user_id", "schema": {"type": "integer"}}),
]
),
query=ParameterSet(
[
OpenAPI30Parameter({"in": "query", "name": "code", "schema": {"type": "integer"}}),
OpenAPI30Parameter({"in": "query", "name": "user_id", "schema": {"type": "integer"}}),
OpenAPI30Parameter({"in": "query", "name": "common", "schema": {"type": "integer"}}),
]
),
)
LINK = Link(
name="GetUserByUserId",
operation=API_OPERATION,
parameters={"path.user_id": "$response.body#/id", "query.user_id": "$response.body#/id"},
)
@pytest.fixture(scope="module")
def case():
return Case(API_OPERATION)
@pytest.fixture(scope="module")
def response():
response = requests.Response()
response._content = b'{"id": 5}'
response.status_code = 201
response.headers["Content-Type"] = "application/json"
return response
@pytest.mark.parametrize(
"url, expected",
(
(
"/users/",
[
Link(
name="GetUserByUserId",
operation=APIOperation(
path="/users/{user_id}",
method="get",
verbose_name="GET /users/{user_id}",
definition=ANY,
schema=ANY,
base_url=ANY,
path_parameters=ANY,
query=ANY,
),
parameters={"path.user_id": "$response.body#/id", "query.user_id": "$response.body#/id"},
),
Link(
name="UpdateUserById",
operation=ANY,
parameters={"user_id": "$response.body#/id"},
),
],
),
("/unknown", []),
),
)
@pytest.mark.operations("create_user", "get_user", "update_user")
def test_get_links(openapi3_base_url, schema_url, url, expected):
schema = schemathesis.from_uri(schema_url)
response = requests.post(f"{openapi3_base_url}{url}", json={"first_name": "TEST", "last_name": "TEST"}, timeout=1)
tests = schema["/users/"]["POST"].get_stateful_tests(response, Stateful.links)
assert len(tests) == len(expected)
for test, value in zip(tests, expected):
assert test.name == value.name
assert test.parameters == value.parameters
def test_response_type(case, empty_open_api_3_schema):
# See GH-1068
# When runtime expression for `requestBody` contains a reference to the whole body
empty_open_api_3_schema["paths"] = {
"/users/{user_id}/": {
"get": {
"operationId": "getUser",
"parameters": [
{"in": "query", "name": "user_id", "required": True, "schema": {"type": "string"}},
],
"responses": {
"200": {
"description": "OK",
"links": {
"UpdateUserById": {
"operationRef": "#/paths/~1users~1{user_id}~1/patch",
"parameters": {"user_id": "$response.body#/id"},
"requestBody": "$response.body",
}
},
},
"404": {"description": "Not found"},
},
},
"patch": {
"operationId": "updateUser",
"parameters": [
{"in": "query", "name": "user_id", "required": True, "schema": {"type": "string"}},
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"id": {"type": "string", "minLength": 3},
"first_name": {"type": "string", "minLength": 3},
"last_name": {"type": "string", "minLength": 3},
},
"required": ["first_name", "last_name"],
"additionalProperties": False,
}
}
},
"required": True,
},
"responses": {"200": {"description": "OK"}},
},
}
}
schema = schemathesis.from_dict(empty_open_api_3_schema)
response = requests.Response()
body = b'{"id": "foo", "first_name": "TEST", "last_name": "TEST"}'
response._content = body
response.status_code = 200
response.headers["Content-Type"] = "application/json"
tests = schema["/users/{user_id}/"]["GET"].get_stateful_tests(response, Stateful.links)
assert len(tests) == 1
link = tests[0]
parsed = link.parse(case, response)
assert parsed.parameters == {"user_id": "foo"}
# Then the parsed result should body with the actual type of the JSON value
assert parsed.body == json.loads(body)
def test_parse(case, response):
assert LINK.parse(case, response) == ParsedData({"path.user_id": 5, "query.user_id": 5})
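# Editor's note (illustrative, not an original test): "$response.body#/id" in
# LINK.parameters is an OpenAPI runtime expression; against the fixture
# response body {"id": 5} it resolves to 5 for both mapped parameters, which
# is exactly what the ParsedData assertion above checks.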
EXPECTED_PATH_PARAMETERS = [
{
"additionalProperties": False,
"properties": {"user_id": {"const": 1, "in": "path", "name": "user_id", "type": "integer"}},
"required": ["user_id"],
"type": "object",
},
{
"additionalProperties": False,
"properties": {"user_id": {"const": 3, "in": "path", "name": "user_id", "type": "integer"}},
"required": ["user_id"],
"type": "object",
},
]
@pytest.mark.parametrize(
"value, path_user_id, query_user_id, code",
(
(
[{"path.user_id": 1, "query.user_id": 2, "code": 7}, {"path.user_id": 3, "query.user_id": 4, "code": 5}],
[1, 3],
{"enum": [2, 4]},
{"enum": [7, 5]},
),
(
[{"path.user_id": 1}, {"path.user_id": 3}],
[1, 3],
{"type": "integer"},
{"type": "integer"},
),
),
)
def test_make_operation(value, path_user_id, query_user_id, code):
operation = LINK.make_operation(list(map(ParsedData, value)))
# There is only one path parameter
assert len(operation.path_parameters) == 1
assert sorted(operation.path_parameters[0].definition["schema"]["enum"], key=json.dumps) == path_user_id
assert len(operation.query) == 3
for item in operation.query:
schema = item.definition["schema"]
if item.name == "code":
assert_schema(schema, code)
elif item.name == "user_id":
assert_schema(schema, query_user_id)
else:
assert schema == {"type": "integer"}
def assert_schema(target, expected):
if "enum" in expected:
assert len(target) == 1
assert sorted(target["enum"]) == sorted(expected["enum"])
else:
assert target == expected
def test_make_operation_single():
operation = LINK.make_operation([ParsedData({"path.user_id": 1, "query.user_id": 2, "code": 7})])
assert len(operation.path_parameters) == 1
assert isinstance(operation.path_parameters[0], OpenAPI30Parameter)
assert operation.path_parameters[0].definition == {"in": "path", "name": "user_id", "schema": {"enum": [1]}}
for item in operation.query:
schema = item.definition["schema"]
if item.name == "code":
assert schema == {"enum": [7]}
elif item.name == "user_id":
assert schema == {"enum": [2]}
else:
assert schema == {"type": "integer"}
BODY_SCHEMA = {"required": ["foo"], "type": "object", "properties": {"foo": {"type": "string"}}}
@pytest.mark.parametrize(
"body",
(
OpenAPI20Body(
{
"name": "attributes",
"in": "body",
"required": True,
"schema": BODY_SCHEMA,
},
media_type="application/json",
),
OpenAPI30Body(definition={"schema": BODY_SCHEMA}, media_type="application/json", required=True),
),
)
def test_make_operation_body(body):
# See GH-1069
# When `requestBody` is present in the link definition
# And in the target operation
operation = APIOperation(
path="/users/",
method="post",
verbose_name="GET /users/{user_id}",
definition=ANY,
schema=ANY,
base_url=ANY,
body=PayloadAlternatives([body]),
)
body = {"foo": "bar"} # Literal value
link = Link(
name="Link",
operation=operation,
parameters={},
request_body={"requestBody": body},
)
# Then it should be taken into account during creation a modified version of that operation
new_operation = link.make_operation([ParsedData({}, body=body)]) # Actual parsed data will contain the literal
assert new_operation.body[0].definition["schema"] == {"enum": [body]}
def test_invalid_request_body_definition():
# When a link defines `requestBody` for operation that does not accept one
operation = APIOperation(
path="/users/",
method="get",
verbose_name="GET /users/{user_id}",
definition=ANY,
schema=ANY,
base_url=ANY,
)
# Then a proper error should be triggered
with pytest.raises(ValueError):
Link(name="Link", operation=operation, parameters={}, request_body={"requestBody": {"foo": "bar"}})
@pytest.mark.parametrize("parameter", ("wrong.id", "unknown", "header.id"))
def test_make_operation_invalid_location(parameter):
with pytest.raises(
ValueError, match=f"Parameter `{parameter}` is not defined in API operation GET /users/{{user_id}}"
):
LINK.make_operation([ParsedData({parameter: 4})])
def test_get_container_invalid_location(swagger_20):
operation = APIOperation(
path="/users/{user_id}",
method="get",
schema=swagger_20,
verbose_name="GET /users/{user_id}",
definition=OperationDefinition(
raw={},
resolved={},
scope="",
parameters=[
OpenAPI30Parameter({"in": "query", "name": "code", "type": "integer"}),
OpenAPI30Parameter({"in": "query", "name": "user_id", "type": "integer"}),
OpenAPI30Parameter({"in": "query", "name": "common", "type": "integer"}),
],
),
)
case = operation.make_case()
with pytest.raises(ValueError, match="Parameter `unknown` is not defined in API operation `GET /users/{user_id}`"):
get_container(case, None, "unknown")
@pytest.mark.parametrize(
"status_code, expected",
(
(200, ["Foo"]),
(201, ["Bar"]),
),
)
def test_get_links_numeric_response_codes(status_code, openapi_30, expected):
# See GH-1226
# When API definition contains response statuses as integers
operation = openapi_30["/users"]["GET"]
link_definition = {"operationRef": "#/paths/~1users/get"}
operation.definition.resolved["responses"] = {
"200": {"description": "OK", "links": {"Foo": link_definition}},
# Could be here due to YAML parsing + disabled schema validation
201: {"description": "OK", "links": {"Bar": link_definition}},
}
response = requests.Response()
response.status_code = status_code
# Then they still should be checked, even that is not compliant with the spec
assert [link.name for link in get_links(response, operation, "links")] == expected
def test_custom_link_name(openapi_30):
# When `name` is used with `add_link`
operation = openapi_30["/users"]["GET"]
name = "CUSTOM_NAME"
openapi_30.add_link(source=operation, target=operation, status_code="200", parameters={}, name=name)
# Then the resulting link has that name
links = openapi_30.get_links(openapi_30["/users"]["GET"])
assert name in links["200"]
|
the-stack_106_22538 | from mov_sdk.utxo_manager import decode_address, address_to_script
print(address_to_script("btm2", "tn1q5zjfmndnexlx79n98wmjk6mhdd33qfwx78xt4w", "testnet"))
print(address_to_script("btm", "tm1q5zjfmndnexlx79n98wmjk6mhdd33qfwxv7pm3g", "testnet"))
from mov_sdk.mov_api import MovApi
# s1 = MovApi(mnemonic_str="")
# print(s1.main_address)
# print(s1.get_main_chain_balance())
# print(s1.get_exchange_info())
# print(s1.cross_chain_in("btm", "3"))
# print(s1.cross_chain_in("sup", "0.001"))
# print(s1.cross_chain_out("btm", 5, s1.main_address))
# print(s1.cross_chain_out("sup", 0.002, s1.main_address))
s2 = MovApi(mnemonic_str="chalk memory wear dizzy universe govern obtain eye fiber denial judge six")
asset = "btm"
amount = 0.01
address = "0xa6cb31b0a18af665eafaf48ef6a05bd8a4387309"
print(s2.cross_chain_in_to_bmc(asset, amount, address))
|
the-stack_106_22544 | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2019/11/16 20:39
contact: [email protected]
desc: Tencent - stocks - real-time quotes - tick-by-tick transaction details
Transaction details are available for download at 16:00 on each trading day.
Hong Kong stock quotes in this list are delayed by 15 minutes.
"""
from io import StringIO
import pandas as pd
import requests
def stock_zh_a_tick(code="sh600848", trade_date="20191011"):
"""
Tick-by-tick transaction details; data for the current trading day is provided at 16:00.
:param code: stock code prefixed with its market identifier, e.g. "sh600848"
:type code: str
:param trade_date: date for which to fetch the data, e.g. "20191011"
:type trade_date: str
:return: the stock's transaction details for that trading day
:rtype: pandas.DataFrame
"""
url = "http://stock.gtimg.cn/data/index.php"
params = {
"appn": "detail",
"action": "download",
"c": code,
"d": trade_date,
}
res = requests.get(url, params=params)
res.encoding = "gbk"
temp_df = pd.read_table(StringIO(res.text))
return temp_df
if __name__ == "__main__":
date_list = pd.date_range(start="20190801", end="20191111").tolist()
date_list = [item.strftime("%Y%m%d") for item in date_list]
for item in date_list:
data = stock_zh_a_tick(code="sh601872", trade_date=f"{item}")
if not data.empty:
print(data)
|
the-stack_106_22545 | import Global
import logging
from handlers.kbeServer.Editor.Interface import interface_global
# Like / unlike a comment ("dianzan")
def DoMsg_Dianzan(DB ,self_uid,uid ,wid ,lid ,siscid ,tid ,dian ,ctype):
# DEBUG_MSG("Msg_Dianzan : %d - %d - %d - %d - %d - %s" % (uid, wid, tid, dian,lid,siscid))
#logging.info("Dianzan - uid[%i],wid[%i],lid[%i],siscid[%s],tid[%i],dian[%i],ctype[%i]",(uid ,wid ,lid ,siscid ,tid ,dian ,ctype))
#print("dianzan,",uid ,wid ,lid ,siscid ,tid ,dian ,ctype)
if ctype == 0:
table_name = Global.GetWorkZanTableName(uid ,wid)
elif ctype == 1:
table_name = Global.GetCourseZanTableName(uid ,wid ,lid)
else:
table_name = Global.GetSisZanTableName(siscid)
# table_name = "tb_work_zan_" + str(uid) + "_" + str(wid)
sql = "select zan from " + table_name + " where UID = " + str(self_uid) + " and TID = " + str(tid) + ";"
data = DB.fetchone(sql,None)
_zan = -1
if data:
_zan = int(data[0])
# DEBUG_MSG("Msg_Dianzan : %d - %d " % (dian, _zan))
if dian == 0:
# Cancel a like
if _zan != 1:
return 0
else:
if _zan == 1:
return -1
# DEBUG_MSG("Msg_Dianzan 1 : %d - %d " % (dian, _zan))
sql = ""
if _zan == -1:
sql = "Insert into " + table_name + " (UID,TID,ZAN) values (" + str(self_uid) + "," + str(tid) + ",1);"
elif dian == 0:
sql = "update " + table_name + " set zan = 0 where uid = " + str(self_uid) + " and TID = " + str(tid) + ";"
else:
sql = "update " + table_name + " set zan = 1 where uid = " + str(self_uid) + " and TID = " + str(tid) + ";"
DB.edit(sql,None)
return tid
#self.client.App_MsgToClient(101, str(tid))
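# Usage sketch for DoMsg_Dianzan (hypothetical IDs; DB is the same database wrapper used above):
#   DoMsg_Dianzan(DB, self_uid=1001, uid=2002, wid=3, lid=0, siscid="", tid=17, dian=1, ctype=0)
# As implemented above it returns tid on success, 0 when cancelling a like that does not exist,
# and -1 when liking a comment that is already liked.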
# Rate / comment on a work
# wid: work ID
# uid: author ID
# score: rating, an integer > 0
# log: empty string means a rating only; non-empty text means a comment
# P_UID: non-zero means this entry is a reply to another comment
# s_lid: lesson ID
# ctype: comment category: 0 - work, 1 - marketplace course, 2 - SIS course
def DoWorkMark(DB,self_uid,s_wid,s_lid,uid,score,log,P_UID,ctype):
wid = 0
lid = 0
sis_cid = ""
if ctype == 0:
wid = int(s_wid)
elif ctype == 1:
wid = int(s_wid)
lid = s_lid
else:
sis_cid = s_wid
if score < 0:
return 0
if ctype == 0:
table_name = Global.GetWorkLogTableName(uid,wid)
elif ctype == 1:
table_name = Global.GetCourseLogTableName(uid,wid,lid)
else:
table_name = Global.GetSisLogTableName(sis_cid)
_exist = interface_global.Global_TableExist(table_name,DB)
if not _exist:
data = DB.callprocAll('CreateWorkLogTable', (uid,wid,lid,ctype,sis_cid))
if data:
code = int(data[0])
if code == 0:
return -1  # work does not exist
DB.callprocAll('CreateWorkZanTable', (uid, wid,lid,ctype,sis_cid))
_succ = 0
if score > 0:
if ctype == 0 or ctype == 1:
if uid == self_uid:
return -3  # authors cannot rate their own work (ctype 0/1)
_succ = 1
sql = "select ID,score from "+table_name+" where UID = "+str(self_uid) + " and log = '';"
data = DB.fetchone(sql,None)
_score = 0
_id = 0
if data:
_id = int(data[0])
_score = int(data[1])
if _id > 0 and _score > 0:
return -2  # already rated
if _id == 0:
sql = "Insert into "+table_name+" (UID,score,log) values ("+str(self_uid)+","+str(score)+",'');"
else:
sql = "update " + table_name + " set score = "+str(score) + " where ID = "+str(_id)
DB.edit(sql,None)
# Forward a notification message
# self.SendMsgOnLine(0, 1, _author_name, _work_name)
# self.SendMsgOnLine(1, 1, _author_name, _work_name)
if len(log) > 0:
_succ = 2
sql = "Insert into " + table_name + " (UID,score,log,PID) values (" + str(self_uid) + ",0,'"+log+"',"+str(P_UID)+");"
DB.edit(sql,None)
# Forward a notification message
# self.SendMsgOnLine(0,2,_author_name,_work_name)
# self.SendMsgOnLine(1, 2, _author_name, _work_name)
return _succ
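# Usage sketch for DoWorkMark (hypothetical IDs; with ctype=0 the target is a work, so s_wid is its numeric id):
#   DoWorkMark(DB, self_uid=1001, s_wid=3, s_lid=0, uid=2002, score=5, log="", P_UID=0, ctype=0)       # rating only
#   DoWorkMark(DB, self_uid=1001, s_wid=3, s_lid=0, uid=2002, score=0, log="Nice!", P_UID=0, ctype=0)   # comment only
# Return codes as implemented above: 1 rating stored, 2 comment stored, -1 work missing,
# -2 already rated, -3 rating your own work.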
# Fetch rating data for a work
def DoWorkScoreData(DB,self_uid,wid,lid,uid,sis_cid,ctype):
_base_string = ""
#table_name = "tb_work_log_" + str(uid) + "_" + str(wid)
if ctype == 0:
table_name = Global.GetWorkLogTableName(uid,wid)
elif ctype == 1:
table_name = Global.GetCourseLogTableName(uid,wid,lid)
else:
table_name = Global.GetSisLogTableName(sis_cid)
_exist = interface_global.Global_TableExist(table_name,DB)
_pf = 0
if _exist:
sql = "select score from " + table_name + " where LOG = '' and uid = " + str(self_uid) + ";"
data = DB.fetchone(sql,None)
if data:
_pf = int(data[0])
else:
_pf = 0
if ctype == 0 or ctype == 1:
sql = "select UserName,NickName,NickUrl from tb_userdata where uid = " + str(uid)
data = DB.fetchone(sql,None)
if data:
_base_string = data[0] + "$" + data[1] + "$" + data[2] + "$" + str(_pf)
else:
_base_string = "Feidie$飞蝶VR$$" + str(_pf)
#DEBUG_MSG(_base_string)
_score_data = ""
if _exist:
sql = "select count(ID),1 from " + table_name + " where log = '' AND SCORE = 1 union ALL select count(ID),2 from " + table_name + " where log = '' AND SCORE = 2 union ALL select count(ID),3 from " + table_name + " where log = '' AND SCORE = 3 union ALL select count(ID),4 from " + table_name + " where log = '' AND SCORE = 4 union ALL select count(ID),5 from " + table_name + " where log = '' AND SCORE = 5;"
data = DB.fetchall(sql,None)
if data:
list_data = list(data)
for minfo in list_data:
minfo_list = list(minfo)
if _score_data == "":
_score_data = str(minfo_list[1]) + "," + str(minfo_list[0])
else:
_score_data = _score_data + "&" + str(minfo_list[1]) + "," + str(minfo_list[0])
_send_data = _base_string + "@" + _score_data
return _send_data
# Fetch comment data
def DoWorkLogData(DB,self_uid,wid,lid, uid, sis_cid, PID , ipage , ilenght,ctype):
_base_string = ""
_url_string = ""
if ctype == 0:
table_name = Global.GetWorkLogTableName(uid,wid)
elif ctype == 1:
table_name = Global.GetCourseLogTableName(uid,wid,lid)
else:
table_name = Global.GetSisLogTableName(sis_cid)
#table_name1 = "tb_work_zan_" + str(uid) + "_" + str(wid)
if ctype == 0:
table_name1 = Global.GetWorkZanTableName(uid,wid)
elif ctype == 1:
table_name1 = Global.GetCourseZanTableName(uid,wid,lid)
else:
table_name1 = Global.GetSisZanTableName(sis_cid)
_exist = interface_global.Global_TableExist(table_name,DB)
sql1 = ""
if _exist:
if ipage == 0:
if PID == 0:
sql = "select t1.log,t1.`Date`,t2.UserName,t2.NickName,t2.UID,t1.ID,(SELECT COUNT(ID) FROM " + table_name + " WHERE PID = t1.ID) AS HF,(SELECT COUNT(ID) FROM " + table_name1 + " WHERE TID = t1.ID AND ZAN = 1) AS ZAN,(select zan from " + table_name1 + " where UID = " + str(self_uid) + " and TID = t1.ID) as IsZan from " + table_name + " as t1 inner join tb_userdata as t2 on t1.uid = t2.uid and t1.log != '' AND T1.PID = 0 order by t1.`Date` desc;"
sql1 = "select UID,NickUrl from tb_userdata where uid in ( select UID from " + table_name + " WHERE LOG != '' AND PID = 0 GROUP BY UID );"
else:
sql = "select t1.log,t1.`Date`,t2.UserName,t2.NickName,t2.UID,t1.ID,(SELECT COUNT(ID) FROM " + table_name + " WHERE PID = t1.ID) AS HF,(SELECT COUNT(ID) FROM " + table_name1 + " WHERE TID = t1.ID AND ZAN = 1) AS ZAN,(select zan from " + table_name1 + " where UID = " + str(self_uid) + " and TID = t1.ID) as IsZan from " + table_name + " as t1 inner join tb_userdata as t2 on t1.uid = t2.uid and t1.log != '' AND T1.PID = " + str(PID) + " order by t1.`Date` desc;"
sql1 = "select UID,NickUrl from tb_userdata where uid in ( select UID from " + table_name + " WHERE LOG != '' AND PID = " + str(PID) + " GROUP BY UID );"
else:
if PID == 0:
sql = "select t1.log,t1.`Date`,t2.UserName,t2.NickName,t2.UID,t1.ID,(SELECT COUNT(ID) FROM " + table_name + " WHERE PID = t1.ID) AS HF,(SELECT COUNT(ID) FROM " + table_name1 + " WHERE TID = t1.ID AND ZAN = 1) AS ZAN,(select zan from " + table_name1 + " where UID = " + str(self_uid) + " and TID = t1.ID) as IsZan from " + table_name + " as t1 inner join tb_userdata as t2 on t1.uid = t2.uid and t1.log != '' AND T1.PID = 0 order by t1.`Date` desc limit " + str((ipage - 1) * ilenght) + "," + str(ilenght) + ";"
sql1 = "select UID,NickUrl from tb_userdata where uid in ( select UID from " + table_name + " WHERE LOG != '' AND PID = 0 GROUP BY UID );"
else:
sql = "select t1.log,t1.`Date`,t2.UserName,t2.NickName,t2.UID,t1.ID,(SELECT COUNT(ID) FROM " + table_name + " WHERE PID = t1.ID) AS HF,(SELECT COUNT(ID) FROM " + table_name1 + " WHERE TID = t1.ID AND ZAN = 1) AS ZAN,(select zan from " + table_name1 + " where UID = " + str(self_uid) + " and TID = t1.ID) as IsZan from " + table_name + " as t1 inner join tb_userdata as t2 on t1.uid = t2.uid and t1.log != '' AND T1.PID = " + str(PID) + " order by t1.`Date` desc limit " + str((ipage - 1) * ilenght) + "," + str(ilenght) + ";"
sql1 = "select UID,NickUrl from tb_userdata where uid in ( select UID from " + table_name + " WHERE LOG != '' AND PID = " + str(PID) + " GROUP BY UID );"
# sql = "select t1.log,t1.`Date`,t2.UserName,t2.TheName,t2.UID from " + table_name + " as t1 inner join tb_userdata as t2 on t1.uid = t2.uid and t1.log != '' order by t1.`Date` desc limit "+str((ipage-1)*ilenght)+","+str(ilenght)+";"
# sql1 = "select UID,NickUrl from tb_userdata where uid in ( select UID from " + table_name + " WHERE LOG != '' GROUP BY UID ) limit "+str((ipage-1)*ilenght)+","+str(ilenght)+";"
data = DB.fetchall(sql,None)
if data:
list_data = list(data)
for minfo in list_data:
minfo_list = list(minfo)
_ZAN_NUM = 0
if minfo_list[7] != None:
_ZAN_NUM = minfo_list[7]
_ZAN = 0
if minfo_list[8] != None:
_ZAN = minfo_list[8]
if _base_string == "":
_base_string = minfo_list[0] + "@lyyym@" + str(minfo_list[1]) + "@lyyym@" + minfo_list[2] + "@lyyym@" + minfo_list[3] + "@lyyym@" + str(minfo_list[4]) + "@lyyym@" + str(minfo_list[5]) + "@lyyym@" + str(minfo_list[6]) + "@lyyym@" + str(_ZAN_NUM) + "@lyyym@" + str(_ZAN)
else:
_base_string = _base_string + "@lyyyym@" + minfo_list[0] + "@lyyym@" + str(minfo_list[1]) + "@lyyym@" + minfo_list[2] + "@lyyym@" + minfo_list[3] + "@lyyym@" + str(minfo_list[4]) + "@lyyym@" + str(minfo_list[5]) + "@lyyym@" + str(minfo_list[6]) + "@lyyym@" + str(_ZAN_NUM) + "@lyyym@" + str(_ZAN)
data = DB.fetchall(sql1,None)
if data:
list_data = list(data)
for minfo in list_data:
minfo_list = list(minfo)
if _url_string == "":
_url_string = str(minfo_list[0]) + "@lyyym@" + minfo_list[1]
else:
_url_string = _url_string + "@lyyyym@" + str(minfo_list[0]) + "@lyyym@" + minfo_list[1]
_send_string = _url_string + "@lyyyyym@" + _base_string
return _send_string |
the-stack_106_22546 | class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BST:
def __init__(self, root_val):
self.root = Node(root_val)
def insert(self, value):
newnode = Node(value)
if self.root==None:
self.root = newnode
return
current = self.root
while True:
if value < current.value:
if current.left == None:
current.left = newnode
break
current = current.left
else:
if current.right == None:
current.right = newnode
break
current = current.right
def height(self, root):
# Height measured in edges: an empty subtree or a single leaf has height 0.
if root is None:
return 0
if root.left is None and root.right is None:
return 0
return 1 + max(self.height(root.left), self.height(root.right))
if __name__ == "__main__":
bst = BST(7)
bst.insert(10)
bst.insert(3)
bst.insert(5)
#print(bst.root.value)
print(bst.height(bst.root)) |
the-stack_106_22547 | # -*- coding: utf-8 -*-
# Copyright © 2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Process images."""
import datetime
import gzip
import os
import re
import lxml
import piexif
from PIL import ExifTags, Image
from nikola import utils
EXIF_TAG_NAMES = {}
class ImageProcessor(object):
"""Apply image operations."""
image_ext_list_builtin = ['.jpg', '.png', '.jpeg', '.gif', '.svg', '.svgz', '.bmp', '.tiff', '.webp']
def _fill_exif_tag_names(self):
"""Connect EXIF tag names to numeric values."""
if not EXIF_TAG_NAMES:
for ifd in piexif.TAGS:
for tag, data in piexif.TAGS[ifd].items():
EXIF_TAG_NAMES[tag] = data['name']
def filter_exif(self, exif, whitelist):
"""Filter EXIF data as described in the documentation."""
# Scenario 1: keep everything
if whitelist == {'*': '*'}:
return exif
# Scenario 2: keep nothing
if whitelist == {}:
return None
# Scenario 3: keep some
self._fill_exif_tag_names()
exif = exif.copy() # Don't modify in-place, it's rude
for k in list(exif.keys()):
if type(exif[k]) != dict:
pass # At least thumbnails have no fields
elif k not in whitelist:
exif.pop(k) # Not whitelisted, remove
elif k in whitelist and whitelist[k] == '*':
# Fully whitelisted, keep all
pass
else:
# Partially whitelisted
for tag in list(exif[k].keys()):
if EXIF_TAG_NAMES[tag] not in whitelist[k]:
exif[k].pop(tag)
return exif or None
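# Whitelist sketch for filter_exif (assumes the piexif-style dict produced by piexif.load above):
#   self.filter_exif(exif, {'*': '*'})                                   # keep all EXIF data
#   self.filter_exif(exif, {})                                           # strip all EXIF data
#   self.filter_exif(exif, {'0th': '*', 'Exif': ['DateTimeOriginal']})   # keep one IFD fully, one tag of another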
def resize_image(self, src, dst=None, max_size=None, bigger_panoramas=True, preserve_exif_data=False, exif_whitelist={}, preserve_icc_profiles=False, dst_paths=None, max_sizes=None):
"""Make a copy of the image in the requested size(s).
max_sizes should be a list of sizes, and the image would be resized to fit in a
square of each size (preserving aspect ratio).
dst_paths is a list of the destination paths, and should be the same length as max_sizes.
Backwards compatibility:
* If max_sizes is None, it's set to [max_size]
* If dst_paths is None, it's set to [dst]
* Either max_size or max_sizes should be set
* Either dst or dst_paths should be set
"""
if dst_paths is None:
dst_paths = [dst]
if max_sizes is None:
max_sizes = [max_size]
if len(max_sizes) != len(dst_paths):
raise ValueError('resize_image called with incompatible arguments: {} / {}'.format(dst_paths, max_sizes))
extension = os.path.splitext(src)[1].lower()
if extension in {'.svg', '.svgz'}:
self.resize_svg(src, dst_paths, max_sizes, bigger_panoramas)
return
_im = Image.open(src)
# The jpg exclusion is Issue #3332
is_animated = hasattr(_im, 'n_frames') and _im.n_frames > 1 and extension not in {'.jpg', '.jpeg'}
exif = None
if "exif" in _im.info:
exif = piexif.load(_im.info["exif"])
# Rotate according to EXIF
if "0th" in exif:
value = exif['0th'].get(piexif.ImageIFD.Orientation, 1)
if value in (3, 4):
_im = _im.transpose(Image.ROTATE_180)
elif value in (5, 6):
_im = _im.transpose(Image.ROTATE_270)
elif value in (7, 8):
_im = _im.transpose(Image.ROTATE_90)
if value in (2, 4, 5, 7):
_im = _im.transpose(Image.FLIP_LEFT_RIGHT)
exif['0th'][piexif.ImageIFD.Orientation] = 1
exif = self.filter_exif(exif, exif_whitelist)
icc_profile = _im.info.get('icc_profile') if preserve_icc_profiles else None
for dst, max_size in zip(dst_paths, max_sizes):
if is_animated: # Animated gif, leave as-is
utils.copy_file(src, dst)
continue
im = _im.copy()
size = w, h = im.size
if w > max_size or h > max_size:
size = max_size, max_size
# Panoramas get larger thumbnails because they look *awful*
if bigger_panoramas and w > 2 * h:
size = min(w, max_size * 4), min(w, max_size * 4)
try:
im.thumbnail(size, Image.ANTIALIAS)
save_args = {}
if icc_profile:
save_args['icc_profile'] = icc_profile
if exif is not None and preserve_exif_data:
# Put right size in EXIF data
w, h = im.size
if '0th' in exif:
exif["0th"][piexif.ImageIFD.ImageWidth] = w
exif["0th"][piexif.ImageIFD.ImageLength] = h
if 'Exif' in exif:
exif["Exif"][piexif.ExifIFD.PixelXDimension] = w
exif["Exif"][piexif.ExifIFD.PixelYDimension] = h
# Filter EXIF data as required
save_args['exif'] = piexif.dump(exif)
im.save(dst, **save_args)
except Exception as e:
self.logger.warning("Can't process {0}, using original "
"image! ({1})".format(src, e))
utils.copy_file(src, dst)
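# Call sketch for resize_image, covering both forms described in the docstring above (hypothetical paths):
#   self.resize_image('in.jpg', dst='out.jpg', max_size=400)                                  # legacy single-size form
#   self.resize_image('in.jpg', dst_paths=['thumb.jpg', 'large.jpg'], max_sizes=[128, 1024])  # multi-size form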
def resize_svg(self, src, dst_paths, max_sizes, bigger_panoramas):
"""Make a copy of an svg at the requested sizes."""
# Resize svg based on viewport hacking.
# note that this can also lead to enlarged svgs
if src.endswith('.svgz'):
with gzip.GzipFile(src, 'rb') as op:
xml = op.read()
else:
with open(src, 'rb') as op:
xml = op.read()
for dst, max_size in zip(dst_paths, max_sizes):
try:
tree = lxml.etree.XML(xml)
width = tree.attrib['width']
height = tree.attrib['height']
w = int(re.search("[0-9]+", width).group(0))
h = int(re.search("[0-9]+", height).group(0))
# calculate new size preserving aspect ratio.
ratio = float(w) / h
# Panoramas get larger thumbnails because they look *awful*
if bigger_panoramas and w > 2 * h:
max_size = max_size * 4
if w > h:
w = max_size
h = max_size / ratio
else:
w = max_size * ratio
h = max_size
w = int(w)
h = int(h)
tree.attrib.pop("width")
tree.attrib.pop("height")
tree.attrib['viewport'] = "0 0 %ipx %ipx" % (w, h)
if dst.endswith('.svgz'):
op = gzip.GzipFile(dst, 'wb')
else:
op = open(dst, 'wb')
op.write(lxml.etree.tostring(tree))
op.close()
except (KeyError, AttributeError) as e:
self.logger.warning("No width/height in %s. Original exception: %s" % (src, e))
utils.copy_file(src, dst)
def image_date(self, src):
"""Try to figure out the date of the image."""
if src not in self.dates:
try:
im = Image.open(src)
exif = im._getexif()
im.close()
except Exception:
exif = None
if exif is not None:
for tag, value in list(exif.items()):
decoded = ExifTags.TAGS.get(tag, tag)
if decoded in ('DateTimeOriginal', 'DateTimeDigitized'):
try:
if isinstance(value, tuple):
value = value[0]
self.dates[src] = datetime.datetime.strptime(
value, '%Y:%m:%d %H:%M:%S')
break
except ValueError: # Invalid EXIF date.
pass
if src not in self.dates:
self.dates[src] = datetime.datetime.fromtimestamp(
os.stat(src).st_mtime)
return self.dates[src]
|
the-stack_106_22548 | """
Nadel's construction problem in cpmpy.
From Rina Dechter 'Constraint Processing', page 5.
Attributes the problem to
B.A. Nadel 'Constraint satisfaction algorithms' (1989).
'''
* The recreation area should be near the lake.
* Steep slopes are to be avoided for all but the recreation area.
* Poor soil should be avoided for those developments that
involve construction, namely the apartments and the family houses.
* The highway, being noisy, should not be near the apartments,
the housing, or the recreation area.
* The dumpsite should not be visible from the apartments,
the houses, or the lake.
* Lots 3 and 4 have bad soil.
* Lots 3, 4, 7, and 8 are on steep slopes .
* Lots 2, 3, and 4 are near the lake.
* Lots 1 and 2 are near the highway.
'''
Comment:
I have not found any model that satisfies all the constraints.
However this 'soft' approach counts the broken constraints
and minimizes to 1 broken constraint.
The model (which - of course - could be erroneous) generates 28 different
solutions. The broken constraints are either
- steep_slopes constraints or
- near_dump constraints.
This cpmpy model was written by Hakan Kjellerstrand ([email protected])
See also my cpmpy page: http://hakank.org/cpmpy/
"""
from cpmpy import *
import cpmpy.solvers
import numpy as np
from cpmpy_hakank import *
def nadel(total_broken_val=None):
n = 8 # number of lots
d = 5 # number of developments
# * Lots 3 and 4 have bad soil.
# * Lots 3, 4, 7, and 8 are on steep slopes .
# * Lots 2, 3, and 4 are near the lake.
# * Lots 1 and 2 are near the highway.
# 1, 2, 3, 4, 5, 6, 7, 8
bad_soil = cpm_array([0, 0, 1, 1, 0, 0, 0, 0])
steep_slopes = cpm_array([0, 0, 1, 1, 0, 0, 1, 1])
near_lake = cpm_array([0, 1, 1, 1, 0, 0, 0, 0])
near_highway = cpm_array([1, 1, 0, 0, 0, 0, 0, 0])
# matrix with the proximity of lots
# (for the dump placement)
near_lots = np.array([
# 1 2 3 4 5 6 7 8
[0, 1, 0, 0, 1, 0, 0, 0], # 1
[1, 0, 1, 0, 0, 1, 0, 0], # 2
[0, 1, 0, 1, 0, 0, 1, 0], # 3
[0, 0, 1, 0, 0, 0, 0, 1], # 4
[1, 0, 0, 0, 0, 1, 0, 0], # 5
[0, 1, 0, 0, 1, 0, 1, 0], # 6
[0, 0, 1, 0, 0, 1, 0, 1], # 7
[0, 0, 0, 1, 0, 0, 1, 0] # 8
])
# alternative neighborhood matrix,
# where diagonals also makes a neighbour.
# This generates 8 models (all with 1 broken constraint)
#
# near_lots = [
# # 1 2 3 4 5 6 7 8
# [0, 1, 0, 0, 1, 1, 0, 0], # 1
# [1, 0, 1, 0, 1, 1, 1, 0], # 2
# [0, 1, 0, 1, 0, 1, 1, 1], # 3
# [0, 0, 1, 0, 0, 0, 1, 1], # 4
# [1, 1, 0, 0, 0, 1, 0, 0], # 5
# [1, 1, 1, 0, 1, 0, 1, 0], # 6
# [0, 1, 1, 1, 0, 1, 0, 1], # 7
# [0, 0, 1, 1, 0, 0, 1, 0] # 8
# ]
# Array copy of near_lots (for 'matrix element' constraint)
near_lots_a = boolvar(shape=n*n,name="near_lots")
#
# variables
#
# the development to place in one of the lots
recreation = intvar(0,n-1,name="recreation")
apartments = intvar(0,n-1,name="apartments")
houses = intvar(0,n-1,name="houses")
cemetery = intvar(0,n-1,name="cemetery")
dump = intvar(0,n-1,name="dump")
developments = [recreation, apartments, houses, cemetery, dump]
c = 13 # number of (potentially) broken constraints (soft constraints)
broken = boolvar(shape=c,name="broken") # indicator of broken constraint
total_broken = intvar(0,c,name="total_broken") # sum(broken)
# sol.minimize(total_broken)
if total_broken_val == None:
model = Model(minimize=total_broken)
else:
model = Model(total_broken==total_broken_val)
for i in range(n):
for j in range(n):
model += (near_lots_a[i*n+j] == near_lots[i][j])
# constraints
model += (total_broken == sum(broken))
model += (AllDifferent(developments))
# * The recreation area should be near the lake.
model += ((near_lake[recreation] == 1) == ( broken[0] == 0))
# * Steep slopes are to be avoided for all but the recreation area.
model += ((steep_slopes[apartments] == 0) == (broken[1] == 0))
model += ((steep_slopes[houses] == 0) == (broken[2] == 0))
model += ((steep_slopes[cemetery] == 0) == (broken[3] == 0))
model += ((steep_slopes[dump] == 0) == (broken[4] == 0))
# * Poor soil should be avoided for those developments that
# involve construction, namely the apartments and the family houses.
model += ((bad_soil[apartments] == 0) == (broken[5] == 0))
model += ((bad_soil[houses] == 0) == (broken[6] == 0))
# * The highway, being noisy, should not be near the apartments,
# the housing, or the recreation area.
model += ((near_highway[apartments] == 0) == (broken[7] == 0))
model += ((near_highway[houses] == 0) == (broken[8] == 0))
model += ((near_highway[recreation] == 0) == (broken[9] == 0))
# The dumpsite should not be visible from the apartments,
# the houses, or the lake.
# not near the lake
model += ((near_lake[dump] == 0) == (broken[10] == 0))
# not near the house
model += (
((near_lots_a[dump*n+houses] == 0) & (near_lots_a[houses*n+dump] == 0)) == (broken[11] == 0)
)
# not near the apartments
model += (
((near_lots_a[dump*n+apartments] == 0) & (near_lots_a[apartments*n+dump] == 0)) == (broken[12] == 0)
)
ss = CPM_ortools(model)
num_solutions = 0
if total_broken_val == None:
if ss.solve():
num_solutions += 1
print("developments:", [v.value() for v in developments])
print("broken constraints:", [i for i in range(c) if broken[i].value()])
print("total_broken:", total_broken.value())
print()
return total_broken.value()
else:
# Get all the optimal solutions and calculate the number
# of times a constraints is broken in the solutions.
broken_constraints_dict = {}
while ss.solve():
num_solutions += 1
print("developments:", [v.value() for v in developments])
broken_constraints = [i for i in range(c) if broken[i].value()]
print("broken constraints:", broken_constraints)
for b in broken_constraints:
broken_constraints_dict[b] = broken_constraints_dict.get(b,0)+1
print("total_broken:", total_broken.value())
print()
get_different_solution(ss,developments)
print("The broken constraints and their occurrences are:")
for b in sorted(broken_constraints_dict):
print(f"Constraint #{b:2d}: {broken_constraints_dict[b]} occurrences")
print()
print("num_solutions:",num_solutions)
print("Find optimal value:")
total_broken=nadel(None)
print(f"Optimal value is {total_broken}.\nNow find all optimal solutions and the broken constraints:")
nadel(total_broken)
|
the-stack_106_22549 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CodeSeeker
A simple tool to search for code on GitHub.
Usage example:
> python -m codeseeker cube
1 file(s) found(s).
repository/path/to/file.py
> python -m codeseeker cube -o
1 file(s) found(s).
repository/path/to/file.py
Opening in a web browser...
For more information, see:
https://leugimkm.github.io/codeseeker/
"""
from .base import parse_args
from .seeker import Seeker, open_url, to_txt, show_links, get_file
from .utils import show
def main():
args = parse_args()
seeker = Seeker()
if args.repo:
seeker.repo = args.repo
if args.lang:
seeker.lang = args.lang
if args.tag:
seeker.tag = args.tag
data = seeker.search(args.keyword, args.repo, args.lang)
if data is None:
print("No results found.")
else:
if args.keyword:
show(data)
if args.open:
open_url(seeker, data)
if args.txt:
to_txt(seeker, data)
if args.links:
show_links(seeker, data)
if args.get:
get_file(seeker, data)
if __name__ == "__main__":
main()
|
the-stack_106_22552 | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
class BaseDataset(data.Dataset, ABC):
"""This class is an abstract base class (ABC) for datasets.
To create a subclass, you need to implement the following four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
self.opt = opt
self.root = opt.dataroot
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def __len__(self):
"""Return the total number of images in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
"""
pass
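# Subclass sketch following the four methods listed in the class docstring (hypothetical dataset;
# assumes `os` is imported and images sit directly under opt.dataroot):
#   class SingleDataset(BaseDataset):
#       def __init__(self, opt):
#           BaseDataset.__init__(self, opt)
#           self.paths = sorted(os.listdir(opt.dataroot))
#       def __len__(self):
#           return len(self.paths)
#       def __getitem__(self, index):
#           img = Image.open(os.path.join(self.root, self.paths[index])).convert('RGB')
#           return {'A': get_transform(self.opt)(img), 'A_paths': self.paths[index]}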
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.preprocess == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, method))
elif 'scale_width' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
elif 'scale_height' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_height(img, opt.load_size, opt.crop_size, method)))
if 'crop' in opt.preprocess:
if params is None:
transform_list.append(transforms.RandomCrop(opt.crop_size))
else:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
if opt.preprocess == 'none':
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
if not opt.no_flip:
if params is None:
transform_list.append(transforms.RandomHorizontalFlip())
elif params['flip']:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
if convert:
transform_list += [transforms.ToTensor()]
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
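# Usage sketch for get_transform (hypothetical options object; only the attributes read above are set):
#   opt = argparse.Namespace(preprocess='resize_and_crop', load_size=286, crop_size=256, no_flip=False)
#   transform = get_transform(opt)                                      # crop position / flip chosen randomly
#   transform = get_transform(opt, params=get_params(opt, (512, 384)))  # reuse the same crop/flip for a pair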
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if h == oh and w == ow:
return img
__print_size_warning(ow, oh, w, h)
return img.resize((w, h), method)
def __scale_width(img, target_size, crop_size, method=Image.BICUBIC):
ow, oh = img.size
if ow == target_size and oh >= crop_size:
return img
w = target_size
h = int(max(target_size * oh / ow, crop_size))
return img.resize((w, h), method)
def __scale_height(img, target_size, crop_size, method=Image.BICUBIC):
ow, oh = img.size
if oh == target_size and ow >= crop_size:
return img
h = target_size
w = int(max(ow*target_size/oh, crop_size))
return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __print_size_warning(ow, oh, w, h):
"""Print warning information about image size(only print once)"""
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
|
the-stack_106_22554 | # Example of creating a delay on the client side (the request flow is split into two steps):
# connect(); wait for connection
# delay 100msec
# send(req)
from trex.astf.api import *
import argparse
# we can send either Python bytes type as below:
http_req = b'GET /3384 HTTP/1.1\r\nHost: 22.0.0.3\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, compress\r\n\r\n'
# or we can send Python string containing ascii chars, as below:
http_response = 'HTTP/1.1 200 OK\r\nServer: Microsoft-IIS/6.0\r\nContent-Type: text/html\r\nContent-Length: 32000\r\n\r\n<html><pre>**********</pre></html>'
class Prof1():
def __init__(self):
pass # tunables
def create_profile(self):
# client commands
prog_c = ASTFProgram()
prog_c.delay(100000); # delay 100msec
prog_c.send(http_req)
prog_c.recv(len(http_response))
prog_s = ASTFProgram()
prog_s.recv(len(http_req))
prog_s.send(http_response)
# ip generator
ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
dist_client=ip_gen_c,
dist_server=ip_gen_s)
# template
temp_c = ASTFTCPClientTemplate(program=prog_c, ip_gen=ip_gen)
temp_s = ASTFTCPServerTemplate(program=prog_s) # using default association
template = ASTFTemplate(client_template=temp_c, server_template=temp_s)
# profile
profile = ASTFProfile(default_ip_gen=ip_gen, templates=template)
return profile
def get_profile(self, tunables, **kwargs):
parser = argparse.ArgumentParser(description='Argparser for {}'.format(os.path.basename(__file__)),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args(tunables)
return self.create_profile()
def register():
return Prof1()
|
the-stack_106_22555 | import numpy as np
import math
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
def make_plot_plast(elms, scale=0):
'''Draws the original scheme (mesh).'''
polygons_plast = []
codes_plast = []
polygons_nonplast = []
codes_nonplast = []
polygons_quad = []
codes_quad = []
for i in elms: # draws the original scheme
if len(i.pnt) == 3 and i.pl is True:
polygons_plast += [
(i.pnt[0].x + i.pnt[0].tot_displ_x * scale,
i.pnt[0].y + i.pnt[0].tot_displ_y * scale),
(i.pnt[1].x + i.pnt[1].tot_displ_x * scale,
i.pnt[1].y + i.pnt[1].tot_displ_y * scale),
(i.pnt[2].x + i.pnt[2].tot_displ_x * scale,
i.pnt[2].y + i.pnt[2].tot_displ_y * scale),
(0, 0)]
codes_plast += [Path.MOVETO] + [Path.LINETO]*2 + [Path.CLOSEPOLY]
elif len(i.pnt) == 3 and i.pl is not True:
polygons_nonplast += [
(i.pnt[0].x + i.pnt[0].tot_displ_x * scale,
i.pnt[0].y + i.pnt[0].tot_displ_y * scale),
(i.pnt[1].x + i.pnt[1].tot_displ_x * scale,
i.pnt[1].y + i.pnt[1].tot_displ_y * scale),
(i.pnt[2].x + i.pnt[2].tot_displ_x * scale,
i.pnt[2].y + i.pnt[2].tot_displ_y * scale),
(0, 0)]
codes_nonplast += [Path.MOVETO] + [Path.LINETO]*2 + [Path.CLOSEPOLY]
if len(i.pnt) == 4:
polygons_quad += [
(i.pnt[0].x + i.pnt[0].tot_displ_x * scale,
i.pnt[0].y + i.pnt[0].tot_displ_y * scale),
(i.pnt[1].x + i.pnt[1].tot_displ_x * scale,
i.pnt[1].y + i.pnt[1].tot_displ_y * scale),
(i.pnt[2].x + i.pnt[2].tot_displ_x * scale,
i.pnt[2].y + i.pnt[2].tot_displ_y * scale),
(i.pnt[3].x + i.pnt[3].tot_displ_x * scale,
i.pnt[3].y + i.pnt[3].tot_displ_y * scale),
(0, 0)]
codes_quad += [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
polygons_plast = np.array(polygons_plast, float)
try:
path_plast = Path(polygons_plast, codes_plast)
pathpatch_PLAST = PathPatch(path_plast, facecolor='#fff000')
except ValueError:
pathpatch_PLAST = False
print('did not work')
else:
path_plast = Path(polygons_plast, codes_plast)
pathpatch_PLAST = PathPatch(path_plast, facecolor='#fff000')
polygons_nonplast = np.array(polygons_nonplast, float)
try:
path_nonplast = Path(polygons_nonplast, codes_nonplast)
pathpatch_nonPLAST = PathPatch(path_nonplast, facecolor='#ffffff')
except ValueError:
pathpatch_nonPLAST = False
print('did not work')
else:
path_nonplast = Path(polygons_nonplast, codes_nonplast)
pathpatch_nonPLAST = PathPatch(path_nonplast, facecolor='#ffffff')
polygons_quad = np.array(polygons_quad, float)
try:
path_quad = Path(polygons_quad, codes_quad)
pathpatch_quad = PathPatch(path_quad, facecolor='#ffffff')
except ValueError:
pathpatch_quad = False
print('did not work')
else:
path_quad = Path(polygons_quad, codes_quad)
pathpatch_quad = PathPatch(path_quad, facecolor='#ffffff')
return pathpatch_PLAST, pathpatch_nonPLAST, pathpatch_quad
def make_add_txt(elms, pnts, scale=0, num_elms=False, num_pnts=False,
displacement_y=False, displacement_x=False, sigma_eq=False,
sigma_x=False):
if num_elms == True:
for i in elms: # draw the element number
plt.text((i.pnt[0].x + i.pnt[1].x + i.pnt[2].x)/3+0.001,
(i.pnt[0].y + i.pnt[1].y + i.pnt[2].y)/3,
i.num, color='red', fontsize=6)
if num_pnts == True:
for i in pnts: # draw the node number
plt.text(i.x + 0.1,
i.y + 0.1,
i.num, color='blue', fontsize=6)
if displacement_y == True:
for i in pnts:
plt.text(i.x + 0.1,
i.y + 0.1,
'{:.2f}'.format(i.tot_displ_y*1000), color='blue', fontsize=6)
if displacement_x == True:
for i in pnts:
plt.text(i.x + 0.001,
i.y + 0.001,
'{:.2f}'.format(i.tot_displ_x*1000), color='blue', fontsize=6)
if sigma_eq == True:
for i in elms:
plt.text((i.pnt[0].x + i.pnt[1].x + i.pnt[2].x)/3,
(i.pnt[0].y + i.pnt[1].y + i.pnt[2].y)/3,
'{:.0f}'.format(i.sigma_eq),
color='blue',
fontsize=6)
if sigma_x == True:
for i in elms:
plt.text((i.pnt[0].x + i.pnt[1].x + i.pnt[2].x)/3,
(i.pnt[0].y + i.pnt[1].y + i.pnt[2].y)/3,
'{:.0f}'.format(i.sigma_x),
color='blue',
fontsize=6)
def show_plast(le, lp):
fig, ax = plt.subplots()
a = make_plot_plast(le, 1)
make_add_txt(le, lp, scale=1, num_elms=True)
for i in range(3):
if a[i] is not False:
ax.add_patch(a[i])
#~ ax.set_title(
#~ 'Возникновение пластических деформации при {:.2f}P'.format(count))
ax.autoscale_view()
# fig.savefig('png/png{}.png'.format(int(count*100)))
plt.show()
def graph_sigma(l_s1, l_s2, sigma_pr):
fig, ax = plt.subplots()
x = np.arange(-sigma_pr/50,sigma_pr/50,1)
ax.plot(x, (x-np.sqrt(-3*x**2+4*(sigma_pr/100)**2))/2, label='se')
ax.plot(x, (x+np.sqrt(-3*x**2+4*(sigma_pr/100)**2))/2, label='se')
ax.plot(l_s1, l_s2, 'ro', label='se')
for i in range(len(l_s1)):
plt.text(l_s1[i] + 0.1,
l_s2[i] + 0.1,
'{}'.format(i), color='blue', fontsize=6)
plt.show()
def show_graph_P_u(P,u):
fig, ax = plt.subplots()
ax.plot(u, P, label='u(p)')
plt.show()
def graph_show(l_se, l_scr):
'''Show graphic of Se and Scr'''
fig, ax = plt.subplots()
x = np.arange(0, 100, 1)
ax.plot(x, l_se, 'ro', label='se')
ax.plot(x, l_scr, 'k', label='scr')
plt.show()
if __name__=="__main__":
graph_sigma([0],[0])
|
the-stack_106_22557 | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kfp.dsl.component_spec."""
import unittest
from absl.testing import parameterized
from kfp.components import _structures as structures
from kfp.dsl import _pipeline_param
from kfp.dsl import component_spec as dsl_component_spec
from kfp.pipeline_spec import pipeline_spec_pb2
from google.protobuf import json_format
class ComponentSpecTest(parameterized.TestCase):
TEST_PIPELINE_PARAMS = [
_pipeline_param.PipelineParam(
name='output1', param_type='Dataset', op_name='op-1'),
_pipeline_param.PipelineParam(
name='output2', param_type='Integer', op_name='op-2'),
_pipeline_param.PipelineParam(
name='output3', param_type='Model', op_name='op-3'),
_pipeline_param.PipelineParam(
name='output4', param_type='Double', op_name='op-4'),
_pipeline_param.PipelineParam(
name='arg_input', param_type='String', op_name=None),
]
def setUp(self):
self.maxDiff = None
def test_build_component_spec_from_structure(self):
structure_component_spec = structures.ComponentSpec(
name='component1',
description='component1 desc',
inputs=[
structures.InputSpec(
name='input1', description='input1 desc', type='Dataset'),
structures.InputSpec(
name='input2', description='input2 desc', type='String'),
structures.InputSpec(
name='input3', description='input3 desc', type='Integer'),
structures.InputSpec(
name='input4', description='optional inputs', optional=True),
],
outputs=[
structures.OutputSpec(
name='output1', description='output1 desc', type='Model')
])
expected_dict = {
'inputDefinitions': {
'artifacts': {
'input1': {
'artifactType': {
'schemaTitle': 'system.Dataset'
}
}
},
'parameters': {
'input2': {
'type': 'STRING'
},
'input3': {
'type': 'INT'
}
}
},
'outputDefinitions': {
'artifacts': {
'output1': {
'artifactType': {
'schemaTitle': 'system.Model'
}
}
}
},
'executorLabel': 'exec-component1'
}
expected_spec = pipeline_spec_pb2.ComponentSpec()
json_format.ParseDict(expected_dict, expected_spec)
component_spec = (
dsl_component_spec.build_component_spec_from_structure(
component_spec=structure_component_spec,
executor_label='exec-component1',
actual_inputs=['input1', 'input2', 'input3'],
))
self.assertEqual(expected_spec, component_spec)
@parameterized.parameters(
{
'is_root_component': True,
'expected_result': {
'inputDefinitions': {
'artifacts': {
'input1': {
'artifactType': {
'schemaTitle': 'system.Dataset'
}
}
},
'parameters': {
'input2': {
'type': 'INT'
},
'input3': {
'type': 'STRING'
},
'input4': {
'type': 'DOUBLE'
}
}
}
}
},
{
'is_root_component': False,
'expected_result': {
'inputDefinitions': {
'artifacts': {
'pipelineparam--input1': {
'artifactType': {
'schemaTitle': 'system.Dataset'
}
}
},
'parameters': {
'pipelineparam--input2': {
'type': 'INT'
},
'pipelineparam--input3': {
'type': 'STRING'
},
'pipelineparam--input4': {
'type': 'DOUBLE'
}
}
}
}
},
)
def test_build_component_inputs_spec(self, is_root_component,
expected_result):
pipeline_params = [
_pipeline_param.PipelineParam(name='input1', param_type='Dataset'),
_pipeline_param.PipelineParam(name='input2', param_type='Integer'),
_pipeline_param.PipelineParam(name='input3', param_type='String'),
_pipeline_param.PipelineParam(name='input4', param_type='Float'),
]
expected_spec = pipeline_spec_pb2.ComponentSpec()
json_format.ParseDict(expected_result, expected_spec)
component_spec = pipeline_spec_pb2.ComponentSpec()
dsl_component_spec.build_component_inputs_spec(component_spec,
pipeline_params,
is_root_component)
self.assertEqual(expected_spec, component_spec)
def test_build_component_outputs_spec(self):
pipeline_params = [
_pipeline_param.PipelineParam(name='output1', param_type='Dataset'),
_pipeline_param.PipelineParam(name='output2', param_type='Integer'),
_pipeline_param.PipelineParam(name='output3', param_type='String'),
_pipeline_param.PipelineParam(name='output4', param_type='Float'),
]
expected_dict = {
'outputDefinitions': {
'artifacts': {
'output1': {
'artifactType': {
'schemaTitle': 'system.Dataset'
}
}
},
'parameters': {
'output2': {
'type': 'INT'
},
'output3': {
'type': 'STRING'
},
'output4': {
'type': 'DOUBLE'
}
}
}
}
expected_spec = pipeline_spec_pb2.ComponentSpec()
json_format.ParseDict(expected_dict, expected_spec)
component_spec = pipeline_spec_pb2.ComponentSpec()
dsl_component_spec.build_component_outputs_spec(component_spec,
pipeline_params)
self.assertEqual(expected_spec, component_spec)
@parameterized.parameters(
{
'is_parent_component_root': True,
'expected_result': {
'inputs': {
'artifacts': {
'pipelineparam--op-1-output1': {
'taskOutputArtifact': {
'producerTask': 'task-op-1',
'outputArtifactKey': 'output1'
}
},
'pipelineparam--op-3-output3': {
'componentInputArtifact': 'op-3-output3'
}
},
'parameters': {
'pipelineparam--op-2-output2': {
'taskOutputParameter': {
'producerTask': 'task-op-2',
'outputParameterKey': 'output2'
}
},
'pipelineparam--op-4-output4': {
'componentInputParameter': 'op-4-output4'
},
'pipelineparam--arg_input': {
'componentInputParameter': 'arg_input'
}
}
}
}
},
{
'is_parent_component_root': False,
'expected_result': {
'inputs': {
'artifacts': {
'pipelineparam--op-1-output1': {
'taskOutputArtifact': {
'producerTask': 'task-op-1',
'outputArtifactKey': 'output1'
}
},
'pipelineparam--op-3-output3': {
'componentInputArtifact':
'pipelineparam--op-3-output3'
}
},
'parameters': {
'pipelineparam--op-2-output2': {
'taskOutputParameter': {
'producerTask': 'task-op-2',
'outputParameterKey': 'output2'
}
},
'pipelineparam--op-4-output4': {
'componentInputParameter':
'pipelineparam--op-4-output4'
},
'pipelineparam--arg_input': {
'componentInputParameter': 'pipelineparam--arg_input'
}
}
}
}
},
)
def test_build_task_inputs_spec(self, is_parent_component_root,
expected_result):
pipeline_params = self.TEST_PIPELINE_PARAMS
tasks_in_current_dag = ['task-op-1', 'task-op-2']
expected_spec = pipeline_spec_pb2.PipelineTaskSpec()
json_format.ParseDict(expected_result, expected_spec)
task_spec = pipeline_spec_pb2.PipelineTaskSpec()
dsl_component_spec.build_task_inputs_spec(task_spec, pipeline_params,
tasks_in_current_dag,
is_parent_component_root)
self.assertEqual(expected_spec, task_spec)
@parameterized.parameters(
{
'original_task_spec': {},
'parent_component_inputs': {},
'tasks_in_current_dag': [],
'input_parameters_in_current_dag': [],
'input_artifacts_in_current_dag': [],
'expected_result': {},
},
{ # Depending on tasks & inputs within the current DAG.
'original_task_spec': {
'inputs': {
'artifacts': {
'pipelineparam--op-1-output1': {
'taskOutputArtifact': {
'producerTask': 'task-op-1',
'outputArtifactKey': 'output1'
}
},
'artifact1': {
'componentInputArtifact': 'artifact1'
},
},
'parameters': {
'pipelineparam--op-2-output2': {
'taskOutputParameter': {
'producerTask': 'task-op-2',
'outputParameterKey': 'output2'
}
},
'param1': {
'componentInputParameter': 'param1'
},
}
}
},
'parent_component_inputs': {
'artifacts': {
'artifact1': {
'artifactType': {
'instanceSchema': 'dummy_schema'
}
},
},
'parameters': {
'param1': {
'type': 'STRING'
},
}
},
'tasks_in_current_dag': ['task-op-1', 'task-op-2'],
'input_parameters_in_current_dag': ['param1'],
'input_artifacts_in_current_dag': ['artifact1'],
'expected_result': {
'inputs': {
'artifacts': {
'pipelineparam--op-1-output1': {
'taskOutputArtifact': {
'producerTask': 'task-op-1',
'outputArtifactKey': 'output1'
}
},
'artifact1': {
'componentInputArtifact': 'artifact1'
},
},
'parameters': {
'pipelineparam--op-2-output2': {
'taskOutputParameter': {
'producerTask': 'task-op-2',
'outputParameterKey': 'output2'
}
},
'param1': {
'componentInputParameter': 'param1'
},
}
}
},
},
{ # Depending on tasks and inputs not available in the current DAG.
'original_task_spec': {
'inputs': {
'artifacts': {
'pipelineparam--op-1-output1': {
'taskOutputArtifact': {
'producerTask': 'task-op-1',
'outputArtifactKey': 'output1'
}
},
'artifact1': {
'componentInputArtifact': 'artifact1'
},
},
'parameters': {
'pipelineparam--op-2-output2': {
'taskOutputParameter': {
'producerTask': 'task-op-2',
'outputParameterKey': 'output2'
}
},
'param1': {
'componentInputParameter': 'param1'
},
}
}
},
'parent_component_inputs': {
'artifacts': {
'pipelineparam--op-1-output1': {
'artifactType': {
'instanceSchema': 'dummy_schema'
}
},
'pipelineparam--artifact1': {
'artifactType': {
'instanceSchema': 'dummy_schema'
}
},
},
'parameters': {
'pipelineparam--op-2-output2' : {
'type': 'INT'
},
'pipelineparam--param1': {
'type': 'STRING'
},
}
},
'tasks_in_current_dag': ['task-op-3'],
'input_parameters_in_current_dag': ['pipelineparam--op-2-output2', 'pipelineparam--param1'],
'input_artifacts_in_current_dag': ['pipelineparam--op-1-output1', 'pipelineparam--artifact1'],
'expected_result': {
'inputs': {
'artifacts': {
'pipelineparam--op-1-output1': {
'componentInputArtifact':
'pipelineparam--op-1-output1'
},
'artifact1': {
'componentInputArtifact': 'pipelineparam--artifact1'
},
},
'parameters': {
'pipelineparam--op-2-output2': {
'componentInputParameter':
'pipelineparam--op-2-output2'
},
'param1': {
'componentInputParameter': 'pipelineparam--param1'
},
}
}
},
},
)
def test_update_task_inputs_spec(self, original_task_spec,
parent_component_inputs,
tasks_in_current_dag,
input_parameters_in_current_dag,
input_artifacts_in_current_dag,
expected_result):
pipeline_params = self.TEST_PIPELINE_PARAMS
expected_spec = pipeline_spec_pb2.PipelineTaskSpec()
json_format.ParseDict(expected_result, expected_spec)
task_spec = pipeline_spec_pb2.PipelineTaskSpec()
json_format.ParseDict(original_task_spec, task_spec)
parent_component_inputs_spec = pipeline_spec_pb2.ComponentInputsSpec()
json_format.ParseDict(parent_component_inputs, parent_component_inputs_spec)
dsl_component_spec.update_task_inputs_spec(task_spec,
parent_component_inputs_spec,
pipeline_params,
tasks_in_current_dag,
input_parameters_in_current_dag,
input_artifacts_in_current_dag)
self.assertEqual(expected_spec, task_spec)
def test_pop_input_from_component_spec(self):
component_spec = pipeline_spec_pb2.ComponentSpec(
executor_label='exec-component1')
component_spec.input_definitions.artifacts[
'input1'].artifact_type.schema_title = 'system.Dataset'
component_spec.input_definitions.parameters[
'input2'].type = pipeline_spec_pb2.PrimitiveType.STRING
component_spec.input_definitions.parameters[
'input3'].type = pipeline_spec_pb2.PrimitiveType.DOUBLE
# pop an artifact, and there're other inputs left
dsl_component_spec.pop_input_from_component_spec(component_spec, 'input1')
expected_dict = {
'inputDefinitions': {
'parameters': {
'input2': {
'type': 'STRING'
},
'input3': {
'type': 'DOUBLE'
}
}
},
'executorLabel': 'exec-component1'
}
expected_spec = pipeline_spec_pb2.ComponentSpec()
json_format.ParseDict(expected_dict, expected_spec)
self.assertEqual(expected_spec, component_spec)
# pop an parameter, and there're other inputs left
dsl_component_spec.pop_input_from_component_spec(component_spec, 'input2')
expected_dict = {
'inputDefinitions': {
'parameters': {
'input3': {
'type': 'DOUBLE'
}
}
},
'executorLabel': 'exec-component1'
}
expected_spec = pipeline_spec_pb2.ComponentSpec()
json_format.ParseDict(expected_dict, expected_spec)
self.assertEqual(expected_spec, component_spec)
# pop the last input, expect no inputDefinitions
dsl_component_spec.pop_input_from_component_spec(component_spec, 'input3')
expected_dict = {'executorLabel': 'exec-component1'}
expected_spec = pipeline_spec_pb2.ComponentSpec()
json_format.ParseDict(expected_dict, expected_spec)
self.assertEqual(expected_spec, component_spec)
# pop an input that doesn't exist, expect no-op.
dsl_component_spec.pop_input_from_component_spec(component_spec, 'input4')
self.assertEqual(expected_spec, component_spec)
def test_pop_input_from_task_spec(self):
task_spec = pipeline_spec_pb2.PipelineTaskSpec()
task_spec.component_ref.name = 'comp-component1'
task_spec.inputs.artifacts[
'input1'].task_output_artifact.producer_task = 'task-op-1'
task_spec.inputs.artifacts[
'input1'].task_output_artifact.output_artifact_key = 'output1'
task_spec.inputs.parameters[
'input2'].task_output_parameter.producer_task = 'task-op-2'
task_spec.inputs.parameters[
'input2'].task_output_parameter.output_parameter_key = 'output2'
task_spec.inputs.parameters[
'input3'].component_input_parameter = 'op3-output3'
# pop an parameter, and there're other inputs left
dsl_component_spec.pop_input_from_task_spec(task_spec, 'input3')
expected_dict = {
'inputs': {
'artifacts': {
'input1': {
'taskOutputArtifact': {
'producerTask': 'task-op-1',
'outputArtifactKey': 'output1'
}
}
},
'parameters': {
'input2': {
'taskOutputParameter': {
'producerTask': 'task-op-2',
'outputParameterKey': 'output2'
}
}
}
},
'component_ref': {
'name': 'comp-component1'
}
}
expected_spec = pipeline_spec_pb2.PipelineTaskSpec()
json_format.ParseDict(expected_dict, expected_spec)
self.assertEqual(expected_spec, task_spec)
# pop an artifact, and there're other inputs left
dsl_component_spec.pop_input_from_task_spec(task_spec, 'input1')
expected_dict = {
'inputs': {
'parameters': {
'input2': {
'taskOutputParameter': {
'producerTask': 'task-op-2',
'outputParameterKey': 'output2'
}
}
}
},
'component_ref': {
'name': 'comp-component1'
}
}
expected_spec = pipeline_spec_pb2.PipelineTaskSpec()
json_format.ParseDict(expected_dict, expected_spec)
self.assertEqual(expected_spec, task_spec)
# pop the last input, expect no inputDefinitions
dsl_component_spec.pop_input_from_task_spec(task_spec, 'input2')
expected_dict = {'component_ref': {'name': 'comp-component1'}}
expected_spec = pipeline_spec_pb2.PipelineTaskSpec()
json_format.ParseDict(expected_dict, expected_spec)
self.assertEqual(expected_spec, task_spec)
# pop an input that doesn't exist, expect no-op.
dsl_component_spec.pop_input_from_task_spec(task_spec, 'input4')
self.assertEqual(expected_spec, task_spec)
def test_additional_input_name_for_pipelineparam(self):
self.assertEqual(
'pipelineparam--op1-param1',
dsl_component_spec.additional_input_name_for_pipelineparam(
_pipeline_param.PipelineParam(name='param1', op_name='op1')))
self.assertEqual(
'pipelineparam--param2',
dsl_component_spec.additional_input_name_for_pipelineparam(
_pipeline_param.PipelineParam(name='param2')))
self.assertEqual(
'pipelineparam--param3',
dsl_component_spec.additional_input_name_for_pipelineparam('param3'))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22558 | from ..credentials import create_proof_jwt, create_proof, verify_proof
from aries_cloudagent.wallet.basic import BasicWallet
from asynctest import TestCase as AsyncTestCase
class TestJWT(AsyncTestCase):
async def setUp(self):
self.wallet = BasicWallet()
self.example_schema = {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://www.w3.org/2018/credentials/examples/v1",
],
"type": ["VerifiablePresentation"],
"verifiableCredential": [{}],
}
# async def test_create_proof_jwt(self):
# abc = await create_proof_jwt(self.wallet, self.example_schema)
# assert (
# abc
# == "eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ"
# )
# assert abc == "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9"
async def test_create_proof(self):
proof = await create_proof(self.wallet, self.example_schema, Exception)
self.example_schema["proof"] = proof
assert await verify_proof(self.wallet, self.example_schema) == True |
the-stack_106_22559 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, ignore_warnings,
override_settings,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.six.moves.urllib.request import urlopen
from .models import Storage, temp_storage, temp_storage_location
try:
import pytz
except ImportError:
pytz = None
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import don't exist.
"""
with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"):
get_storage_class('storage.NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
get_storage_class raises an error if the requested class don't exist.
"""
with self.assertRaises(ImportError):
get_storage_class('django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
get_storage_class raises an error if the requested module don't exist.
"""
# Error message may or may not be the fully qualified path.
with six.assertRaisesRegex(self, ImportError, "No module named '?(django.core.files.)?non_existing_storage'?"):
get_storage_class('django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageDeconstructionTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, tuple())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
# Tests for TZ-aware time methods need pytz.
requires_pytz = unittest.skipIf(pytz is None, "this test requires pytz")
class FileStorageTests(TestCase):
# Changing TIME_ZONE may issue a query to set the database's timezone,
# hence TestCase.
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
Makes sure an exception is raised if the location is empty
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def _test_file_time_getter(self, getter):
# Check for correct behavior under both USE_TZ=True and USE_TZ=False.
# The tests are similar since they both set up a situation where the
# system time zone, Django's TIME_ZONE, and UTC are distinct.
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_on(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
# Use a fixed offset timezone so we don't need pytz.
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5. The following will be aware in UTC.
now = timezone.now()
self.assertFalse(self.storage.exists('test.file.tz.on'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.on', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be aware, in UTC
self.assertTrue(timezone.is_aware(dt))
self.assertEqual(now.tzname(), dt.tzname())
# Check that the three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and now should be the same effective time.
self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_off(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
# Use a fixed offset timezone so we don't need pytz.
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5.
self.assertFalse(self.storage.exists('test.file.tz.off'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.off', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be naive, in system (+1) TZ
self.assertTrue(timezone.is_naive(dt))
# Check that the three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and naive_now should be the same effective time.
self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
# If we convert dt to an aware object using the Algiers
# timezone then it should be the same effective time to
# now_in_algiers.
_dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
@requires_pytz
def test_file_get_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
atime = self.storage.get_accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))
@requires_pytz
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
self._test_file_time_getter(self.storage.get_accessed_time)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_file_accessed_time(self):
"""
File storage returns a datetime for the last accessed time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.accessed_time(f_name), timedelta(seconds=2))
@requires_pytz
def test_file_get_created_time(self):
"""
File storage returns a datetime for the creation time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
ctime = self.storage.get_created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))
@requires_pytz
@requires_tz_support
def test_file_get_created_time_timezone(self):
self._test_file_time_getter(self.storage.get_created_time)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_file_created_time(self):
"""
File storage returns a datetime for the creation time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.addCleanup(self.storage.delete, f_name)
self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.created_time(f_name), timedelta(seconds=2))
@requires_pytz
def test_file_get_modified_time(self):
"""
File storage returns a datetime for the last modified time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
mtime = self.storage.get_modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))
@requires_pytz
@requires_tz_support
def test_file_get_modified_time_timezone(self):
self._test_file_time_getter(self.storage.get_modified_time)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_file_modified_time(self):
"""
File storage returns a datetime for the last modified time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.modified_time(f_name), timedelta(seconds=2))
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file', ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(six.StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')
# should encode special chars except ~!*()'
        # like the encodeURIComponent() JavaScript function does
self.assertEqual(
self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
"/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
)
self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")
# #25905: remove leading slashes from file names to prevent unsafe url output
self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
"""
File storage returns a url even when its base_url is unset or modified.
"""
self.storage.base_url = None
with self.assertRaises(ValueError):
self.storage.url('test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
with self.assertRaises(SuspiciousOperation):
self.storage.exists('..')
with self.assertRaises(SuspiciousOperation):
self.storage.exists('/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file', ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file', ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Check that OSErrors aside from EEXIST are still raised.
with self.assertRaises(OSError):
self.storage.save('error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise OSError(errno.ENOENT, 'simulated ENOENT')
elif path == os.path.join(self.temp_dir, 'error.file'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('raced.file'))
# Check that OSErrors aside from ENOENT are still raised.
self.storage.save('error.file', ContentFile('delete with error'))
with self.assertRaises(OSError):
self.storage.delete('error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behavior when file.chunks() is raising an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
@override_settings(
MEDIA_ROOT='media_root',
MEDIA_URL='media_url/',
FILE_UPLOAD_PERMISSIONS=0o777,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
"""
Properties using settings values as defaults should be updated on
referenced settings change while specified values should be unchanged.
"""
storage = self.storage_class(
location='explicit_location',
base_url='explicit_base_url/',
file_permissions_mode=0o666,
directory_permissions_mode=0o666,
)
defaults_storage = self.storage_class()
settings = {
'MEDIA_ROOT': 'overriden_media_root',
'MEDIA_URL': 'overriden_media_url/',
'FILE_UPLOAD_PERMISSIONS': 0o333,
'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
}
with self.settings(**settings):
self.assertEqual(storage.base_location, 'explicit_location')
self.assertIn('explicit_location', storage.location)
self.assertEqual(storage.base_url, 'explicit_base_url/')
self.assertEqual(storage.file_permissions_mode, 0o666)
self.assertEqual(storage.directory_permissions_mode, 0o666)
self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
self.assertEqual(
defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
)
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class CustomStorageLegacyDatetimeHandling(FileSystemStorage):
# Use the legacy accessed_time() et al from FileSystemStorage and the
# shim get_accessed_time() et al from the Storage baseclass. Both of those
# raise warnings, so the testcase class ignores them all.
def get_accessed_time(self, name):
return super(FileSystemStorage, self).get_accessed_time(name)
def get_created_time(self, name):
return super(FileSystemStorage, self).get_created_time(name)
def get_modified_time(self, name):
return super(FileSystemStorage, self).get_modified_time(name)
@ignore_warnings(category=RemovedInDjango20Warning)
class CustomStorageLegacyDatetimeHandlingTests(FileStorageTests):
storage_class = CustomStorageLegacyDatetimeHandling
class DiscardingFalseContentStorage(FileSystemStorage):
def _save(self, name, content):
if content:
return super(DiscardingFalseContentStorage, self)._save(name, content)
return ''
class DiscardingFalseContentStorageTests(FileStorageTests):
storage_class = DiscardingFalseContentStorage
def test_custom_storage_discarding_empty_content(self):
"""
When Storage.save() wraps a file-like object in File, it should include
the name argument so that bool(file) evaluates to True (#26495).
"""
output = six.StringIO('content')
self.storage.save('tests/stringio', output)
self.assertTrue(self.storage.exists('tests/stringio'))
with self.storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, 'PC_NAME_MAX')
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
self.assertIsInstance(Storage.normal, FileDescriptor)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
with self.assertRaises(ValueError):
obj1.normal.size
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_filefield_write(self):
# Files can be written to.
obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
with obj.normal as normal:
normal.open('wb')
normal.write(b'updated')
obj.refresh_from_db()
self.assertEqual(obj.normal.read(), b'updated')
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename gets truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
six.assertRegex(self, names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
objs[1].limited_length.save(*(filename, ContentFile('Same Content')))
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = six.StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super(ContentFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
six.assertRegex(self, files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
six.assertRegex(self, files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
Test that ContentFile can be saved correctly with the filesystem storage,
        whether it was initialized with bytes or unicode content."""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
Test the File storage API with a file like object coming from urllib2.urlopen()
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
the-stack_106_22560 | from smt.surrogate_models import RMTB
from smt.examples.one_D_step.one_D_step import get_one_d_step, plot_one_d_step
xt, yt, xlimits = get_one_d_step()
interp = RMTB(
num_ctrl_pts=100,
xlimits=xlimits,
nonlinear_maxiter=20,
solver_tolerance=1e-16,
energy_weight=1e-14,
regularization_weight=0.0,
)
interp.set_training_values(xt, yt)
interp.train()
plot_one_d_step(xt, yt, xlimits, interp)
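# Illustrative follow-up (not part of the original example): once trained, the
# surrogate can be queried at new points via SMT's standard prediction call.
# The 50-point grid below is an assumed choice, using xlimits of shape (1, 2).
import numpy as np

xnew = np.linspace(xlimits[0, 0], xlimits[0, 1], 50).reshape(-1, 1)
ynew = interp.predict_values(xnew)  # interpolated outputs, shape (50, 1)
print(ynew[:5])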
|
the-stack_106_22561 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Plugin for SGE.
This has been tested on GE 6.2u3.
Plugin originally written by Marco Dorigo.
Email: marco(DOT)dorigo(AT)rub(DOT)de
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import xml.parsers.expat
import xml.dom.minidom
from aiida.common.escaping import escape_for_bash
import aiida.schedulers
from aiida.schedulers import SchedulerError, SchedulerParsingError
from aiida.schedulers.datastructures import (JobInfo, JobState, ParEnvJobResource)
# 'http://www.loni.ucla.edu/twiki/bin/view/Infrastructure/GridComputing?skin=plain':
# Jobs Status:
# 'qw' - Queued and waiting,
# 'w' - Job waiting,
# 's' - Job suspended,
# 't' - Job transferring and about to start,
# 'r' - Job running,
# 'h' - Job hold,
# 'R' - Job restarted,
# 'd' - Job has been marked for deletion,
# 'Eqw' - An error occurred with the job.
#
# 'http://confluence.rcs.griffith.edu.au:8080/display/v20zCluster/
# Sun+Grid+Engine+SGE+state+letter+symbol+codes+meanings':
#
# Category State SGE Letter Code
# Pending: pending qw
# Pending: pending, user hold qw
# Pending: pending, system hold hqw
# Pending: pending, user and system hold hqw
# Pending: pending, user hold, re-queue hRwq
# Pending: pending, system hold, re-queue hRwq
# Pending: pending, user and system hold, re-queue hRwq
# Pending: pending, user hold qw
# Pending: pending, user hold qw
# Running running r
# Running transferring t
# Running running, re-submit Rr
# Running transferring, re-submit Rt
# Suspended job suspended s, ts
# Suspended queue suspended S, tS
# Suspended queue suspended by alarm T, tT
# Suspended all suspended with re-submit Rs, Rts, RS, RtS, RT, RtT
# Error all pending states with error Eqw, Ehqw, EhRqw
# Deleted all running and suspended states with deletion dr, dt, dRr, dRt,
# ds, dS, dT, dRs,
# dRS, dRT
_MAP_STATUS_SGE = {
'qw': JobState.QUEUED,
'w': JobState.QUEUED,
'hqw': JobState.QUEUED_HELD,
'hRwq': JobState.QUEUED_HELD,
'r': JobState.RUNNING,
't': JobState.RUNNING,
'R': JobState.RUNNING,
'Rr': JobState.RUNNING,
'Rt': JobState.RUNNING,
's': JobState.SUSPENDED,
'st': JobState.SUSPENDED,
'Rs': JobState.SUSPENDED,
'Rts': JobState.SUSPENDED,
'dr': JobState.UNDETERMINED,
'dt': JobState.UNDETERMINED,
'ds': JobState.UNDETERMINED,
'dRr': JobState.UNDETERMINED,
'dRt': JobState.UNDETERMINED,
'dRs': JobState.UNDETERMINED,
'Eqw': JobState.UNDETERMINED,
'Ehqw': JobState.UNDETERMINED,
'EhRqw': JobState.UNDETERMINED
}
class SgeJobResource(ParEnvJobResource):
pass
class SgeScheduler(aiida.schedulers.Scheduler):
"""
Support for the Sun Grid Engine scheduler and its variants/forks (Son of Grid Engine, Oracle Grid Engine, ...)
"""
_logger = aiida.schedulers.Scheduler._logger.getChild('sge')
# For SGE, we can have a good qstat xml output by querying by
# user, but not by job id
_features = {
'can_query_by_user': True,
}
# The class to be used for the job resource.
_job_resource_class = SgeJobResource
def _get_joblist_command(self, jobs=None, user=None):
"""
The command to report full information on existing jobs.
TODO: in the case of job arrays, decide what to do (i.e., if we want
to pass the -t options to list each subjob).
!!!ALL COPIED FROM PBSPRO!!!
TODO: understand if it is worth escaping the username,
or rather leave it unescaped to allow to pass $USER
"""
from aiida.common.exceptions import FeatureNotAvailable
if jobs:
raise FeatureNotAvailable('Cannot query by jobid in SGE')
command = 'qstat -ext -urg -xml '
if user:
command += '-u {}'.format(str(user))
else:
# All users if no user is specified
command += "-u '*'"
self.logger.debug('qstat command: {}'.format(command))
return command
# raise NotImplementedError
def _get_detailed_jobinfo_command(self, jobid):
command = 'qacct -j {}'.format(escape_for_bash(jobid))
return command
def _get_submit_script_header(self, job_tmpl):
"""
Return the submit script header, using the parameters from the
job_tmpl.
Args:
job_tmpl: an JobTemplate instance with relevant parameters set.
TODO: truncate the title if too long
"""
import re
import string
empty_line = ''
lines = []
# SGE provides flags for wd and cwd
if job_tmpl.working_directory:
lines.append('#$ -wd {}'.format(job_tmpl.working_directory))
else:
lines.append('#$ -cwd')
# Enforce bash shell
lines.append('#$ -S /bin/bash')
if job_tmpl.submit_as_hold:
# if isinstance(job_tmpl.submit_as_hold, str):
lines.append('#$ -h {}'.format(job_tmpl.submit_as_hold))
if job_tmpl.rerunnable:
# if isinstance(job_tmpl.rerunnable, str):
lines.append('#$ -r {}'.format(job_tmpl.rerunnable))
if job_tmpl.email:
# If not specified, but email events are set, PBSPro
# sends the mail to the job owner by default
lines.append('#$ -M {}'.format(job_tmpl.email))
email_events = ''
if job_tmpl.email_on_started:
email_events += 'b'
if job_tmpl.email_on_terminated:
email_events += 'ea'
if email_events:
lines.append('#$ -m {}'.format(email_events))
if not job_tmpl.email:
self.logger.info('Email triggers provided to SGE script for job,'
'but no email field set; will send emails to '
'the job owner as set in the scheduler')
else:
lines.append('#$ -m n')
# From the qsub man page:
# "The name may be any arbitrary alphanumeric ASCII string, but
# may not contain "\n", "\t", "\r", "/", ":", "@", "\", "*",
# or "?"."
if job_tmpl.job_name:
job_title = re.sub(r'[^a-zA-Z0-9_.-]+', '', job_tmpl.job_name)
# prepend a 'j' (for 'job') before the string if the string
# is now empty or does not start with a valid character
            # (the first symbol cannot be a digit, at least in some versions
# of the scheduler)
if not job_title or (job_title[0] not in string.ascii_letters):
job_title = 'j' + job_title
            lines.append('#$ -N {}'.format(job_title))
if job_tmpl.import_sys_environment:
lines.append('#$ -V')
if job_tmpl.sched_output_path:
lines.append('#$ -o {}'.format(job_tmpl.sched_output_path))
if job_tmpl.sched_join_files:
# from qsub man page:
# 'y': Standard error and standard output are merged into
# standard output
# 'n' : Standard error and standard output are not merged (default)
lines.append('#$ -j y')
if job_tmpl.sched_error_path:
self.logger.info('sched_join_files is True, but sched_error_path is set in '
'SGE script; ignoring sched_error_path')
else:
if job_tmpl.sched_error_path:
lines.append('#$ -e {}'.format(job_tmpl.sched_error_path))
if job_tmpl.queue_name:
lines.append('#$ -q {}'.format(job_tmpl.queue_name))
if job_tmpl.account:
lines.append('#$ -P {}'.format(job_tmpl.account))
if job_tmpl.priority:
# Priority of the job. Format: host-dependent integer. Default:
# zero. Range: [-1023, +1024]. Sets job's Priority
# attribute to priority.
lines.append('#$ -p {}'.format(job_tmpl.priority))
if not job_tmpl.job_resource:
raise ValueError('Job resources (as the tot_num_mpiprocs) are required for the SGE scheduler plugin')
# Setting up the parallel environment
lines.append('#$ -pe {} {}'. \
format(str(job_tmpl.job_resource.parallel_env), \
int(job_tmpl.job_resource.tot_num_mpiprocs)))
if job_tmpl.max_wallclock_seconds is not None:
try:
tot_secs = int(job_tmpl.max_wallclock_seconds)
if tot_secs <= 0:
raise ValueError
except ValueError:
raise ValueError('max_wallclock_seconds must be '
"a positive integer (in seconds)! It is instead '{}'"
''.format((job_tmpl.max_wallclock_seconds)))
hours = tot_secs // 3600
tot_minutes = tot_secs % 3600
minutes = tot_minutes // 60
seconds = tot_minutes % 60
lines.append('#$ -l h_rt={:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds))
if job_tmpl.custom_scheduler_commands:
lines.append(job_tmpl.custom_scheduler_commands)
# TAKEN FROM PBSPRO:
# Job environment variables are to be set on one single line.
# This is a tough job due to the escaping of commas, etc.
# moreover, I am having issues making it work.
        # Therefore, I assume that this is bash and export variables by
        # hand.
if job_tmpl.job_environment:
lines.append(empty_line)
lines.append('# ENVIRONMENT VARIABLES BEGIN ###')
if not isinstance(job_tmpl.job_environment, dict):
raise ValueError('If you provide job_environment, it must be a dictionary')
for key, value in job_tmpl.job_environment.items():
lines.append('export {}={}'.format(key.strip(), escape_for_bash(value)))
lines.append('# ENVIRONMENT VARIABLES END ###')
lines.append(empty_line)
return '\n'.join(lines)
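    # Illustrative sketch (not part of the original plugin): for a job template
    # with working_directory='/scratch/run1', parallel_env='mpi',
    # tot_num_mpiprocs=16, no email settings and max_wallclock_seconds=3600,
    # the method above would emit a header along these lines:
    #
    #   #$ -wd /scratch/run1
    #   #$ -S /bin/bash
    #   #$ -m n
    #   #$ -pe mpi 16
    #   #$ -l h_rt=01:00:00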
def _get_submit_command(self, submit_script):
"""
Return the string to execute to submit a given script.
Args:
submit_script: the path of the submit script relative to the working
directory.
IMPORTANT: submit_script should be already escaped.
"""
submit_command = 'qsub -terse {}'.format(submit_script)
self.logger.info('submitting with: ' + submit_command)
return submit_command
def _parse_joblist_output(self, retval, stdout, stderr):
if retval != 0:
self.logger.error('Error in _parse_joblist_output: retval={}; '
'stdout={}; stderr={}'.format(retval, stdout, stderr))
raise SchedulerError('Error during joblist retrieval, retval={}'. \
format(retval))
if stderr.strip():
self.logger.warning('in _parse_joblist_output for {}: '
'there was some text in stderr: {}'.format(str(self.transport), stderr))
if stdout:
try:
xmldata = xml.dom.minidom.parseString(stdout)
except xml.parsers.expat.ExpatError:
self.logger.error('in sge._parse_joblist_output: xml parsing of stdout failed:' '{}'.format(stdout))
                raise SchedulerParsingError('Error during joblist retrieval, ' 'xml parsing of stdout failed')
else:
self.logger.error('Error in sge._parse_joblist_output: retval={}; '
'stdout={}; stderr={}'.format(retval, stdout, stderr))
            raise SchedulerError('Error during joblist retrieval, ' 'no stdout produced')
try:
first_child = xmldata.firstChild
second_childs = first_child.childNodes
tag_names_sec = [elem.tagName for elem in second_childs \
if elem.nodeType == 1]
if 'queue_info' not in tag_names_sec:
self.logger.error('Error in sge._parse_joblist_output: '
'no queue_info: {}'. \
format(stdout))
raise SchedulerError
if 'job_info' not in tag_names_sec:
self.logger.error('Error in sge._parse_joblist_output: '
'no job_info: {}'. \
format(stdout))
raise SchedulerError
except SchedulerError:
self.logger.error('Error in sge._parse_joblist_output: stdout={}' \
.format(stdout))
            raise SchedulerError('Error during xml processing of stdout: '
                                 "there is no 'job_info' or no 'queue_info' "
                                 'element, or there are no jobs!')
# If something weird happens while firstChild, pop, etc:
except Exception:
self.logger.error('Error in sge._parse_joblist_output: stdout={}' \
.format(stdout))
raise SchedulerError('Error during xml processing, of stdout')
jobs = [i for i in first_child.getElementsByTagName('job_list')]
# jobs = [i for i in jobinfo.getElementsByTagName('job_list')]
# print [i[0].childNodes[0].data for i in job_numbers if i]
joblist = []
for job in jobs:
this_job = JobInfo()
# In case the user needs more information the xml-data for
# each job is stored:
this_job.raw_data = job.toxml()
try:
job_element = job.getElementsByTagName('JB_job_number').pop(0)
element_child = job_element.childNodes.pop(0)
this_job.job_id = str(element_child.data).strip()
if not this_job.job_id:
raise SchedulerError
except SchedulerError:
self.logger.error('Error in sge._parse_joblist_output:'
'no job id is given, stdout={}' \
.format(stdout))
                raise SchedulerError('Error in sge._parse_joblist_output: ' 'no job id is given')
except IndexError:
self.logger.error("No 'job_number' given for job index {} in "
'job list, stdout={}'.format(jobs.index(job) \
, stdout))
                raise IndexError('Error in sge._parse_joblist_output: ' 'no job id is given')
try:
job_element = job.getElementsByTagName('state').pop(0)
element_child = job_element.childNodes.pop(0)
job_state_string = str(element_child.data).strip()
try:
this_job.job_state = _MAP_STATUS_SGE[job_state_string]
except KeyError:
self.logger.warning("Unrecognized job_state '{}' for job "
'id {}'.format(job_state_string, this_job.job_id))
this_job.job_state = JobState.UNDETERMINED
except IndexError:
self.logger.warning("No 'job_state' field for job id {} in" 'stdout={}'.format(this_job.job_id, stdout))
this_job.job_state = JobState.UNDETERMINED
try:
job_element = job.getElementsByTagName('JB_owner').pop(0)
element_child = job_element.childNodes.pop(0)
this_job.job_owner = str(element_child.data).strip()
except IndexError:
self.logger.warning("No 'job_owner' field for job id {}".format(this_job.job_id))
try:
job_element = job.getElementsByTagName('JB_name').pop(0)
element_child = job_element.childNodes.pop(0)
this_job.title = str(element_child.data).strip()
except IndexError:
self.logger.warning("No 'title' field for job id {}".format(this_job.job_id))
try:
job_element = job.getElementsByTagName('queue_name').pop(0)
element_child = job_element.childNodes.pop(0)
this_job.queue_name = str(element_child.data).strip()
except IndexError:
if this_job.job_state == JobState.RUNNING:
self.logger.warning("No 'queue_name' field for job id {}".format(this_job.job_id))
try:
job_element = job.getElementsByTagName('JB_submission_time').pop(0)
element_child = job_element.childNodes.pop(0)
time_string = str(element_child.data).strip()
try:
this_job.submission_time = self._parse_time_string(time_string)
except ValueError:
self.logger.warning("Error parsing 'JB_submission_time' "
"for job id {} ('{}')".format(this_job.job_id, time_string))
except IndexError:
try:
job_element = job.getElementsByTagName('JAT_start_time').pop(0)
element_child = job_element.childNodes.pop(0)
time_string = str(element_child.data).strip()
try:
this_job.dispatch_time = self._parse_time_string(time_string)
except ValueError:
self.logger.warning("Error parsing 'JAT_start_time'"
"for job id {} ('{}')".format(this_job.job_id, time_string))
except IndexError:
self.logger.warning("No 'JB_submission_time' and no "
"'JAT_start_time' field for job "
'id {}'.format(this_job.job_id))
# There is also cpu_usage, mem_usage, io_usage information available:
if this_job.job_state == JobState.RUNNING:
try:
job_element = job.getElementsByTagName('slots').pop(0)
element_child = job_element.childNodes.pop(0)
this_job.num_mpiprocs = str(element_child.data).strip()
except IndexError:
self.logger.warning("No 'slots' field for job id {}".format(this_job.job_id))
joblist.append(this_job)
# self.logger.debug("joblist final: {}".format(joblist))
return joblist
def _parse_submit_output(self, retval, stdout, stderr):
"""
Parse the output of the submit command, as returned by executing the
command returned by _get_submit_command command.
To be implemented by the plugin.
Return a string with the JobID.
"""
if retval != 0:
self.logger.error('Error in _parse_submit_output: retval={}; '
'stdout={}; stderr={}'.format(retval, stdout, stderr))
raise SchedulerError('Error during submission, retval={}\n'
'stdout={}\nstderr={}'.format(retval, stdout, stderr))
if stderr.strip():
self.logger.warning('in _parse_submit_output for {}: '
'there was some text in stderr: {}'.format(str(self.transport), stderr))
return stdout.strip()
def _parse_time_string(self, string, fmt='%Y-%m-%dT%H:%M:%S'):
"""
Parse a time string in the format returned from qstat -xml -ext and
returns a datetime object.
Example format: 2013-06-13T11:53:11
"""
import time
import datetime
try:
time_struct = time.strptime(string, fmt)
except Exception as exc:
self.logger.debug('Unable to parse time string {}, the message was {}'.format(string, exc))
raise ValueError('Problem parsing the time string.')
# I convert from a time_struct to a datetime object going through
# the seconds since epoch, as suggested on stackoverflow:
# http://stackoverflow.com/questions/1697815
return datetime.datetime.fromtimestamp(time.mktime(time_struct))
def _get_kill_command(self, jobid):
"""
Return the command to kill the job with specified jobid.
"""
submit_command = 'qdel {}'.format(jobid)
self.logger.info('killing job {}'.format(jobid))
return submit_command
def _parse_kill_output(self, retval, stdout, stderr):
"""
Parse the output of the kill command.
To be implemented by the plugin.
:return: True if everything seems ok, False otherwise.
"""
if retval != 0:
self.logger.error('Error in _parse_kill_output: retval={}; '
'stdout={}; stderr={}'.format(retval, stdout, stderr))
return False
if stderr.strip():
self.logger.warning('in _parse_kill_output for {}: '
'there was some text in stderr: {}'.format(str(self.transport), stderr))
if stdout.strip():
self.logger.info('in _parse_kill_output for {}: '
'there was some text in stdout: {}'.format(str(self.transport), stdout))
return True
|
the-stack_106_22562 | # PyTorch
from torch import cuda
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
# Data science tools
from os import path
#
def create_paths(data_path, train_path, test_path,
save_net_path):
data_dir = path.abspath(data_path)
train_dir = data_dir + train_path
test_dir = data_dir + test_path
# val_dir = data_dir + val_path
save_net_path = 'src/models/' + save_net_path
checkpoint_path = save_net_path + 'h'
return data_dir, train_dir, test_dir, save_net_path, checkpoint_path
#
def check_on_gpu():
train_on_gpu = cuda.is_available()
return train_on_gpu
#
def check_number_gpu():
    # Report how many GPUs are visible; return True when more than one is available.
    multi_gpu = False
    if check_on_gpu():
        gpu_count = cuda.device_count()
        print(f'{gpu_count} gpus detected.')
        multi_gpu = gpu_count > 1
    return multi_gpu
#
def check_size_features_and_labels(data_loaders, dataset):
train_iter = iter(data_loaders['train'])
features, labels = next(train_iter)
print(features.shape, '\n', labels.shape, '\n')
n_classes = len(dataset['train'].classes)
print(f'There are {n_classes} different classes.')
#
def check_total_params(model):
total_params = sum(p.numel() for p in model.parameters())
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return print(f'{total_params:,} total parameters.\n {total_trainable_params:,} trainable parameters \n')
#
def check_params_to_learn(model):
    # Collect (and print) the parameters that will actually be optimised.
    print("\nParams to learn:")
    params_to_update = []
    for name, param in model.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)
            print("\t", name)
    return params_to_update
|
the-stack_106_22565 | import collections
import pickle
import os
def make_char_vocab(filename, output_path):
chars = []
with open(filename, 'r', encoding='utf-8') as f:
data = f.read()
words = data.strip().split('\n')
print(len(words))
for word in words[4:]: # ignore ['<PAD>', '<UNK>', '<ROOT>', '<NUM>']
for char in word:
chars.append(char)
chars_counter = collections.Counter(chars).most_common()
    # Reserve the special symbols first, then append characters by frequency.
    char_vocab = ['<PAD>', '<UNK>', '<ROOT>', '<NUM>'] + [item[0] for item in chars_counter]
print(char_vocab)
char_to_idx = {char:idx for idx, char in enumerate(char_vocab)}
idx_to_char = {idx:char for idx, char in enumerate(char_vocab)}
print(char_to_idx)
vocab_path = os.path.join(output_path,'char.vocab')
char2idx_path = os.path.join(output_path,'char2idx.bin')
idx2char_path = os.path.join(output_path,'idx2char.bin')
with open(vocab_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(char_vocab))
with open(char2idx_path, 'wb') as f:
pickle.dump(char_to_idx, f)
with open(idx2char_path, 'wb') as f:
pickle.dump(idx_to_char, f)
make_char_vocab('temp/word.vocab', 'temp') |
the-stack_106_22570 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import logging
import traceback
from thrift.server.TServer import TServerEventHandler
from thrift.Thrift import (
TException,
TProcessor,
)
from thrift.async_common import (
AsyncioRpcConnectionContext,
FramedProtocol,
THeaderProtocol,
ThriftHeaderClientProtocolBase,
TReadWriteBuffer,
WrappedTransport,
)
__all__ = [
'ThriftAsyncServerFactory', 'ThriftClientProtocolFactory',
'ThriftServerProtocolFactory',
]
logger = logging.getLogger(__name__)
#
# Thrift server support
#
async def ThriftAsyncServerFactory(
processor, *, interface=None, port=0, loop=None, nthreads=None, sock=None,
backlog=100, ssl=None, event_handler=None, protocol_factory=None
):
"""
ThriftAsyncServerFactory(processor) -> asyncio.Server
asyncio.Server factory for Thrift processors. In the spirit of "there
should be one obvious way to do it", this server only supports the new
THeader protocol.
If `interface` is None (the default), listens on all interfaces. `port` is
0 by default, which makes the OS allocate the port. Enumerate the returned
server's "sockets" attribute to know the port in this case.
If not given, the default event loop is used. If `nthreads` is given, the
default executor for the event loop is set to a thread pool of up to
`nthreads`.
ssl is an instance of ssl.SSLContext. If None (default) or False SSL/TLS is
not used.
event_handler must be a subclass of thrift.server.TServer. If None,
thrift.server.TServer.TServerEventHandler is used. Specify a custom handler
for custom event handling (e.g. handling new connections)
protocol_factory is a function that takes a triplet of
(processor, event_handler, loop=None) and returns a `asyncio.Protocol` instance
that will be passed to a call to `asyncio.create_server`. processor will be a
subclass of `TProcessor`, event_handler will be a subclass of `TServer`, and
loop is an `Optional[asyncio.AbstractEventLoop]`. If protocol_factory is None
`ThriftHeaderServerProtocol` is used.
Notes about the processor method handling:
1. By default all methods are executed synchronously on the event loop.
This can lead to poor performance if a single run takes long to process.
2. Mark coroutines with `async def` if you wish to use `await`
to call async services, schedule tasks with customized executors, etc.
3. Mark methods with @run_on_thread if you wish to run them on the thread
pool executor. Note that unless you're accessing C extensions which free
the GIL, this is not going to win you any performance.
Use this to initialize multiple servers asynchronously::
loop = asyncio.get_event_loop()
servers = [
ThriftAsyncServerFactory(handler1, port=9090, loop=loop),
ThriftAsyncServerFactory(handler2, port=9091, loop=loop),
]
loop.run_until_complete(asyncio.wait(servers))
try:
loop.run_forever() # Servers are initialized now
finally:
for server in servers:
server.close()
"""
if loop is None:
loop = asyncio.get_event_loop()
if not isinstance(processor, TProcessor):
try:
processor = processor._processor_type(processor, loop=loop)
except AttributeError:
raise TypeError(
"Unsupported processor type: {}".format(type(processor)),
)
if nthreads:
from concurrent.futures import ThreadPoolExecutor
loop.set_default_executor(
ThreadPoolExecutor(max_workers=nthreads),
)
ehandler = TServerEventHandler() if event_handler is None else event_handler
protocol_factory = protocol_factory or ThriftHeaderServerProtocol
pfactory = functools.partial(protocol_factory, processor, ehandler, loop)
server = await loop.create_server(
pfactory,
interface,
port,
sock=sock,
backlog=backlog,
ssl=ssl,
)
if server.sockets:
for socket in server.sockets:
ehandler.preServe(socket.getsockname())
return server
def ThriftServerProtocolFactory(processor, server_event_handler, loop=None):
return functools.partial(
ThriftHeaderServerProtocol, processor, server_event_handler, loop,
)
class ThriftHeaderServerProtocol(FramedProtocol):
def __init__(self, processor, server_event_handler, loop=None):
super().__init__(loop=loop)
self.processor = processor
self.server_event_handler = server_event_handler
self.server_context = None
async def message_received(self, frame):
# Note: we are using a single `prot` for in and out so that
# we can support legacy clients that only understand FRAMED.
# The discovery of what the client supports happens in iprot's
# transport so we have to reuse a single one here.
buf = TReadWriteBuffer(frame)
prot = THeaderProtocol(buf)
try:
await self.processor.process(
prot, prot, self.server_context,
)
msg = buf.getvalue()
if len(msg) > 0:
self.transport.write(msg)
except TException as e:
logger.warning("TException while processing request: %s", str(e))
msg = buf.getvalue()
if len(msg) > 0:
self.transport.write(msg)
except asyncio.CancelledError:
self.transport.close()
except BaseException as e:
logger.error("Exception while processing request: %s", str(e))
logger.error(traceback.format_exc())
self.transport.close()
def connection_made(self, transport):
self.upgrade_transport(transport)
def upgrade_transport(self, transport):
self.transport = transport
socket = self.transport.get_extra_info("socket")
if socket is not None:
self.server_context = AsyncioRpcConnectionContext(socket)
self.server_event_handler.newConnection(self.server_context)
def connection_lost(self, exc):
self.server_event_handler.connectionDestroyed(self.server_context)
#
# Thrift client support
#
def ThriftClientProtocolFactory(
client_class,
loop=None,
timeouts=None,
client_type=None,
):
return functools.partial(
ThriftHeaderClientProtocol,
client_class,
loop,
timeouts,
client_type,
)
class SenderTransport(WrappedTransport):
async def _send(self):
while True:
msg = await self._queue.get()
self._clean_producers()
self._trans.write(msg)
class ThriftHeaderClientProtocol(ThriftHeaderClientProtocolBase):
async def timeout_task(self, fname, seqid, delay):
await asyncio.sleep(delay, loop=self.loop)
self._handle_timeout(fname, seqid)
def wrapAsyncioTransport(self, asyncio_transport):
return SenderTransport(asyncio_transport, self, self.loop)
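# --- Usage sketch (illustrative, not part of the original module) -----------
# `ExampleService` stands in for a Thrift-generated client class, and the
# `protocol.client` attribute is assumed from ThriftHeaderClientProtocolBase;
# both may differ in your Thrift distribution.
async def _example_thrift_client(host, port, loop):
    transport, protocol = await loop.create_connection(
        ThriftClientProtocolFactory(ExampleService.Client, loop=loop),
        host=host,
        port=port,
    )
    try:
        # Call any method declared in the service IDL.
        return await protocol.client.ping()
    finally:
        transport.close()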
|
the-stack_106_22571 | import json
import logging
import os
import shutil
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Union, Dict, Any, Iterable, List, IO, Tuple, TextIO, Optional
import cv2
import numpy as np
import pyproj
from numpy import ndarray
from opensfm import context, features, geo, pygeometry, pymap, types
from PIL import Image
logger: logging.Logger = logging.getLogger(__name__)
def camera_from_json(key: str, obj: Dict[str, Any]) -> pygeometry.Camera:
"""
Read camera from a json object
"""
camera = None
pt = obj.get("projection_type", "perspective")
if pt == "perspective":
camera = pygeometry.Camera.create_perspective(
obj["focal"], obj.get("k1", 0.0), obj.get("k2", 0.0)
)
elif pt == "brown":
camera = pygeometry.Camera.create_brown(
obj["focal_x"],
obj["focal_y"] / obj["focal_x"],
np.array([obj.get("c_x", 0.0), obj.get("c_y", 0.0)]),
np.array(
[
obj.get("k1", 0.0),
obj.get("k2", 0.0),
obj.get("k3", 0.0),
obj.get("p1", 0.0),
obj.get("p2", 0.0),
]
),
)
elif pt == "fisheye":
camera = pygeometry.Camera.create_fisheye(
obj["focal"], obj.get("k1", 0.0), obj.get("k2", 0.0)
)
elif pt == "fisheye_opencv":
camera = pygeometry.Camera.create_fisheye_opencv(
obj["focal_x"],
obj["focal_y"] / obj["focal_x"],
np.array([obj.get("c_x", 0.0), obj.get("c_y", 0.0)]),
np.array(
[
obj.get("k1", 0.0),
obj.get("k2", 0.0),
obj.get("k3", 0.0),
obj.get("k4", 0.0),
]
),
)
elif pt == "fisheye62":
camera = pygeometry.Camera.create_fisheye62(
obj["focal_x"],
obj["focal_y"] / obj["focal_x"],
np.array([obj.get("c_x", 0.0), obj.get("c_y", 0.0)]),
np.array(
[
obj.get("k1", 0.0),
obj.get("k2", 0.0),
obj.get("k3", 0.0),
obj.get("k4", 0.0),
obj.get("k5", 0.0),
obj.get("k6", 0.0),
obj.get("p1", 0.0),
obj.get("p2", 0.0),
]
),
)
elif pt == "fisheye624":
camera = pygeometry.Camera.create_fisheye624(
obj["focal_x"],
obj["focal_y"] / obj["focal_x"],
np.array([obj.get("c_x", 0.0), obj.get("c_y", 0.0)]),
np.array(
[
obj.get("k1", 0.0),
obj.get("k2", 0.0),
obj.get("k3", 0.0),
obj.get("k4", 0.0),
obj.get("k5", 0.0),
obj.get("k6", 0.0),
obj.get("p1", 0.0),
obj.get("p2", 0.0),
obj.get("s0", 0.0),
obj.get("s1", 0.0),
obj.get("s2", 0.0),
obj.get("s3", 0.0),
]
),
)
elif pt == "radial":
camera = pygeometry.Camera.create_radial(
obj["focal_x"],
obj["focal_y"] / obj["focal_x"],
np.array([obj.get("c_x", 0.0), obj.get("c_y", 0.0)]),
np.array(
[
obj.get("k1", 0.0),
obj.get("k2", 0.0),
]
),
)
elif pt == "simple_radial":
camera = pygeometry.Camera.create_simple_radial(
obj["focal_x"],
obj["focal_y"] / obj["focal_x"],
np.array([obj.get("c_x", 0.0), obj.get("c_y", 0.0)]),
obj.get("k1", 0.0),
)
elif pt == "dual":
camera = pygeometry.Camera.create_dual(
obj.get("transition", 0.5),
obj["focal"],
obj.get("k1", 0.0),
obj.get("k2", 0.0),
)
elif pygeometry.Camera.is_panorama(pt):
camera = pygeometry.Camera.create_spherical()
else:
raise NotImplementedError
camera.id = key
camera.width = int(obj.get("width", 0))
camera.height = int(obj.get("height", 0))
return camera
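# Example (illustrative): build a perspective camera from a JSON-style dict.
# The field names mirror the schema parsed above; the numeric values are made up.
def _example_camera_from_json() -> pygeometry.Camera:
    obj = {
        "projection_type": "perspective",
        "width": 1920,
        "height": 1080,
        "focal": 0.85,
        "k1": -0.01,
        "k2": 0.002,
    }
    return camera_from_json("example_cam", obj)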
def pose_from_json(obj: Dict[str, Any]) -> pygeometry.Pose:
pose = pygeometry.Pose()
pose.rotation = obj["rotation"]
if "translation" in obj:
pose.translation = obj["translation"]
return pose
def bias_from_json(obj: Dict[str, Any]) -> pygeometry.Similarity:
return pygeometry.Similarity(obj["rotation"], obj["translation"], obj["scale"])
def assign_shot_attributes(obj: Dict[str, Any], shot: pymap.Shot) -> None:
shot.metadata = json_to_pymap_metadata(obj)
if "scale" in obj:
shot.scale = obj["scale"]
if "covariance" in obj:
shot.covariance = np.array(obj["covariance"])
if "merge_cc" in obj:
shot.merge_cc = obj["merge_cc"]
if "vertices" in obj and "faces" in obj:
shot.mesh.vertices = obj["vertices"]
shot.mesh.faces = obj["faces"]
def shot_in_reconstruction_from_json(
reconstruction: types.Reconstruction,
key: str,
obj: Dict[str, Any],
rig_instance_id: Optional[str] = None,
rig_camera_id: Optional[str] = None,
is_pano_shot: bool = False,
) -> pymap.Shot:
"""
Read shot from a json object and append it to a reconstruction
"""
pose = pose_from_json(obj)
if is_pano_shot:
shot = reconstruction.create_pano_shot(key, obj["camera"], pose)
else:
shot = reconstruction.create_shot(
key, obj["camera"], pose, rig_camera_id, rig_instance_id
)
assign_shot_attributes(obj, shot)
return shot
def single_shot_from_json(
key: str, obj: Dict[str, Any], camera: pygeometry.Camera
) -> pymap.Shot:
"""
Read shot from a json object
"""
pose = pose_from_json(obj)
shot = pymap.Shot(key, camera, pose)
assign_shot_attributes(obj, shot)
return shot
def point_from_json(
reconstruction: types.Reconstruction, key: str, obj: Dict[str, Any]
) -> pymap.Landmark:
"""
Read a point from a json object
"""
point = reconstruction.create_point(key, obj["coordinates"])
point.color = obj["color"]
return point
def rig_camera_from_json(key: str, obj: Dict[str, Any]) -> pymap.RigCamera:
"""
    Read a rig camera from a json object
"""
pose = pygeometry.Pose()
pose.rotation = obj["rotation"]
pose.translation = obj["translation"]
rig_camera = pymap.RigCamera(pose, key)
return rig_camera
def rig_cameras_from_json(obj: Dict[str, Any]) -> Dict[str, pymap.RigCamera]:
"""
Read rig cameras from a json object
"""
rig_cameras = {}
for key, value in obj.items():
rig_cameras[key] = rig_camera_from_json(key, value)
return rig_cameras
def rig_instance_from_json(
reconstruction: types.Reconstruction, instance_id: str, obj: Dict[str, Any]
) -> None:
"""
    Read a rig instance from a json object
"""
reconstruction.add_rig_instance(pymap.RigInstance(instance_id))
pose = pygeometry.Pose()
pose.rotation = obj["rotation"]
pose.translation = obj["translation"]
reconstruction.rig_instances[instance_id].pose = pose
def rig_instance_camera_per_shot(obj: Dict[str, Any]) -> Dict[str, Tuple[str, str]]:
"""
Given JSON root data, return (rig_instance_id, rig_camera_id) per shot.
"""
panoshots = set(obj["pano_shots"].keys()) if "pano_shots" in obj else {}
rig_shots = {}
if "rig_instances" in obj:
rig_shots = {
s_key: (i_key, c_key)
for i_key, ri in obj["rig_instances"].items()
for s_key, c_key in ri["rig_camera_ids"].items()
if s_key not in panoshots
}
return rig_shots
def reconstruction_from_json(obj: Dict[str, Any]) -> types.Reconstruction:
"""
Read a reconstruction from a json object
"""
reconstruction = types.Reconstruction()
# Extract cameras
for key, value in obj["cameras"].items():
camera = camera_from_json(key, value)
reconstruction.add_camera(camera)
# Extract camera biases
if "biases" in obj:
for key, value in obj["biases"].items():
transform = bias_from_json(value)
reconstruction.set_bias(key, transform)
# Extract rig models
if "rig_cameras" in obj:
for key, value in obj["rig_cameras"].items():
reconstruction.add_rig_camera(rig_camera_from_json(key, value))
# Extract rig instances from shots
if "rig_instances" in obj:
for key, value in obj["rig_instances"].items():
rig_instance_from_json(reconstruction, key, value)
# Extract shots
rig_shots = rig_instance_camera_per_shot(obj)
for key, value in obj["shots"].items():
shot_in_reconstruction_from_json(
reconstruction,
key,
value,
rig_camera_id=rig_shots[key][1] if key in rig_shots else None,
rig_instance_id=rig_shots[key][0] if key in rig_shots else None,
is_pano_shot=False,
)
# Extract points
if "points" in obj:
for key, value in obj["points"].items():
point_from_json(reconstruction, key, value)
# Extract pano_shots
if "pano_shots" in obj:
for key, value in obj["pano_shots"].items():
shot_in_reconstruction_from_json(
reconstruction, key, value, is_pano_shot=True
)
# Extract reference topocentric frame
if "reference_lla" in obj:
lla = obj["reference_lla"]
reconstruction.reference = geo.TopocentricConverter(
lla["latitude"], lla["longitude"], lla["altitude"]
)
return reconstruction
def reconstructions_from_json(obj: List[Dict[str, Any]]) -> List[types.Reconstruction]:
"""
Read all reconstructions from a json object
"""
return [reconstruction_from_json(i) for i in obj]
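# Example (illustrative): load every reconstruction stored in a
# `reconstruction.json` file; `open_rt` and `json_load` are defined later in
# this module.
def _example_load_reconstructions(path: str) -> List[types.Reconstruction]:
    with open_rt(path) as fin:
        return reconstructions_from_json(json_load(fin))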
def cameras_from_json(obj: Dict[str, Any]) -> Dict[str, pygeometry.Camera]:
"""
Read cameras from a json object
"""
cameras = {}
for key, value in obj.items():
cameras[key] = camera_from_json(key, value)
return cameras
def camera_to_json(camera) -> Dict[str, Any]:
"""
Write camera to a json object
"""
if camera.projection_type == "perspective":
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
"focal": camera.focal,
"k1": camera.k1,
"k2": camera.k2,
}
elif camera.projection_type == "brown":
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
"focal_x": camera.focal,
"focal_y": camera.focal * camera.aspect_ratio,
"c_x": camera.principal_point[0],
"c_y": camera.principal_point[1],
"k1": camera.k1,
"k2": camera.k2,
"p1": camera.p1,
"p2": camera.p2,
"k3": camera.k3,
}
elif camera.projection_type == "fisheye":
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
"focal": camera.focal,
"k1": camera.k1,
"k2": camera.k2,
}
elif camera.projection_type == "fisheye_opencv":
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
"focal_x": camera.focal,
"focal_y": camera.focal * camera.aspect_ratio,
"c_x": camera.principal_point[0],
"c_y": camera.principal_point[1],
"k1": camera.k1,
"k2": camera.k2,
"k3": camera.k3,
"k4": camera.k4,
}
elif camera.projection_type == "fisheye62":
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
"focal_x": camera.focal,
"focal_y": camera.focal * camera.aspect_ratio,
"c_x": camera.principal_point[0],
"c_y": camera.principal_point[1],
"k1": camera.k1,
"k2": camera.k2,
"k3": camera.k3,
"k4": camera.k4,
"k5": camera.k5,
"k6": camera.k6,
"p1": camera.p1,
"p2": camera.p2,
}
elif camera.projection_type == "fisheye624":
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
"focal_x": camera.focal,
"focal_y": camera.focal * camera.aspect_ratio,
"c_x": camera.principal_point[0],
"c_y": camera.principal_point[1],
"k1": camera.k1,
"k2": camera.k2,
"k3": camera.k3,
"k4": camera.k4,
"k5": camera.k5,
"k6": camera.k6,
"p1": camera.p1,
"p2": camera.p2,
"s0": camera.s0,
"s1": camera.s1,
"s2": camera.s2,
"s3": camera.s3,
}
elif camera.projection_type == "simple_radial":
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
"focal_x": camera.focal,
"focal_y": camera.focal * camera.aspect_ratio,
"c_x": camera.principal_point[0],
"c_y": camera.principal_point[1],
"k1": camera.k1,
}
elif camera.projection_type == "radial":
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
"focal_x": camera.focal,
"focal_y": camera.focal * camera.aspect_ratio,
"c_x": camera.principal_point[0],
"c_y": camera.principal_point[1],
"k1": camera.k1,
"k2": camera.k2,
}
elif camera.projection_type == "dual":
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
"focal": camera.focal,
"k1": camera.k1,
"k2": camera.k2,
"transition": camera.transition,
}
elif pygeometry.Camera.is_panorama(camera.projection_type):
return {
"projection_type": camera.projection_type,
"width": camera.width,
"height": camera.height,
}
else:
raise NotImplementedError
def shot_to_json(shot: pymap.Shot) -> Dict[str, Any]:
"""
Write shot to a json object
"""
obj: Dict[str, Any] = {
"rotation": list(shot.pose.rotation),
"translation": list(shot.pose.translation),
"camera": shot.camera.id,
}
if shot.metadata is not None:
obj.update(pymap_metadata_to_json(shot.metadata))
if shot.mesh is not None:
obj["vertices"] = [list(vertice) for vertice in shot.mesh.vertices]
obj["faces"] = [list(face) for face in shot.mesh.faces]
if hasattr(shot, "scale"):
obj["scale"] = shot.scale
if hasattr(shot, "covariance"):
obj["covariance"] = shot.covariance.tolist()
if hasattr(shot, "merge_cc"):
obj["merge_cc"] = shot.merge_cc
return obj
def rig_instance_to_json(rig_instance: pymap.RigInstance) -> Dict[str, Any]:
"""
Write a rig instance to a json object
"""
return {
"translation": list(rig_instance.pose.translation),
"rotation": list(rig_instance.pose.rotation),
"rig_camera_ids": rig_instance.rig_camera_ids,
}
def rig_camera_to_json(rig_camera: pymap.RigCamera) -> Dict[str, Any]:
"""
Write a rig camera to a json object
"""
obj = {
"rotation": list(rig_camera.pose.rotation),
"translation": list(rig_camera.pose.translation),
}
return obj
def pymap_metadata_to_json(metadata: pymap.ShotMeasurements) -> Dict[str, Any]:
obj = {}
if metadata.orientation.has_value:
obj["orientation"] = metadata.orientation.value
if metadata.capture_time.has_value:
obj["capture_time"] = metadata.capture_time.value
if metadata.gps_accuracy.has_value:
obj["gps_dop"] = metadata.gps_accuracy.value
if metadata.gps_position.has_value:
obj["gps_position"] = list(metadata.gps_position.value)
if metadata.accelerometer.has_value:
obj["accelerometer"] = list(metadata.accelerometer.value)
if metadata.compass_angle.has_value and metadata.compass_accuracy.has_value:
obj["compass"] = {
"angle": metadata.compass_angle.value,
"accuracy": metadata.compass_accuracy.value,
}
else:
if metadata.compass_angle.has_value:
obj["compass"] = {"angle": metadata.compass_angle.value}
elif metadata.compass_accuracy.has_value:
obj["compass"] = {"accuracy": metadata.compass_accuracy.value}
if metadata.sequence_key.has_value:
obj["skey"] = metadata.sequence_key.value
return obj
def json_to_pymap_metadata(obj: Dict[str, Any]) -> pymap.ShotMeasurements:
metadata = pymap.ShotMeasurements()
if obj.get("orientation") is not None:
metadata.orientation.value = obj.get("orientation")
if obj.get("capture_time") is not None:
metadata.capture_time.value = obj.get("capture_time")
if obj.get("gps_dop") is not None:
metadata.gps_accuracy.value = obj.get("gps_dop")
if obj.get("gps_position") is not None:
metadata.gps_position.value = obj.get("gps_position")
if obj.get("skey") is not None:
metadata.sequence_key.value = obj.get("skey")
if obj.get("accelerometer") is not None:
metadata.accelerometer.value = obj.get("accelerometer")
if obj.get("compass") is not None:
compass = obj.get("compass")
if "angle" in compass:
metadata.compass_angle.value = compass["angle"]
if "accuracy" in compass:
metadata.compass_accuracy.value = compass["accuracy"]
return metadata
def point_to_json(point: pymap.Landmark) -> Dict[str, Any]:
"""
Write a point to a json object
"""
return {
"color": list(point.color.astype(float)),
"coordinates": list(point.coordinates),
}
def reconstruction_to_json(reconstruction: types.Reconstruction) -> Dict[str, Any]:
"""
Write a reconstruction to a json object
"""
obj = {"cameras": {}, "shots": {}, "points": {}, "biases": {}}
# Extract cameras
for camera in reconstruction.cameras.values():
obj["cameras"][camera.id] = camera_to_json(camera)
# Extract cameras biases
for camera_id, bias in reconstruction.biases.items():
obj["biases"][camera_id] = bias_to_json(bias)
# Extract rig models
if len(reconstruction.rig_cameras):
obj["rig_cameras"] = {}
for rig_camera in reconstruction.rig_cameras.values():
obj["rig_cameras"][rig_camera.id] = rig_camera_to_json(rig_camera)
if len(reconstruction.rig_instances):
obj["rig_instances"] = {}
for rig_instance in reconstruction.rig_instances.values():
obj["rig_instances"][rig_instance.id] = rig_instance_to_json(rig_instance)
# Extract shots
for shot in reconstruction.shots.values():
obj["shots"][shot.id] = shot_to_json(shot)
# Extract points
for point in reconstruction.points.values():
obj["points"][point.id] = point_to_json(point)
# Extract pano_shots
if hasattr(reconstruction, "pano_shots"):
if len(reconstruction.pano_shots) > 0:
obj["pano_shots"] = {}
for shot in reconstruction.pano_shots.values():
obj["pano_shots"][shot.id] = shot_to_json(shot)
# Extract reference topocentric frame
if reconstruction.reference:
ref = reconstruction.reference
obj["reference_lla"] = {
"latitude": ref.lat,
"longitude": ref.lon,
"altitude": ref.alt,
}
return obj
def reconstructions_to_json(
reconstructions: Iterable[types.Reconstruction],
) -> List[Dict[str, Any]]:
"""
Write all reconstructions to a json object
"""
return [reconstruction_to_json(i) for i in reconstructions]
def cameras_to_json(cameras: Dict[str, pygeometry.Camera]) -> Dict[str, Dict[str, Any]]:
"""
Write cameras to a json object
"""
obj = {}
for camera in cameras.values():
obj[camera.id] = camera_to_json(camera)
return obj
def bias_to_json(bias: pygeometry.Similarity) -> Dict[str, Any]:
return {
"rotation": list(bias.rotation),
"translation": list(bias.translation),
"scale": bias.scale,
}
def rig_cameras_to_json(
rig_cameras: Dict[str, pymap.RigCamera]
) -> Dict[str, Dict[str, Any]]:
"""
Write rig cameras to a json object
"""
obj = {}
for rig_camera in rig_cameras.values():
obj[rig_camera.id] = rig_camera_to_json(rig_camera)
return obj
def camera_from_vector(
camera_id: str,
width: int,
height: int,
projection_type: str,
parameters: List[float],
) -> pygeometry.Camera:
"""Build a camera from a serialized vector of parameters."""
if projection_type == "perspective":
focal, k1, k2 = parameters
camera = pygeometry.Camera.create_perspective(focal, k1, k2)
elif projection_type == "brown":
fx, fy, cx, cy, k1, k2, p1, p2, k3 = parameters
camera = pygeometry.Camera.create_brown(
fx, fy / fx, np.array([cx, cy]), np.array([k1, k2, k3, p1, p2])
)
elif projection_type == "fisheye":
focal, k1, k2 = parameters
camera = pygeometry.Camera.create_fisheye(focal, k1, k2)
elif projection_type == "fisheye_opencv":
fx, fy, cx, cy, k1, k2, k3, k4 = parameters
camera = pygeometry.Camera.create_fisheye_opencv(
fx, fy / fx, np.array([cx, cy]), np.array([k1, k2, k3, k4])
)
elif projection_type == "fisheye62":
fx, fy, cx, cy, k1, k2, k3, k4, k5, k6, p1, p2 = parameters
camera = pygeometry.Camera.create_fisheye62(
fx, fy / fx, np.array([cx, cy]), np.array([k1, k2, k3, k4, k5, k6, p1, p2])
)
elif projection_type == "fisheye624":
fx, fy, cx, cy, k1, k2, k3, k4, k5, k6, p1, p2, s0, s1, s2, s3 = parameters
camera = pygeometry.Camera.create_fisheye624(
fx,
fy / fx,
np.array([cx, cy]),
np.array([k1, k2, k3, k4, k5, k6, p1, p2, s0, s1, s2, s3]),
)
elif projection_type == "radial":
fx, fy, cx, cy, k1, k2 = parameters
camera = pygeometry.Camera.create_radial(
fx, fy / fx, np.array([cx, cy]), np.array([k1, k2])
)
elif projection_type == "simple_radial":
fx, fy, cx, cy, k1 = parameters
camera = pygeometry.Camera.create_simple_radial(
fx, fy / fx, np.array([cx, cy]), k1
)
elif projection_type == "dual":
focal, k1, k2, transition = parameters
camera = pygeometry.Camera.create_dual(transition, focal, k1, k2)
elif pygeometry.Camera.is_panorama(projection_type):
camera = pygeometry.Camera.create_spherical()
else:
raise NotImplementedError
camera.id = camera_id
camera.width = width
camera.height = height
return camera
def camera_to_vector(camera: pygeometry.Camera) -> List[float]:
"""Serialize camera parameters to a vector of floats."""
if camera.projection_type == "perspective":
parameters = [camera.focal, camera.k1, camera.k2]
elif camera.projection_type == "brown":
parameters = [
camera.focal,
camera.focal * camera.aspect_ratio,
camera.principal_point[0],
camera.principal_point[1],
camera.k1,
camera.k2,
camera.p1,
camera.p2,
camera.k3,
]
elif camera.projection_type == "fisheye":
parameters = [camera.focal, camera.k1, camera.k2]
elif camera.projection_type == "fisheye_opencv":
parameters = [
camera.focal,
camera.focal * camera.aspect_ratio,
camera.principal_point[0],
camera.principal_point[1],
camera.k1,
camera.k2,
camera.k3,
camera.k4,
]
elif camera.projection_type == "fisheye62":
parameters = [
camera.focal,
camera.focal * camera.aspect_ratio,
camera.principal_point[0],
camera.principal_point[1],
camera.k1,
camera.k2,
camera.k3,
camera.k4,
camera.k5,
camera.k6,
camera.p1,
camera.p2,
]
elif camera.projection_type == "fisheye624":
parameters = [
camera.focal,
camera.focal * camera.aspect_ratio,
camera.principal_point[0],
camera.principal_point[1],
camera.k1,
camera.k2,
camera.k3,
camera.k4,
camera.k5,
camera.k6,
camera.p1,
camera.p2,
camera.s0,
camera.s1,
camera.s2,
camera.s3,
]
elif camera.projection_type == "radial":
parameters = [
camera.focal,
camera.focal * camera.aspect_ratio,
camera.principal_point[0],
camera.principal_point[1],
camera.k1,
camera.k2,
]
elif camera.projection_type == "simple_radial":
parameters = [
camera.focal,
camera.focal * camera.aspect_ratio,
camera.principal_point[0],
camera.principal_point[1],
camera.k1,
]
elif camera.projection_type == "dual":
parameters = [
camera.focal,
camera.k1,
camera.k2,
camera.transition,
]
elif pygeometry.Camera.is_panorama(camera.projection_type):
parameters = []
else:
raise NotImplementedError
return parameters
def _read_gcp_list_lines(
lines: Iterable[str],
projection,
exifs: Dict[str, Dict[str, Any]],
) -> List[pymap.GroundControlPoint]:
points = {}
for line in lines:
words = line.split(None, 5)
easting, northing, alt, pixel_x, pixel_y = map(float, words[:5])
key = (easting, northing, alt)
shot_tokens = words[5].split(None)
shot_id = shot_tokens[0]
if shot_id not in exifs:
continue
if key in points:
point = points[key]
else:
# Convert 3D coordinates
if np.isnan(alt):
alt = 0
has_altitude = False
else:
has_altitude = True
if projection is not None:
lat, lon = projection.transform(easting, northing)
else:
lon, lat = easting, northing
point = pymap.GroundControlPoint()
point.id = "unnamed-%d" % len(points)
point.lla = {"latitude": lat, "longitude": lon, "altitude": alt}
point.has_altitude = has_altitude
points[key] = point
# Convert 2D coordinates
d = exifs[shot_id]
coordinates = features.normalized_image_coordinates(
np.array([[pixel_x, pixel_y]]), d["width"], d["height"]
)[0]
o = pymap.GroundControlPointObservation()
o.shot_id = shot_id
o.projection = coordinates
point.add_observation(o)
return list(points.values())
def _parse_utm_projection_string(line: str) -> str:
"""Convert strings like 'WGS84 UTM 32N' to a proj4 definition."""
words = line.lower().split()
assert len(words) == 3
zone = line.split()[2].upper()
if zone[-1] == "N":
zone_number = int(zone[:-1])
zone_hemisphere = "north"
elif zone[-1] == "S":
zone_number = int(zone[:-1])
zone_hemisphere = "south"
else:
zone_number = int(zone)
zone_hemisphere = "north"
s = "+proj=utm +zone={} +{} +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
return s.format(zone_number, zone_hemisphere)
def _parse_projection(line: str) -> Optional[pyproj.Transformer]:
"""Build a proj4 from the GCP format line."""
crs_4326 = pyproj.CRS.from_epsg(4326)
if line.strip() == "WGS84":
return None
elif line.upper().startswith("WGS84 UTM"):
return pyproj.Transformer.from_proj(
pyproj.CRS(_parse_utm_projection_string(line)), crs_4326
)
elif "+proj" in line:
return pyproj.Transformer.from_proj(pyproj.CRS(line), crs_4326)
elif line.upper().startswith("EPSG:"):
return pyproj.Transformer.from_proj(
pyproj.CRS.from_epsg(int(line.split(":")[1])), crs_4326
)
else:
raise ValueError("Un-supported geo system definition: {}".format(line))
def _valid_gcp_line(line: str) -> bool:
stripped = line.strip()
return stripped != "" and stripped[0] != "#"
def read_gcp_list(fileobj, exif: Dict[str, Any]) -> List[pymap.GroundControlPoint]:
"""Read a ground control points from a gcp_list.txt file.
It requires the points to be in the WGS84 lat, lon, alt format.
If reference is None, topocentric data won't be initialized.
"""
all_lines = fileobj.readlines()
lines = iter(filter(_valid_gcp_line, all_lines))
projection = _parse_projection(next(lines))
points = _read_gcp_list_lines(lines, projection, exif)
return points
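# Illustrative gcp_list.txt contents (values are made up):
#     WGS84 UTM 32N
#     411213.5 5130535.7 123.4 1024.0 768.0 img_0001.jpg
#     411213.5 5130535.7 123.4  980.0 700.0 img_0002.jpg
# The first non-comment line declares the projection; every following line is
# <easting> <northing> <altitude> <pixel_x> <pixel_y> <shot_id>, matching the
# parsing in _read_gcp_list_lines above.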
def read_ground_control_points(fileobj: IO) -> List[pymap.GroundControlPoint]:
"""Read ground control points from json file"""
obj = json_load(fileobj)
points = []
for point_dict in obj["points"]:
point = pymap.GroundControlPoint()
point.id = point_dict["id"]
lla = point_dict.get("position")
if lla:
point.lla = lla
point.has_altitude = "altitude" in point.lla
observations = []
observing_images = set()
for o_dict in point_dict["observations"]:
o = pymap.GroundControlPointObservation()
o.shot_id = o_dict["shot_id"]
if o.shot_id in observing_images:
logger.warning(
"GCP {} has multiple observations in image {}".format(
point.id, o.shot_id
)
)
observing_images.add(o.shot_id)
if "projection" in o_dict:
o.projection = np.array(o_dict["projection"])
observations.append(o)
point.observations = observations
points.append(point)
return points
def write_ground_control_points(
gcp: List[pymap.GroundControlPoint],
fileobj: IO,
) -> None:
"""Write ground control points to json file."""
obj = {"points": []}
for point in gcp:
point_obj = {}
point_obj["id"] = point.id
if point.lla:
point_obj["position"] = {
"latitude": point.lla["latitude"],
"longitude": point.lla["longitude"],
}
if point.has_altitude:
point_obj["position"]["altitude"] = point.lla["altitude"]
point_obj["observations"] = []
for observation in point.observations:
point_obj["observations"].append(
{
"shot_id": observation.shot_id,
"projection": tuple(observation.projection),
}
)
obj["points"].append(point_obj)
json_dump(obj, fileobj)
def json_dump_kwargs(minify: bool = False) -> Dict[str, Any]:
if minify:
indent, separators = None, (",", ":")
else:
indent, separators = 4, None
return {"indent": indent, "ensure_ascii": False, "separators": separators}
def json_dump(data, fout: IO[str], minify: bool = False) -> None:
kwargs = json_dump_kwargs(minify)
return json.dump(data, fout, **kwargs)
def json_dumps(data, minify: bool = False) -> str:
kwargs = json_dump_kwargs(minify)
return json.dumps(data, **kwargs)
def json_load(fp: Union[IO[str], IO[bytes]]) -> Any:
return json.load(fp)
def json_loads(text: Union[str, bytes]) -> Any:
return json.loads(text)
# PLY
def ply_header(
count_vertices: int, with_normals: bool = False, point_num_views: bool = False
) -> List[str]:
if with_normals:
header = [
"ply",
"format ascii 1.0",
"element vertex {}".format(count_vertices),
"property float x",
"property float y",
"property float z",
"property float nx",
"property float ny",
"property float nz",
"property uchar diffuse_red",
"property uchar diffuse_green",
"property uchar diffuse_blue",
]
else:
header = [
"ply",
"format ascii 1.0",
"element vertex {}".format(count_vertices),
"property float x",
"property float y",
"property float z",
"property uchar diffuse_red",
"property uchar diffuse_green",
"property uchar diffuse_blue",
]
if point_num_views:
header += ["property uchar views"]
header += ["end_header"]
return header
def points_to_ply_string(vertices: List[str], point_num_views: bool = False) -> str:
header = ply_header(len(vertices), point_num_views=point_num_views)
return "\n".join(header + vertices + [""])
def reconstruction_to_ply(
reconstruction: types.Reconstruction,
tracks_manager: Optional[pymap.TracksManager] = None,
no_cameras: bool = False,
no_points: bool = False,
point_num_views: bool = False,
) -> str:
"""Export reconstruction points as a PLY string."""
vertices = []
if not no_points:
for point in reconstruction.points.values():
p, c = point.coordinates, point.color
s = "{} {} {} {} {} {}".format(
p[0], p[1], p[2], int(c[0]), int(c[1]), int(c[2])
)
if point_num_views and tracks_manager:
obs_count = point.number_of_observations()
if obs_count == 0:
obs_count = len(tracks_manager.get_track_observations(point.id))
s += " {}".format(obs_count)
vertices.append(s)
if not no_cameras:
for shot in reconstruction.shots.values():
o = shot.pose.get_origin()
R = shot.pose.get_rotation_matrix()
for axis in range(3):
c = 255 * np.eye(3)[axis]
for depth in np.linspace(0, 2, 10):
p = o + depth * R[axis]
s = "{} {} {} {} {} {}".format(
p[0], p[1], p[2], int(c[0]), int(c[1]), int(c[2])
)
if point_num_views:
s += " 0"
vertices.append(s)
return points_to_ply_string(vertices, point_num_views)
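# Example (illustrative): export a reconstruction as an ASCII PLY file using
# the helper above (`open_wt` is defined further below in this module).
def _example_export_ply(reconstruction: types.Reconstruction, path: str) -> None:
    with open_wt(path) as fout:
        fout.write(reconstruction_to_ply(reconstruction))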
def point_cloud_from_ply(
fp: TextIO,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Load point cloud from a PLY file."""
all_lines = fp.read().splitlines()
start = all_lines.index("end_header") + 1
lines = all_lines[start:]
n = len(lines)
points = np.zeros((n, 3), dtype=np.float32)
normals = np.zeros((n, 3), dtype=np.float32)
colors = np.zeros((n, 3), dtype=np.uint8)
labels = np.zeros((n,), dtype=np.uint8)
for i, row in enumerate(lines):
words = row.split()
label = int(words[9])
points[i] = list(map(float, words[0:3]))
normals[i] = list(map(float, words[3:6]))
colors[i] = list(map(int, words[6:9]))
labels[i] = label
return points, normals, colors, labels
def point_cloud_to_ply(
points: np.ndarray,
normals: np.ndarray,
colors: np.ndarray,
labels: np.ndarray,
fp: TextIO,
) -> None:
fp.write("ply\n")
fp.write("format ascii 1.0\n")
fp.write("element vertex {}\n".format(len(points)))
fp.write("property float x\n")
fp.write("property float y\n")
fp.write("property float z\n")
fp.write("property float nx\n")
fp.write("property float ny\n")
fp.write("property float nz\n")
fp.write("property uchar diffuse_red\n")
fp.write("property uchar diffuse_green\n")
fp.write("property uchar diffuse_blue\n")
fp.write("property uchar class\n")
fp.write("end_header\n")
template = "{:.4f} {:.4f} {:.4f} {:.3f} {:.3f} {:.3f} {} {} {} {}\n"
for i in range(len(points)):
p, n, c, l = points[i], normals[i], colors[i], labels[i]
fp.write(
template.format(
p[0],
p[1],
p[2],
n[0],
n[1],
n[2],
int(c[0]),
int(c[1]),
int(c[2]),
int(l),
)
)
# Filesystem interaction methods
def mkdir_p(path: str) -> None:
"""Make a directory including parent directories."""
os.makedirs(path, exist_ok=True)
def open_wt(path: str) -> IO[Any]:
"""Open a file in text mode for writing utf-8."""
return open(path, "w", encoding="utf-8")
def open_rt(path: str) -> IO[Any]:
"""Open a file in text mode for reading utf-8."""
return open(path, "r", encoding="utf-8")
def imread(
path: str, grayscale: bool = False, unchanged: bool = False, anydepth: bool = False
) -> ndarray:
with open(path, "rb") as fb:
return imread_from_fileobject(fb, grayscale, unchanged, anydepth)
def imread_from_fileobject(
fb, grayscale: bool = False, unchanged: bool = False, anydepth: bool = False
) -> np.ndarray:
"""Load image as an array ignoring EXIF orientation."""
if context.OPENCV3:
if grayscale:
flags = cv2.IMREAD_GRAYSCALE
elif unchanged:
flags = cv2.IMREAD_UNCHANGED
else:
flags = cv2.IMREAD_COLOR
try:
flags |= cv2.IMREAD_IGNORE_ORIENTATION
except AttributeError:
logger.warning(
"OpenCV version {} does not support loading images without "
"rotating them according to EXIF. Please upgrade OpenCV to "
"version 3.2 or newer.".format(cv2.__version__)
)
if anydepth:
flags |= cv2.IMREAD_ANYDEPTH
else:
if grayscale:
flags = cv2.CV_LOAD_IMAGE_GRAYSCALE
elif unchanged:
flags = cv2.CV_LOAD_IMAGE_UNCHANGED
else:
flags = cv2.CV_LOAD_IMAGE_COLOR
if anydepth:
flags |= cv2.CV_LOAD_IMAGE_ANYDEPTH
im_buffer = np.asarray(bytearray(fb.read()), dtype=np.uint8)
image = cv2.imdecode(im_buffer, flags)
if image is None:
raise IOError("Unable to load image")
if len(image.shape) == 3:
image[:, :, :3] = image[:, :, [2, 1, 0]] # Turn BGR to RGB (or BGRA to RGBA)
return image
def imwrite(path: str, image: np.ndarray) -> None:
with open(path, "wb") as fwb:
return imwrite_from_fileobject(fwb, image, path)
def imwrite_from_fileobject(fwb, image: np.ndarray, ext: str) -> None:
"""Write an image to a file object"""
if len(image.shape) == 3:
image[:, :, :3] = image[:, :, [2, 1, 0]] # Turn RGB to BGR (or RGBA to BGRA)
_, im_buffer = cv2.imencode(ext, image)
fwb.write(im_buffer)
def image_size_from_fileobject(
fb: Union[IO[bytes], bytes, Path, str, TextIO]
) -> Tuple[int, int]:
"""Height and width of an image."""
if isinstance(fb, TextIO):
image = imread(fb.name)
return image.shape[:2]
else:
with Image.open(fb) as img:
width, height = img.size
return height, width
def image_size(path: str) -> Tuple[int, int]:
"""Height and width of an image."""
with open(path, "rb") as fb:
return image_size_from_fileobject(fb)
# IO Filesystem
class IoFilesystemBase(ABC):
@classmethod
@abstractmethod
def exists(cls, path: str):
pass
@classmethod
def ls(cls, path: str):
pass
@classmethod
@abstractmethod
def isfile(cls, path: str):
pass
@classmethod
@abstractmethod
def isdir(cls, path: str):
pass
@classmethod
def rm_if_exist(cls, filename: str):
pass
@classmethod
def symlink(cls, src_path: str, dst_path: str, **kwargs):
pass
@classmethod
@abstractmethod
def open(cls, *args, **kwargs) -> IO[Any]:
pass
@classmethod
@abstractmethod
def open_wt(cls, path: str):
pass
@classmethod
@abstractmethod
def open_rt(cls, path: str):
pass
@classmethod
@abstractmethod
def mkdir_p(cls, path: str):
pass
@classmethod
@abstractmethod
def imwrite(cls, path: str, image):
pass
@classmethod
@abstractmethod
def imread(cls, path: str, grayscale=False, unchanged=False, anydepth=False):
pass
@classmethod
@abstractmethod
def image_size(cls, path: str):
pass
@classmethod
@abstractmethod
def timestamp(cls, path: str):
pass
class IoFilesystemDefault(IoFilesystemBase):
def __init__(self) -> None:
self.type = "default"
@classmethod
    def exists(cls, path: str) -> bool:
        return os.path.exists(path)
@classmethod
def ls(cls, path: str) -> List[str]:
return os.listdir(path)
@classmethod
    def isfile(cls, path: str) -> bool:
        return os.path.isfile(path)
@classmethod
    def isdir(cls, path: str) -> bool:
        return os.path.isdir(path)
@classmethod
def rm_if_exist(cls, filename: str) -> None:
if os.path.islink(filename):
os.unlink(filename)
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@classmethod
def symlink(cls, src_path: str, dst_path: str, **kwargs):
os.symlink(src_path, dst_path, **kwargs)
@classmethod
def open(cls, *args, **kwargs) -> IO[Any]:
return open(*args, **kwargs)
@classmethod
def open_wt(cls, path: str):
return cls.open(path, "w", encoding="utf-8")
@classmethod
def open_rt(cls, path: str):
return cls.open(path, "r", encoding="utf-8")
@classmethod
def mkdir_p(cls, path: str):
return os.makedirs(path, exist_ok=True)
@classmethod
def imread(
cls,
path: str,
grayscale: bool = False,
unchanged: bool = False,
anydepth: bool = False,
):
with cls.open(path, "rb") as fb:
return imread_from_fileobject(fb, grayscale, unchanged, anydepth)
@classmethod
def imwrite(cls, path: str, image) -> None:
with cls.open(path, "wb") as fwb:
imwrite_from_fileobject(fwb, image, path)
@classmethod
def image_size(cls, path: str) -> Tuple[int, int]:
with cls.open(path, "rb") as fb:
return image_size_from_fileobject(fb)
@classmethod
    def timestamp(cls, path: str) -> float:
        return os.path.getmtime(path)
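# Example (illustrative): the default backend mirrors the local filesystem; a
# remote or in-memory backend can be added by subclassing IoFilesystemBase.
#     fs = IoFilesystemDefault()
#     if fs.isdir("data/images"):
#         height, width = fs.image_size("data/images/img_0001.jpg")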
|
the-stack_106_22581 | import pandas as pd
from sklearn.metrics import mean_squared_error
import numpy as np
# path = "./results/results_2.csv"
# MAX_CARD = 501012
# path = './results/results_dmv_all.csv'
# MAX_CARD = 9406943
path = './results/tpch_result.csv'
df = pd.read_csv(path)
MAX_CARD = 6000003 * 0.2
# print(df)
est_card = df['est_card'].values/MAX_CARD
true_card = df['true_card'].values/MAX_CARD
print("RMSE error:",np.sqrt(mean_squared_error(est_card,true_card)))
# %%
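# The q-error of a single (pred, true) pair is max(pred / true, true / pred):
# the multiplicative factor by which the estimate misses, with 1.0 being exact.
# print_qerror below reports the distribution of this factor over all queries.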
def print_qerror(pred, label):
qerror = []
for i in range(len(pred)):
if pred[i]==0 and float(label[i])==0:
qerror.append(1)
elif pred[i]==0:
qerror.append(label[i])
elif label[i]==0:
qerror.append(pred[i])
elif pred[i] > float(label[i]):
qerror.append(float(pred[i]) / float(label[i]))
else:
qerror.append(float(label[i]) / float(pred[i]))
print("Median: {}".format(np.median(qerror)))
print("90th percentile: {}".format(np.percentile(qerror, 90)))
print("95th percentile: {}".format(np.percentile(qerror, 95)))
print("99th percentile: {}".format(np.percentile(qerror, 99)))
print("Max: {}".format(np.max(qerror)))
print("Mean: {}".format(np.mean(qerror)))
print_qerror(est_card,true_card)
# %%
|
the-stack_106_22582 | from collections import OrderedDict
from pathlib import Path
from typing import List, Tuple, Optional
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (
Conv1D,
MaxPooling1D,
Input,
Flatten,
Dense,
BatchNormalization,
LeakyReLU,
ReLU,
Dropout,
concatenate,
LSTM,
Lambda
)
# Reference name of model
MODEL_NAME = str(Path(__file__).resolve().stem)
# Default inputs
# Dictionary with {"Reference-name-of-input": {"len_input": <int>, "input_features": <list of feature names>}}
INPUTS = OrderedDict(
TS=dict(
len_input=30,
input_features=['venta_unidades_dia', 'venta_clp_dia', 'is_promo'],
)
)
class ModelTS:
'''
Generate an instance of a keras model
'''
def __init__(self, n_output: int=1, output_layer: Optional[str]=None,):
self.inputs = INPUTS
self.model_name = MODEL_NAME
self.n_output = n_output
self.output_layer = output_layer
    # Build the model
def get_model(self,):
shape_inputs = [(value.get("len_input"), len(value.get("input_features"))) for value in self.inputs.values()]
# Inputs
input_ecg = Input(shape=shape_inputs[0])
# LSTM
        x = LSTM(
            15,
            activation='tanh',
            recurrent_activation="sigmoid",
            kernel_initializer="glorot_uniform",
            dropout=0.5,
            recurrent_dropout=0.0,
        )(input_ecg)
#x = BatchNormalization()(x)
# ---------------------------- Dense layer ------------------------------------
output = Flatten()(x)
output = Dense(32, kernel_initializer='glorot_normal')(output)
output = Dropout(0.4)(output)
output = ReLU()(output)
output = Dense(self.n_output, activation=self.output_layer, dtype = tf.float32)(output)
model = Model(inputs = input_ecg, outputs = output)
return model |
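# Example (illustrative) usage of the ModelTS class defined above; the "linear"
# output activation is an assumed choice for a regression target:
#     example_model = ModelTS(n_output=1, output_layer="linear").get_model()
#     example_model.summary()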
the-stack_106_22583 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import urllib
import httplib2
from py_utils import retry_util # pylint: disable=import-error
from services import luci_auth
# Some services pad JSON responses with a security prefix to prevent against
# XSSI attacks. If found, the prefix is stripped off before attempting to parse
# a JSON response.
# See e.g.: https://gerrit-review.googlesource.com/Documentation/rest-api.html#output
JSON_SECURITY_PREFIX = ")]}'"
class RequestError(OSError):
"""Exception class for errors while making a request."""
def __init__(self, request, response, content):
self.request = request
self.response = response
self.content = content
message = u'%s returned HTTP Error %d: %s' % (
self.request, self.response.status, self.error_message)
# Note: the message is a unicode object, possibly with special characters,
# so it needs to be turned into a str as expected by the constructor of
# the base class.
super(RequestError, self).__init__(message.encode('utf-8'))
def __reduce__(self):
# Method needed to make the exception pickleable [1], otherwise it causes
# the multiprocess pool to hang when raised by a worker [2].
# [1]: https://stackoverflow.com/a/36342588
# [2]: https://github.com/uqfoundation/multiprocess/issues/33
return (type(self), (self.request, self.response, self.content))
@property
def json(self):
"""Attempt to load the content as a json object."""
try:
return json.loads(self.content)
except StandardError:
return None
@property
def error_message(self):
"""Returns a unicode object with the error message found in the content."""
try:
# Try to find error message within json content.
return self.json['error']
except StandardError:
# Otherwise fall back to entire content itself, converting str to unicode.
return self.content.decode('utf-8')
class ClientError(RequestError):
"""Exception for 4xx HTTP client errors."""
pass
class ServerError(RequestError):
"""Exception for 5xx HTTP server errors."""
pass
def BuildRequestError(request, response, content):
"""Build the correct RequestError depending on the response status."""
if response['status'].startswith('4'):
error = ClientError
elif response['status'].startswith('5'):
error = ServerError
else: # Fall back to the base class.
error = RequestError
return error(request, response, content)
@retry_util.RetryOnException(ServerError, retries=3)
def Request(url, method='GET', params=None, data=None, accept=None,
content_type='urlencoded', use_auth=False, retries=None):
"""Perform an HTTP request of a given resource.
Args:
url: A string with the URL to request.
method: A string with the HTTP method to perform, e.g. 'GET' or 'POST'.
params: An optional dict or sequence of key, value pairs to be added as
a query to the url.
data: An optional dict or sequence of key, value pairs to send as payload
data in the body of the request.
accept: An optional string to specify the expected response format.
Currently only 'json' is supported, which attempts to parse the response
      content as json. If omitted, the default is to return the raw response
content as a string.
content_type: A string specifying how to encode the payload data,
can be either 'urlencoded' (default) or 'json'.
    use_auth: A boolean indicating whether to send authorized requests. If
      True, luci-auth is used to get an access token for the logged in user.
retries: Number of times to retry the request in case of ServerError. Note,
the request is _not_ retried if the response is a ClientError.
Returns:
A string with the content of the response when it has a successful status.
Raises:
A ClientError if the response has a 4xx status, or ServerError if the
response has a 5xx status.
"""
del retries # Handled by the decorator.
if params:
url = '%s?%s' % (url, urllib.urlencode(params))
body = None
headers = {}
if accept == 'json':
headers['Accept'] = 'application/json'
elif accept is not None:
raise NotImplementedError('Invalid accept format: %s' % accept)
if data is not None:
if content_type == 'json':
body = json.dumps(data, sort_keys=True, separators=(',', ':'))
headers['Content-Type'] = 'application/json'
elif content_type == 'urlencoded':
body = urllib.urlencode(data)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
else:
raise NotImplementedError('Invalid content type: %s' % content_type)
else:
headers['Content-Length'] = '0'
if use_auth:
headers['Authorization'] = 'Bearer %s' % luci_auth.GetAccessToken()
logging.info('Making API request: %s', url)
http = httplib2.Http()
response, content = http.request(
url, method=method, body=body, headers=headers)
if response.status != 200:
raise BuildRequestError(url, response, content)
if accept == 'json':
if content[:4] == JSON_SECURITY_PREFIX:
content = content[4:] # Strip off security prefix if found.
content = json.loads(content)
return content
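# Example (illustrative): the URL and payload below are placeholders that only
# demonstrate the calling convention of Request() above.
def _ExampleRequest():
  return Request(
      'https://example.com/api/endpoint',
      method='POST',
      data={'key': 'value'},
      accept='json',
      use_auth=True)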
|
the-stack_106_22587 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class ExperimentMaskDistillationLossCriterionConfig(FairseqDataclass):
label_smoothing: float = field(
default=0.0,
metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
)
report_accuracy: bool = field(
default=False,
metadata={"help": "report accuracy metric"},
)
ignore_prefix_size: int = field(
default=0,
metadata={"help": "Ignore first N tokens"},
)
kd_alpha: float = field(
default=0.9,
metadata={"help": "..."},
)
sentence_avg: bool = II("optimization.sentence_avg")
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
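# Shape note (illustrative): `lprobs` is (batch * tgt_len, vocab) log-probabilities
# and `target` is the matching vector of gold token indices. With smoothing factor
# epsilon the gold token keeps weight (1 - epsilon) while the remaining mass is
# spread uniformly over the other vocabulary entries, which is exactly what the
# nll_loss/smooth_loss mixture above computes.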
@register_criterion(
"experiment_mask_distillation_loss", dataclass=ExperimentMaskDistillationLossCriterionConfig
)
class ExperimentMaskDistillationLossCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
kd_alpha=0.9,
ignore_prefix_size=0,
report_accuracy=False,
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.eps = label_smoothing
self.ignore_prefix_size = ignore_prefix_size
self.report_accuracy = report_accuracy
        self.MSE_loss = torch.nn.MSELoss(reduction="none")  # element-wise loss; reduced manually in forward()
self.alpha = kd_alpha
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output, ret = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
mask_bert_out, mask_encoder_out = ret['mask_bert_out'], ret['mask_encoder_out']
mask_loss = ret['mask_loss']
#bert_labels = ret['BERT_bert_labels']
loss_kd = self.MSE_loss(mask_bert_out, mask_encoder_out)
loss_kd = torch.mean(loss_kd, dim=-1)
loss_kd = loss_kd.sum()
loss = loss + mask_loss + loss_kd * (1. - self.alpha)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output, sample)
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = utils.item(total.data)
return loss, sample_size, logging_output
def get_lprobs_and_target(self, model, net_output, sample):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, net_output)
if self.ignore_prefix_size > 0:
if getattr(lprobs, "batch_first", False):
lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
target = target[:, self.ignore_prefix_size :].contiguous()
else:
lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
target = target[self.ignore_prefix_size :, :].contiguous()
return lprobs.view(-1, lprobs.size(-1)), target.view(-1)
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
def compute_accuracy(self, model, net_output, sample):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
mask = target.ne(self.padding_idx)
n_correct = torch.sum(
lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
)
total = torch.sum(mask)
return n_correct, total
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
if total > 0:
metrics.log_scalar("total", total)
n_correct = utils.item(
sum(log.get("n_correct", 0) for log in logging_outputs)
)
metrics.log_scalar("n_correct", n_correct)
metrics.log_derived(
"accuracy",
lambda meters: round(
meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
)
if meters["total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
|
the-stack_106_22590 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import cos
from math import pi
from math import sin
from compas.geometry import matrix_from_frame
from compas.geometry import transform_points
from compas.geometry import Circle
from compas.geometry import Frame
from compas.geometry import Plane
from compas.geometry._shapes import Shape
__all__ = ['Cylinder']
class Cylinder(Shape):
"""A cylinder is defined by a circle and a height.
Parameters
----------
circle: :class:`compas.geometry.Circle`
The circle of the cylinder.
height: float
The height of the cylinder.
Attributes
----------
plane : :class:`compas.geometry.Plane`
The plane containing the circle.
circle : :class:`compas.geometry.Circle`
The base circle of the cylinder.
radius : float
The radius of the base circle.
height : float
The height of the cylinder.
normal (read-only) : :class:`compas.geometry.Vector`
The normal of the base plane.
diameter : float
The diameter of the cylinder.
Examples
--------
>>> from compas.geometry import Plane
>>> from compas.geometry import Cylinder
>>> plane = Plane([0, 0, 0], [0, 0, 1])
>>> circle = Circle(plane, 5)
>>> cylinder = Cylinder(circle, 7)
"""
__slots__ = ['_circle', '_height']
def __init__(self, circle, height):
self._circle = None
self._height = None
self.circle = circle
self.height = height
@property
def data(self):
"""Returns the data dictionary that represents the cylinder.
Returns
-------
dict
The cylinder data.
"""
return {'circle': self.circle.data,
'height': self.height}
@data.setter
def data(self, data):
self.circle = Circle.from_data(data['circle'])
self.height = data['height']
@property
def plane(self):
"""Plane: The plane of the cylinder."""
return self.circle.plane
@plane.setter
def plane(self, plane):
self.circle.plane = Plane(plane[0], plane[1])
@property
def circle(self):
"""float: The circle of the cylinder."""
return self._circle
@circle.setter
def circle(self, circle):
self._circle = Circle(circle[0], circle[1])
@property
def radius(self):
"""float: The radius of the cylinder."""
return self.circle.radius
@radius.setter
def radius(self, radius):
self.circle.radius = float(radius)
@property
def height(self):
"""float: The height of the cylinder."""
return self._height
@height.setter
def height(self, height):
self._height = float(height)
@property
def normal(self):
"""Vector: The normal of the cylinder."""
return self.plane.normal
@property
def diameter(self):
"""float: The diameter of the cylinder."""
return self.circle.diameter
@property
def center(self):
"""Point: The center of the cylinder."""
return self.circle.center
@center.setter
def center(self, point):
self.circle.center = point
@property
def area(self):
"""Float: The surface area of the cylinder."""
return (self.circle.area * 2) + (self.circle.circumference * self.height)
@property
def volume(self):
"""Float: The volume of the cylinder."""
return self.circle.area * self.height
# ==========================================================================
# customisation
# ==========================================================================
def __repr__(self):
return 'Cylinder({0}, {1})'.format(self.circle, self.height)
def __len__(self):
return 2
def __getitem__(self, key):
if key == 0:
return self.circle
elif key == 1:
return self.height
else:
raise KeyError
def __setitem__(self, key, value):
if key == 0:
self.circle = value
elif key == 1:
self.height = value
else:
raise KeyError
def __iter__(self):
return iter([self.circle, self.height])
# ==========================================================================
# constructors
# ==========================================================================
@classmethod
def from_data(cls, data):
"""Construct a cylinder from its data representation.
Parameters
----------
data : :obj:`dict`
The data dictionary.
Returns
-------
Cylinder
The constructed cylinder.
Examples
--------
>>> from compas.geometry import Cylinder
>>> from compas.geometry import Circle
>>> from compas.geometry import Plane
>>> data = {'circle': Circle(Plane.worldXY(), 5).data, 'height': 7.}
>>> cylinder = Cylinder.from_data(data)
"""
cylinder = cls(Circle(Plane.worldXY(), 1), 1)
cylinder.data = data
return cylinder
# ==========================================================================
# methods
# ==========================================================================
def to_vertices_and_faces(self, **kwargs):
"""Returns a list of vertices and faces"""
u = kwargs.get('u') or 10
if u < 3:
raise ValueError('The value for u should be u > 3.')
vertices = []
a = 2 * pi / u
z = self.height / 2
for i in range(u):
x = self.circle.radius * cos(i * a)
y = self.circle.radius * sin(i * a)
vertices.append([x, y, z])
vertices.append([x, y, -z])
# add v in bottom and top's circle center
vertices.append([0, 0, z])
vertices.append([0, 0, -z])
# transform vertices to cylinder's plane
frame = Frame.from_plane(self.circle.plane)
M = matrix_from_frame(frame)
vertices = transform_points(vertices, M)
faces = []
# side faces
for i in range(0, u * 2, 2):
faces.append([i, i + 1, (i + 3) % (u * 2), (i + 2) % (u * 2)])
# top and bottom circle faces
for i in range(0, u * 2, 2):
top = [i, (i + 2) % (u * 2), len(vertices) - 2]
bottom = [i + 1, (i + 3) % (u * 2), len(vertices) - 1]
faces.append(top)
faces.append(bottom[::-1])
return vertices, faces
def transform(self, transformation):
"""Transform the cylinder.
Parameters
----------
transformation : :class:`Transformation`
The transformation used to transform the cylinder.
Examples
--------
>>> from compas.geometry import Frame
>>> from compas.geometry import Transformation
>>> from compas.geometry import Plane
>>> from compas.geometry import Circle
>>> from compas.geometry import Cylinder
>>> circle = Circle(Plane.worldXY(), 5)
>>> cylinder = Cylinder(circle, 7)
>>> frame = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> T = Transformation.from_frame(frame)
>>> cylinder.transform(T)
"""
self.circle.transform(transformation)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import doctest
doctest.testmod()
|
the-stack_106_22591 | import glob
from facenet_pytorch import MTCNN
import os
from PIL import Image
image_dir = r"./origin_images/original"
# MTCNN() performs face detection and cropping
mtcnn = MTCNN()
# glob.glob(pattern) -> get the list of matching files in the directory
image_paths = glob.glob(os.path.join(image_dir, "*.jpg"))
print(image_paths)
print('Cropping started')
for i, path in enumerate(image_paths):
img = Image.open(path)
try:
        mtcnn(img, save_path=r"./origin_images/croped/{}.jpg".format(str(i)))  # pass the image and the save path
except KeyError as e:
print(e)
print('Cropping finished')
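# Note (sketch, not from the original script): recent facenet_pytorch versions also
# accept construction options such as image_size, margin and keep_all, e.g.
#     mtcnn = MTCNN(image_size=160, margin=0, keep_all=False)
# which can be useful when the crops are later fed to a recognition model.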
|
the-stack_106_22592 | ## @file
# This file is used to be the warning class of ECC tool
#
# Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## The exception class that used to report error messages when preprocessing
#
# Currently the "ToolName" is set to be "ECC PP".
#
class Warning (Exception):
## The constructor
#
# @param self The object pointer
# @param Str The message to record
# @param File The FDF name
# @param Line The Line number that error occurs
#
def __init__(self, Str, File = None, Line = None):
self.message = Str
self.FileName = File
self.LineNumber = Line
self.ToolName = 'ECC PP'
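# Usage sketch (illustrative, not part of the original file):
#
#     try:
#         raise Warning("unexpected token", File="Platform.fdf", Line=42)
#     except Warning as W:
#         print("%s: %s(%s): %s" % (W.ToolName, W.FileName, W.LineNumber, W.message))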
|
the-stack_106_22593 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2013, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
import json
import logging
import math
from time import sleep
from pkg_resources import resource_string
import settings
_log = logging.getLogger(__name__)
class No_economizer:
"""Detects problems with economizer controls or operations.
Usage: ...
The purpose of this proactive diagnostic measure is to identify faulty
economizer systems on a rooftop unit (RTU). If the economizer does not
operate when outdoor conditions are favorable for economizing there are
missed opportunities for free cooling, thus causing an energy penalty during
periods when free cooling is available.
When a call for cooling comes from the space thermostat and conditions are favorable
for economizing (outdoor air temperature is less than return air temperature) the outdoor
air damper should fully open. The outdoor air fraction, an indication of the relative amount of
outdoor air brought into the RTU, should ideally be close to a value of one.
Then, the process checks if the conditions are favorable for the proactive
testing
"""
def __init__(self, parent):
"""parent is the afddagent or whatever object creating an instance of this class
"""
self._parent = parent
config_json = resource_string(__name__, 'afdd_config.ini')
self._config = json.loads(config_json)
def run(self, voltron_data):
#Data from Voltron
voltron_data = self._parent.get_new_data()
mixed_air_temperature = float(voltron_data["MixedAirTemperature"])
return_air_temperature = float(voltron_data["ReturnAirTemperature"])
outdoor_air_temperature = float(voltron_data["OutsideAirTemperature"])
zone_temp=float(voltron_data["ZoneTemp"])
zone_tempsp=float(voltron_data["ZoneTempSP"])
damper=float(voltron_data["DamperSignal"])
cool_call=float(voltron_data["CoolCall1"])
oatemp_vpoint =float(voltron_data["OutsideAirTemperatureVirtualPoint"])
cfm=5000 #constant for now
offset=2.0
#settings.py file
SecondsToSteadyState = settings.seconds_to_steady_state
sleeptime = settings.sleeptime
economizertype = settings.economizertype
minutes_to_average=settings.minutes_to_average
if economizertype==0:
highlimit=return_air_temperature
else:
highlimit=self._config["highlimit"]
# check Prerequisites
if(heat_call==1 or math.fabs(outdoor_air_temperature-return_air_temperature)<settings.afdd_threshold):
afdd3=38
self.log_status("Conditions not favorable for proactive economizer fault detection")
return afdd3
# Main Algorithm
if cool_call==1:
if (outdoor_air_temperature-offset) < highlimit:
if damper==100:
oaf=self.calculate_oaf(self,minutes_to_average, sleeptime)
if 1.0-oaf > afdd3_threshold:
afdd3=32
potential_cooling_savings = 1.08*cfm*(mixed_air_temperature-outdoor_air_temperature) #sensible cooling load estimation in BTU/hr
self.log_status("Insufficient outdoor air when economizing")
return afdd3
else:
afdd3=30
self.log_status("Economizer functioning properly")
return afdd3
else:
afdd3=33
potential_cooling_savings = 1.08*cfm*(mixed_air_temperature-outdoor_air_temperature) #sensible cooling load estimation in BTU/hr
self.log_status("RTU not economizing when outdoor conditions are favorable for economizing")
return afdd3
else:
status=self.command_outdoor_air_temperature_vpoint(return_air_temperature-10)
if not (status):
afdd3=39
self.log_status("Lock not received from Catalyst")
return afdd3
            self.sleep(SecondsToSteadyState)
voltron_data = self._parent.get_new_data()
damper=float(voltron_data["DamperSignal"])
if damper==100:
oaf=self.calculate_oaf(self,minutes_to_average, sleeptime)
if 1.0-oaf > afdd3_threshold:
afdd3=32
potential_cooling_savings = 1.08*cfm*(mixed_air_temperature-outdoor_air_temperature) #sensible cooling load estimation in BTU/hr
self.log_status("Insufficient outdoor air when economizing")
return afdd3
else:
afdd3=30
self.log_status("Economizer functioning properly")
return afdd3
else:
afdd3=33
potential_cooling_savings = 1.08*cfm*(mixed_air_temperature-outdoor_air_temperature) #sensible cooling load estimation in BTU/hr
self.log_status("RTU not economizing when outdoor conditions are favorable for economizing")
return afdd3
afdd3=31
return afdd3
###################################################################################################
def log_message(self,msg):
        _log.debug(msg)
def sleep(self,sleeptime):
self._parent.sleep(sleeptime)
def log_status(self,code):
# print or log code and exit
# need to release OAtemp_vpoint and CoolCall1
_log.debug(code)
def command_outdoor_air_temperature_vpoint(self,value):
""" Command outdoor air damper to a new position """
status = 0
status = self._parent.command_equip("OutsideAirTemperaturevirtualpoint",value)
if not status:
return False
return True
def calculate_oaf(self,num_minutes, sleeptime):
oaf = 0.
return_air_temperature = 0.
mixed_air_temperature = 0.
        outdoor_air_temperature = 0.
n = 0
for n in range(1, num_minutes):
voltron_data = self._parent.get_new_data()
mixed_air_temperature = float(voltron_data["MixedAirTemperature"]) # Point name follows RTUNetwork wiki)
return_air_temperature = float(voltron_data["ReturnAirTemperature"]) # Point name follows RTUNetwork wiki)
outdoor_air_temperature = float(voltron_data["OutsideAirTemperature"])
oaf +=(mixed_air_temperature-return_air_temperature)/(outdoor_air_temperature-return_air_temperature)
sleep(sleeptime) # Pause for 60 seconds
oaf = oaf/n
return oaf
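# Worked example of the outdoor-air-fraction (OAF) estimate used in calculate_oaf()
# above (illustrative numbers only): with a mixed-air temperature of 60 F, a
# return-air temperature of 72 F and an outdoor-air temperature of 50 F,
#     OAF = (60 - 72) / (50 - 72) = -12 / -22 ~= 0.55
# i.e. roughly 55% of the supply air stream is outdoor air.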
|
the-stack_106_22595 | import sys
from math import copysign
def distance(pos1, pos2):
(xstart, ystart) = pos1
(xend, yend) = pos2
dx = xend - xstart
dy = yend - ystart
if copysign(1, dx) == copysign(1, dy):
return abs(dx + dy)
else:
return max(abs(dx), abs(dy))
def doit(input):
way = [((0,0),0)]
for x in input.split(','):
newpos = None
if x == 'n':
newpos = (way[-1][0][0], way[-1][0][1] + 1)
elif x == 'ne':
newpos = (way[-1][0][0]+1, way[-1][0][1])
elif x == 'se':
newpos = (way[-1][0][0]+1, way[-1][0][1] - 1)
elif x == 's':
newpos = (way[-1][0][0], way[-1][0][1] - 1)
elif x == 'sw':
newpos = (way[-1][0][0]-1, way[-1][0][1])
else:
assert x == 'nw'
newpos = (way[-1][0][0]-1, way[-1][0][1] + 1)
way.append((newpos, distance(way[0][0], newpos)))
return max([x[1] for x in way]), way[-1]
if __name__ == "__main__":
print(doit(sys.stdin.read().strip()))
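# Example results (hand-checked against the movement and distance rules above):
#     doit("ne,ne,ne")   -> (3, ((3, 0), 3))     the final hex is 3 steps away
#     doit("ne,ne,s,s")  -> (2, ((2, -2), 2))    the final hex is 2 steps away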
|
the-stack_106_22599 | """
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaSCommunity Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
import time
from typing import Union, Dict
from opsbot import CommandSession
from opsbot.plugins import GenericTask
from opsbot.log import logger
from opsbot.exceptions import ActionFailed, HttpFailed
from component import DevOps, RedisClient, BK_DEVOPS_DOMAIN
class DevOpsTask(GenericTask):
def __init__(self, session: CommandSession, bk_biz_id: Union[str, int] = None):
super().__init__(session, bk_biz_id, RedisClient(env='prod'))
self._devops = DevOps()
async def _get_devops_project_list(self):
data = await self._devops.v3_app_project_list(self.user_id)
data.sort(key=lambda x: x['updatedAt'], reverse=True)
return [{'id': str(project['projectCode']), 'text': project['projectName'], 'is_checked': False}
for project in data[:20]]
async def _get_devops_pipeline_list(self, project_id: str):
data = (await self._devops.v3_app_pipeline_list(project_id, self.user_id)).get('records', [])
data.sort(key=lambda x: x['latestBuildStartTime'], reverse=True)
return [{'id': f'{project_id}|{project["pipelineId"]}|{project["pipelineName"]}',
'text': project['pipelineName'], 'is_checked': False} for project in data[:20]]
async def _get_devops_build_start_info(self, project_id: str, pipeline_id: str):
start_infos = await self._devops.v3_app_build_start_info(project_id, pipeline_id, self.user_id)
filter_infos = [{'keyname': var['id'], 'value': var['defaultValue'] if var['defaultValue'] else '待输入'}
for var in start_infos.get('properties', []) if not var.get('propertyType')]
return filter_infos
async def render_devops_project_list(self):
if not self.biz_id:
return None
bk_devops_projects = await self._get_devops_project_list()
template_card = {
'card_type': 'vote_interaction',
'source': {
'desc': 'CI'
},
'main_title': {
'title': '欢迎使用蓝盾平台',
'desc': '请选择蓝盾项目'
},
'task_id': str(int(time.time() * 100000)),
'checkbox': {
'question_key': 'bk_devops_project_id',
'option_list': bk_devops_projects
},
'submit_button': {
'text': '确认',
'key': 'bk_devops_project_select'
}
}
return template_card
async def render_devops_pipeline_list(self):
try:
bk_devops_project_id = self._session.ctx['SelectedItems']['SelectedItem']['OptionIds']['OptionId']
except KeyError:
return None
bk_devops_pipelines = await self._get_devops_pipeline_list(bk_devops_project_id)
template_card = {
'card_type': 'vote_interaction',
'source': {
'desc': 'CI'
},
'main_title': {
'title': '欢迎使用蓝盾平台',
'desc': f'请选择「{bk_devops_project_id}」下流水线'
},
'task_id': str(int(time.time() * 100000)),
'checkbox': {
'question_key': 'bk_devops_pipeline_id',
'option_list': bk_devops_pipelines
},
'submit_button': {
'text': '确认',
'key': 'bk_devops_pipeline_select'
}
}
return template_card
async def render_devops_pipeline_detail(self):
if self._session.is_first_run:
try:
bk_devops_pipeline_info = self._session.ctx['SelectedItems']['SelectedItem']['OptionIds']['OptionId']
bk_devops_project_id, bk_devops_pipeline_id, bk_devops_pipeline_name = \
bk_devops_pipeline_info.split('|')
except (KeyError, ValueError):
return None
start_infos = await self._get_devops_build_start_info(bk_devops_project_id, bk_devops_pipeline_id)
else:
bk_devops_pipeline = self._session.state['bk_devops_pipeline']
bk_devops_project_id = bk_devops_pipeline['bk_devops_project_id']
bk_devops_pipeline_id = bk_devops_pipeline['bk_devops_pipeline_id']
bk_devops_pipeline_name = bk_devops_pipeline['bk_devops_pipeline_name']
start_infos = bk_devops_pipeline['start_infos']
info = {
'bk_devops_project_id': bk_devops_project_id,
'bk_devops_pipeline_id': bk_devops_pipeline_id,
'bk_devops_pipeline_name': bk_devops_pipeline_name,
'start_infos': start_infos
}
template_card = {
'card_type': 'button_interaction',
'source': {
'desc': 'CI'
},
'main_title': {
'title': f'蓝盾流水线_{bk_devops_pipeline_name}'
},
'task_id': str(int(time.time() * 100000)),
'sub_title_text': '参数确认',
'horizontal_content_list': start_infos,
'button_list': [
{
"text": "执行",
"style": 1,
"key": f"bk_devops_pipeline_execute|{json.dumps(info)}"
},
{
"text": "修改",
"style": 2,
"key": f"bk_devops_pipeline_update|{json.dumps(info)}"
},
{
"text": "取消",
"style": 3,
"key": f"bk_devops_pipeline_cancel|{bk_devops_pipeline_name}"
}
]
}
return template_card
async def execute_task(self, bk_devops_pipeline: Dict):
bk_devops_project_id = bk_devops_pipeline['bk_devops_project_id']
bk_devops_pipeline_id = bk_devops_pipeline['bk_devops_pipeline_id']
bk_devops_pipeline_name = bk_devops_pipeline['bk_devops_pipeline_name']
params = {item['keyname']: item['value'] for item in bk_devops_pipeline['start_infos']}
try:
await self._devops.v3_app_build_start(bk_devops_project_id, bk_devops_pipeline_id, self.user_id, **params)
msg = f'{bk_devops_pipeline_name} {params} 任务启动成功'
return True
except ActionFailed as e:
msg = f'{bk_devops_pipeline_id} {params} error: 参数有误 {e}'
except HttpFailed as e:
msg = f'{bk_devops_pipeline_id} {params} error: 第三方服务异常 {e}'
finally:
logger.info(msg)
return False
def render_devops_pipeline_execute_msg(self, result: bool, bk_devops_pipeline: Dict):
return self.render_execute_msg('CI', result, bk_devops_pipeline['bk_devops_pipeline_name'],
bk_devops_pipeline['start_infos'], BK_DEVOPS_DOMAIN)
|
the-stack_106_22602 | """
ExcursionSet.py
Author: Jordan Mirocha
Affiliation: McGill
Created on: Mon 18 Feb 2019 10:38:06 EST
Description:
"""
import numpy as np
from .Constants import rho_cgs
from .Cosmology import Cosmology
from ..util.Math import central_difference
from ..util.ParameterFile import ParameterFile
from scipy.integrate import simps, quad
from scipy.interpolate import interp1d
from scipy.misc import derivative
two_pi = 2. * np.pi
four_pi = 4. * np.pi
two_pi_sq = 2. * np.pi**2
class ExcursionSet(object):
def __init__(self, cosm=None, **kwargs):
self.pf = ParameterFile(**kwargs)
if cosm is not None:
self._cosm = cosm
@property
def cosm(self):
if not hasattr(self, '_cosm'):
self._cosm = Cosmology(pf=self.pf, **self.pf)
return self._cosm
@cosm.setter
def cosm(self, value):
self._cosm = value
@property
def tab_sigma(self):
if not hasattr(self, '_tab_sigma'):
raise AttributeError('must set by hand for now')
return self._tab_sigma
@tab_sigma.setter
def tab_sigma(self, value):
self._tab_sigma = value
@property
def tab_M(self):
if not hasattr(self, '_tab_M'):
raise AttributeError('must set by hand for now')
return self._tab_M
@tab_M.setter
def tab_M(self, value):
self._tab_M = value
@property
def tab_z(self):
if not hasattr(self, '_tab_z'):
raise AttributeError('must set by hand for now')
return self._tab_z
@tab_z.setter
def tab_z(self, value):
self._tab_z = value
@property
def tab_k(self):
if not hasattr(self, '_tab_k'):
raise AttributeError('must set by hand for now')
return self._tab_k
@tab_k.setter
def tab_k(self, value):
self._tab_k = value
@property
def tab_ps(self):
if not hasattr(self, '_tab_ps'):
raise AttributeError('must set by hand for now')
return self._tab_ps
@tab_ps.setter
def tab_ps(self, value):
self._tab_ps = value
@property
def tab_growth(self):
if not hasattr(self, '_tab_growth'):
raise AttributeError('must set by hand for now')
return self._tab_growth
@tab_growth.setter
def tab_growth(self, value):
self._tab_growth = value
def _growth_factor(self, z):
return np.interp(z, self.tab_z, self.tab_growth,
left=np.inf, right=np.inf)
def Mass(self, R):
return self.cosm.rho_m_z0 * rho_cgs * self.WindowVolume(R)
def PDF(self, delta, R):
pass
def WindowReal(self, x, R):
"""
Return real-space window function.
"""
assert type(x) == np.ndarray
if self.pf['xset_window'] == 'tophat-real':
W = np.zeros_like(x)
W[x <= R] = 3. / four_pi / R**3
elif self.pf['xset_window'] == 'tophat-fourier':
W = (np.sin(x / R) - (x / R) * np.cos(x / R)) \
/ R**3 / two_pi_sq / (x / R)**3
else:
            raise NotImplementedError("Unrecognized 'xset_window' option")
return W
def WindowFourier(self, k, R):
if self.pf['xset_window'] == 'sharp-fourier':
W = np.zeros_like(k)
ok = 1. - k * R >= 0.
W[ok == 1] = 1.
elif self.pf['xset_window'] == 'tophat-real':
W = 3. * (np.sin(k * R) - k * R * np.cos(k * R)) / (k * R)**3
elif self.pf['xset_window'] == 'tophat-fourier':
W = np.zeros_like(k)
W[k <= 1./R] = 1.
else:
            raise NotImplementedError("Unrecognized 'xset_window' option")
return W
def WindowVolume(self, R):
if self.pf['xset_window'] == 'sharp-fourier':
# Sleight of hand
return four_pi * R**3 / 3.
elif self.pf['xset_window'] == 'tophat-real':
return four_pi * R**3 / 3.
elif self.pf['xset_window'] == 'tophat-fourier':
return four_pi * R**3 / 3.
else:
            raise NotImplementedError("Unrecognized 'xset_window' option")
def Variance(self, z, R):
"""
Compute the variance in the field on some scale `R`.
"""
iz = np.argmin(np.abs(z - self.tab_z))
# Window function
W = self.WindowFourier(self.tab_k, R)
# Dimensionless power spectrum
D = self.tab_k**3 * self.tab_ps[iz,:] / two_pi_sq
return np.trapz(D * np.abs(W)**2, x=np.log(self.tab_k))
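    # In symbols, the quantity returned above is
    #     sigma^2(R) = \int dln(k) [k^3 P(k) / (2 pi^2)] |W(k, R)|^2
    # i.e. the dimensionless power spectrum weighted by the chosen window function.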
def CollapsedFraction(self):
pass
def SizeDistribution(self, z, R, dcrit=1.686, dzero=0.0):
"""
Compute the size distribution of objects.
Parameters
----------
z: int, float
Redshift of interest.
Returns
-------
Tuple containing (in order) the radii, masses, and the
differential size distribution. Each is an array of length
self.tab_M, i.e., with elements corresponding to the masses
used to compute the variance of the density field.
"""
# Comoving matter density
rho0_m = self.cosm.rho_m_z0 * rho_cgs
M = self.Mass(R)
S = np.array([self.Variance(z, RR) for RR in R])
_M, _dlnSdlnM = central_difference(np.log(M[-1::-1]), np.log(S[-1::-1]))
_M = _M[-1::-1]
dlnSdlnM = _dlnSdlnM[-1::-1]
dSdM = dlnSdlnM * (S[1:-1] / M[1:-1])
dFdM = self.FCD(z, R, dcrit, dzero)[1:-1] * np.abs(dSdM)
# This is, e.g., Eq. 17 in Zentner (2006)
# or Eq. 9.38 in Loeb and Furlanetto (2013)
dndm = rho0_m * np.abs(dFdM) / M[1:-1]
return R[1:-1], M[1:-1], dndm
def FCD(self, z, R, dcrit=1.686, dzero=0.0):
"""
First-crossing distribution function.
i.e., dF/dS where S=sigma^2.
"""
S = np.array([self.Variance(z, RR) for RR in R])
norm = (dcrit - dzero) / np.sqrt(two_pi) / S**1.5
p = norm * np.exp(-(dcrit - dzero)**2 / 2. / S)
return p
|
the-stack_106_22604 | # Copyright (c) 2010-2014 openpyxl
from io import BytesIO
from zipfile import ZipFile
import pytest
from openpyxl.tests.helper import compare_xml
from openpyxl.reader.workbook import read_rels
from openpyxl.xml.constants import (
ARC_CONTENT_TYPES,
ARC_WORKBOOK_RELS,
PKG_REL_NS,
REL_NS,
)
from openpyxl.xml.functions import tostring
def test_read_external_ref(datadir):
datadir.chdir()
archive = ZipFile(BytesIO(), "w")
with open("[Content_Types].xml") as src:
archive.writestr(ARC_CONTENT_TYPES, src.read())
with open("workbook.xml.rels") as src:
archive.writestr(ARC_WORKBOOK_RELS, src.read())
rels = read_rels(archive)
for _, pth in rels:
if pth['type'] == '%s/externalLink' % REL_NS:
assert pth['path'] == 'xl/externalLinks/externalLink1.xml'
def test_read_external_link(datadir):
from .. external import parse_books
datadir.chdir()
with open("externalLink1.xml.rels") as src:
xml = src.read()
book = parse_books(xml)
assert book.Id == 'rId1'
def test_read_external_ranges(datadir):
from .. external import parse_ranges
datadir.chdir()
with open("externalLink1.xml") as src:
xml = src.read()
names = tuple(parse_ranges(xml))
assert names[0].name == 'B2range'
assert names[0].refersTo == "='Sheet1'!$A$1:$A$10"
def test_dict_external_book():
from .. external import ExternalBook
book = ExternalBook('rId1', "book1.xlsx")
assert dict(book) == {'Id':'rId1', 'Target':'book1.xlsx',
'TargetMode':'External',
'Type':'http://schemas.openxmlformats.org/officeDocument/2006/relationships/externalLinkPath'}
def test_dict_external_range():
from .. external import ExternalRange
rng = ExternalRange("something_special", "='Sheet1'!$A$1:$B$2")
assert dict(rng) == {'name':'something_special', 'refersTo':"='Sheet1'!$A$1:$B$2"}
def test_write_external_link():
from .. external import ExternalRange
from .. external import write_external_link
link1 = ExternalRange('r1', 'over_there!$A$1:$B$2')
link2 = ExternalRange('r2', 'somewhere_else!$C$10:$D$12')
links = [link1, link2]
el = write_external_link(links)
xml = tostring(el)
expected = """
<externalLink xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<externalBook xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:id="rId1">
<definedNames>
<definedName name="r1" refersTo="over_there!$A$1:$B$2"/>
<definedName name="r2" refersTo="somewhere_else!$C$10:$D$12"/>
</definedNames>
</externalBook>
</externalLink>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_write_external_book_rel():
from .. external import ExternalBook
from .. external import write_external_book_rel
book = ExternalBook("rId1", "book2.xlsx")
rel = write_external_book_rel(book)
xml = tostring(rel)
expected = """
<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
<Relationship Id="rId1" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/externalLinkPath" Target="book2.xlsx" TargetMode="External"/>
</Relationships>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_read_archive(datadir):
from openpyxl.reader.workbook import read_rels
from .. external import detect_external_links
datadir.chdir()
archive = ZipFile("book1.xlsx")
rels = read_rels(archive)
books = detect_external_links(rels, archive)
book = tuple(books)[0]
assert book.Target == "book2.xlsx"
expected = ["='Sheet1'!$A$1:$A$10", ]
for link, exp in zip(book.links, expected):
assert link.refersTo == exp
def test_load_workbook(datadir):
datadir.chdir()
from openpyxl import load_workbook
wb = load_workbook('book1.xlsx')
assert len(wb._external_links) == 1
def test_write_workbook(datadir, tmpdir):
datadir.chdir()
src = ZipFile("book1.xlsx")
orig_files = set(src.namelist())
src.close()
from openpyxl import load_workbook
wb = load_workbook("book1.xlsx")
tmpdir.chdir()
wb.save("book1.xlsx")
src = ZipFile("book1.xlsx")
out_files = set(src.namelist())
src.close()
# remove files from archive that the other can't have
out_files.discard("xl/sharedStrings.xml")
orig_files.discard("xl/calcChain.xml")
|
the-stack_106_22609 | import typer
valid_completion_items = [
("Camila", "The reader of books."),
("Carlos", "The writer of scripts."),
("Sebastian", "The type hints guy."),
]
def complete_name(incomplete: str):
for name, help_text in valid_completion_items:
if name.startswith(incomplete):
yield (name, help_text)
def main(
name: str = typer.Option(
"World", help="The name to say hi to.", autocompletion=complete_name
)
):
typer.echo(f"Hello {name}")
if __name__ == "__main__":
typer.run(main)
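# Shell usage sketch (assuming this file is saved as main.py):
#     $ python main.py --name Sebastian
#     Hello Sebastian
# The complete_name() generator above supplies the candidates (and their help text)
# offered for --name once shell completion is installed for the script.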
|
the-stack_106_22610 | # -*- coding: utf-8-*-
from typing import Dict, Any
import numpy as np
import tensorflow as tf
import tensorflow_addons as tf_ad
from tensorflow.keras.layers import Embedding, Bidirectional, LSTM, TimeDistributed, Conv1D, GlobalMaxPooling1D
from tensorflow.python.keras.layers import add
from feature_extractor import Vocabulary
from train_param import TrainParam
class BiLstmCrfModel(tf.keras.Model):
def __init__(self, train_param: TrainParam, vocab: Vocabulary):
super().__init__()
self.train_param = train_param
if train_param.use_elmo:
# tf2 not support tf-hub version elmo
'''
elmo = hub.KerasLayer("https://tfhub.dev/google/elmo/3",
trainable=True,
signature="tokens",
output_key=train_param.elmo_output)
def elmo_embedding(x):
tokens, sequence_len = x
return elmo(
inputs={
'tokens': tf.squeeze(tf.cast(tokens, tf.string)),
'sequence_len': sequence_len
}
)[train_param.elmo_output]
self.embedding = Lambda(elmo_embedding, output_shape=(train_param.max_word, train_param.emb_dim),
name='word_embedding')
'''
else:
self.embedding = Embedding(len(vocab.token), train_param.emb_dim, name='word_embedding')
self.pos_embedding = Embedding(len(vocab.pos), train_param.pos_emb_dim, name='pos_embedding')
self.char_embedding = Embedding(len(vocab.char), train_param.char_emb_dim,
input_length=(train_param.max_word, train_param.max_char,),
name='char_embedding')
if train_param.char_embedding_type == 'lstm':
self.char_lstm = TimeDistributed(Bidirectional(LSTM(
units=train_param.char_lstm_units,
recurrent_dropout=train_param.char_lstm_recurrent_dropout,
dropout=train_param.char_lstm_dropout,
), name='char_blstm'))
else:
self.char_cnn = TimeDistributed(
Conv1D(train_param.char_cnn_filter_num, train_param.char_cnn_window_size, padding='same'),
name="char_cnn")
self.char_max_pool = TimeDistributed(GlobalMaxPooling1D(), name="char_pooling")
self.char_dropout = tf.keras.layers.Dropout(train_param.char_dropout)
self.token_dropout = tf.keras.layers.Dropout(train_param.token_dropout)
self.pos_dropout = tf.keras.layers.Dropout(train_param.pos_dropout)
label_size = len(vocab.label)
self.bilstm_layers = []
for idx in range(train_param.lstm_layer_size):
self.bilstm_layers.append(Bidirectional(LSTM(
units=train_param.lstm_units,
recurrent_dropout=train_param.lstm_recurrent_dropout,
dropout=train_param.lstm_dropout,
return_sequences=True
), name=f"blstm-{idx}"))
self.dense = tf.keras.layers.Dense(label_size, name='logits')
self.transition_params = tf.Variable(tf.random.uniform(shape=(label_size, label_size)))
def load_embedding_weights(self, wv_path: str, vocab: Vocabulary):
word_vectors = {}
with open(wv_path) as f:
for line in f.readlines()[1:]:
line = line.split()
word, vector = line[0], np.array(line[1:], dtype='float32')
word_vectors[word] = vector
self.embedding.build((None,))
matrix = self.embedding.get_weights()[0]
oov_count = 0
for word, idx in vocab.token.items():
target = word.lower() if self.train_param.do_lowercase else word
if target in word_vectors:
matrix[idx] = word_vectors[target]
else:
oov_count += 1
print("wv vector oov rate: {}".format(oov_count / len(vocab.token)))
self.embedding.set_weights([matrix])
@tf.function(experimental_relax_shapes=True)
def call(self, inputs: Dict[str, Any], labels=None, training=None):
# token
if self.train_param.use_elmo:
# tf2 not support tf-hub version elmo
'''
token_embedding = self.embedding([inputs['raw_token'], inputs['sequence_lengths']])
'''
token_embedding = inputs['token']
else:
token_embedding = self.embedding(inputs['token'])
token_embedding = self.token_dropout(token_embedding, training=training)
# pos
pos_embedding = self.pos_embedding(inputs['pos'])
pos_embedding = self.pos_dropout(pos_embedding, training=training)
# char
char_embedding = self.char_embedding(inputs['char'])
if self.train_param.char_embedding_type == 'lstm':
char_embedding = self.char_lstm(char_embedding, training=training)
else:
char_embedding = self.char_cnn(char_embedding)
char_embedding = self.char_max_pool(char_embedding)
char_embedding = self.char_dropout(char_embedding, training=training)
feature = tf.concat([token_embedding, char_embedding, pos_embedding], axis=-1)
immediate_feature = []
for layer in self.bilstm_layers:
immediate_feature.append(layer(feature, training=training))
feature = immediate_feature[-1]
if len(self.bilstm_layers) > 1:
feature = add(immediate_feature) # residual
logits = self.dense(feature)
if labels is not None:
log_likelihood, self.transition_params = tf_ad.text.crf_log_likelihood(logits,
labels,
inputs['sequence_lengths'],
transition_params=self.transition_params)
return logits, log_likelihood
else:
return logits
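# Inference sketch (not part of the original module): with tensorflow_addons the
# logits returned above can be decoded into tag ids using the learned transitions.
# Here `model` is assumed to be a trained BiLstmCrfModel and `batch` its input dict:
#
#     logits = model(batch)
#     tags, _ = tf_ad.text.crf_decode(logits, model.transition_params,
#                                     batch['sequence_lengths'])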
|
the-stack_106_22611 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from email.headerregistry import Address
from celery.schedules import crontab
from first import first
from warehouse import tasks
from warehouse.accounts.interfaces import ITokenService, IUserService
from warehouse.accounts.models import Email
from warehouse.email.interfaces import IEmailSender
from warehouse.email.services import EmailMessage
from warehouse.email.ses.tasks import cleanup as ses_cleanup
def _compute_recipient(user, email):
# We want to try and use the user's name, then their username, and finally
# nothing to display a "Friendly" name for the recipient.
return str(Address(first([user.name, user.username], default=""), addr_spec=email))
@tasks.task(bind=True, ignore_result=True, acks_late=True)
def send_email(task, request, recipient, msg, success_event):
msg = EmailMessage(**msg)
sender = request.find_service(IEmailSender)
try:
sender.send(recipient, msg)
user_service = request.find_service(IUserService, context=None)
user_service.record_event(**success_event)
except Exception as exc:
task.retry(exc=exc)
def _send_email_to_user(request, user, msg, *, email=None, allow_unverified=False):
# If we were not given a specific email object, then we'll default to using
# the User's primary email address.
if email is None:
email = user.primary_email
# If we were not able to locate an email address for this user, then we will just
# have to skip sending email to them. If we have an email for them, then we will
# check to see if it is verified, if it is not then we will also skip sending email
# to them **UNLESS** we've been told to allow unverified emails.
if email is None or not (email.verified or allow_unverified):
return
# We should only store/display IP address of an 'email sent' event if the user
# who triggered the email event is the one who receives the email. Else display
# 'Redacted' to prevent user privacy concerns. If we don't know the user who
# triggered the action, default to showing the IP of the source.
user_email = request.db.query(Email).filter(Email.email == email.email).one()
redact_ip = user_email.user_id != request.user.id if request.user else False
request.task(send_email).delay(
_compute_recipient(user, email.email),
{
"subject": msg.subject,
"body_text": msg.body_text,
"body_html": msg.body_html,
},
{
"tag": "account:email:sent",
"user_id": user.id,
"ip_address": request.remote_addr,
"additional": {
"from_": request.registry.settings.get("mail.sender"),
"to": email.email,
"subject": msg.subject,
"redact_ip": redact_ip,
},
},
)
def _email(name, *, allow_unverified=False):
"""
    This decorator is used to turn a function into an email sending function!
The name parameter is the name of the email we're going to be sending (used to
locate the templates on the file system).
The allow_unverified kwarg flags whether we will send this email to an unverified
email or not. We generally do not want to do this, but some emails are important
enough or have special requirements that require it.
Functions that are decorated by this need to accept two positional arguments, the
first argument is the Pyramid request object, and the second argument is either
a single User, or a list of Users. These users represent the recipients of this
email. Additional keyword arguments are supported, but are not otherwise restricted.
Functions decorated by this must return a mapping of context variables that will
ultimately be returned, but which will also be used to render the templates for
the emails.
Thus this function can decorate functions with a signature like so:
def foo(
request: Request, user_or_users: Union[User, List[User]]
) -> Mapping[str, Any]:
...
Finally, if the email needs to be sent to an address *other* than the user's primary
email address, instead of a User object, a tuple of (User, Email) objects may be
used in place of a User object.
"""
def inner(fn):
@functools.wraps(fn)
def wrapper(request, user_or_users, **kwargs):
if isinstance(user_or_users, (list, set)):
recipients = user_or_users
else:
recipients = [user_or_users]
context = fn(request, user_or_users, **kwargs)
msg = EmailMessage.from_template(name, context, request=request)
for recipient in recipients:
if isinstance(recipient, tuple):
user, email = recipient
else:
user, email = recipient, None
_send_email_to_user(
request, user, msg, email=email, allow_unverified=allow_unverified
)
return context
return wrapper
return inner
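# For instance, a hypothetical sender following the pattern described in the
# docstring above (the template name and context keys are made up here):
#
#     @_email("some-notification")
#     def send_some_notification_email(request, user, *, project_name):
#         return {"username": user.username, "project": project_name}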
@_email("password-reset", allow_unverified=True)
def send_password_reset_email(request, user_and_email):
user, _ = user_and_email
token_service = request.find_service(ITokenService, name="password")
token = token_service.dumps(
{
"action": "password-reset",
"user.id": str(user.id),
"user.last_login": str(user.last_login),
"user.password_date": str(user.password_date),
}
)
return {
"token": token,
"username": user.username,
"n_hours": token_service.max_age // 60 // 60,
}
@_email("verify-email", allow_unverified=True)
def send_email_verification_email(request, user_and_email):
user, email = user_and_email
token_service = request.find_service(ITokenService, name="email")
token = token_service.dumps({"action": "email-verify", "email.id": email.id})
return {
"token": token,
"email_address": email.email,
"n_hours": token_service.max_age // 60 // 60,
}
@_email("password-change")
def send_password_change_email(request, user):
return {"username": user.username}
@_email("password-compromised", allow_unverified=True)
def send_password_compromised_email(request, user):
return {}
@_email("password-compromised-hibp", allow_unverified=True)
def send_password_compromised_email_hibp(request, user):
return {}
@_email("account-deleted")
def send_account_deletion_email(request, user):
return {"username": user.username}
@_email("primary-email-change")
def send_primary_email_change_email(request, user_and_email):
user, email = user_and_email
return {
"username": user.username,
"old_email": email.email,
"new_email": user.email,
}
@_email("collaborator-added")
def send_collaborator_added_email(
request, email_recipients, *, user, submitter, project_name, role
):
return {
"username": user.username,
"project": project_name,
"submitter": submitter.username,
"role": role,
}
@_email("verify-project-role", allow_unverified=True)
def send_project_role_verification_email(
request,
user,
desired_role,
initiator_username,
project_name,
email_token,
token_age,
):
return {
"desired_role": desired_role,
"email_address": user.email,
"initiator_username": initiator_username,
"n_hours": token_age // 60 // 60,
"project_name": project_name,
"token": email_token,
}
@_email("added-as-collaborator")
def send_added_as_collaborator_email(request, user, *, submitter, project_name, role):
return {"project": project_name, "submitter": submitter.username, "role": role}
@_email("collaborator-removed")
def send_collaborator_removed_email(
request, email_recipients, *, user, submitter, project_name
):
return {
"username": user.username,
"project": project_name,
"submitter": submitter.username,
}
@_email("removed-as-collaborator")
def send_removed_as_collaborator_email(request, user, *, submitter, project_name):
return {
"project": project_name,
"submitter": submitter.username,
}
@_email("collaborator-role-changed")
def send_collaborator_role_changed_email(
request, recipients, *, user, submitter, project_name, role
):
return {
"username": user.username,
"project": project_name,
"submitter": submitter.username,
"role": role,
}
@_email("role-changed-as-collaborator")
def send_role_changed_as_collaborator_email(
request, user, *, submitter, project_name, role
):
return {
"project": project_name,
"submitter": submitter.username,
"role": role,
}
@_email("two-factor-added")
def send_two_factor_added_email(request, user, method):
pretty_methods = {"totp": "TOTP", "webauthn": "WebAuthn"}
return {"method": pretty_methods[method], "username": user.username}
@_email("two-factor-removed")
def send_two_factor_removed_email(request, user, method):
pretty_methods = {"totp": "TOTP", "webauthn": "WebAuthn"}
return {"method": pretty_methods[method], "username": user.username}
@_email("removed-project")
def send_removed_project_email(
request, user, *, project_name, submitter_name, submitter_role, recipient_role
):
recipient_role_descr = "an owner"
if recipient_role == "Maintainer":
recipient_role_descr = "a maintainer"
return {
"project_name": project_name,
"submitter_name": submitter_name,
"submitter_role": submitter_role.lower(),
"recipient_role_descr": recipient_role_descr,
}
@_email("yanked-project-release")
def send_yanked_project_release_email(
request, user, *, release, submitter_name, submitter_role, recipient_role
):
recipient_role_descr = "an owner"
if recipient_role == "Maintainer":
recipient_role_descr = "a maintainer"
return {
"project": release.project.name,
"release": release.version,
"release_date": release.created.strftime("%Y-%m-%d"),
"submitter": submitter_name,
"submitter_role": submitter_role.lower(),
"recipient_role_descr": recipient_role_descr,
"yanked_reason": release.yanked_reason,
}
@_email("unyanked-project-release")
def send_unyanked_project_release_email(
request, user, *, release, submitter_name, submitter_role, recipient_role
):
recipient_role_descr = "an owner"
if recipient_role == "Maintainer":
recipient_role_descr = "a maintainer"
return {
"project": release.project.name,
"release": release.version,
"release_date": release.created.strftime("%Y-%m-%d"),
"submitter": submitter_name,
"submitter_role": submitter_role.lower(),
"recipient_role_descr": recipient_role_descr,
}
@_email("removed-project-release")
def send_removed_project_release_email(
request, user, *, release, submitter_name, submitter_role, recipient_role
):
recipient_role_descr = "an owner"
if recipient_role == "Maintainer":
recipient_role_descr = "a maintainer"
return {
"project_name": release.project.name,
"release_version": release.version,
"release_date": release.created.strftime("%Y-%m-%d"),
"submitter_name": submitter_name,
"submitter_role": submitter_role.lower(),
"recipient_role_descr": recipient_role_descr,
}
@_email("removed-project-release-file")
def send_removed_project_release_file_email(
request, user, *, file, release, submitter_name, submitter_role, recipient_role
):
recipient_role_descr = "an owner"
if recipient_role == "Maintainer":
recipient_role_descr = "a maintainer"
return {
"file": file,
"project_name": release.project.name,
"release_version": release.version,
"submitter_name": submitter_name,
"submitter_role": submitter_role.lower(),
"recipient_role_descr": recipient_role_descr,
}
def includeme(config):
email_sending_class = config.maybe_dotted(config.registry.settings["mail.backend"])
config.register_service_factory(email_sending_class.create_service, IEmailSender)
# Add a periodic task to cleanup our EmailMessage table. We're going to
# do this cleanup, regardless of if we're configured to use SES to send
# or not, because even if we stop using SES, we'll want to remove any
# emails that had been sent, and the cost of doing this is very low.
config.add_periodic_task(crontab(minute=0, hour=0), ses_cleanup)
|
the-stack_106_22613 | from src.pipeline_constructor.core.PipelineModel import PipelineModel
from src.pipeline_constructor.core.Model3D import Model3D
from src.graph_creator.core.PipelineGraph import PipelineGraph, PipelinePart
from src.core.PartType import PartType
import open3d as o3d
import numpy as np
class PipelineConstructor:
def __init__(self):
self._part_dictionary = {part_type: part_type.part_model_file() for part_type in PartType}
def construct_pipeline(self, pipeline: PipelineGraph) -> PipelineModel:
graph = pipeline.graph
model = PipelineModel()
for node in graph.nodes:
part_type = graph.nodes[node]['type']
coordinates = graph.nodes[node]['coordinates']
direction = graph.nodes[node]['direction']
part = Model3D(self.create_mesh(part_type, coordinates, direction))
model.add_element(part)
model.compute_normals()
return model
def create_mesh_from_part(self, part: PipelinePart) -> o3d.geometry.TriangleMesh:
return self.create_mesh(part.part_type, part.coordinates, part.direction)
def create_mesh(self, part_type: PartType, coordinates: np.ndarray,
direction: np.ndarray) -> o3d.geometry.TriangleMesh:
mesh = o3d.io.read_triangle_mesh(str(self._part_dictionary[part_type]))
rotation = o3d.geometry.get_rotation_matrix_from_xyz(direction)
mesh.translate([0., 0., 0.], relative=True)
mesh.rotate(rotation, center=[0., 0., 0.])
mesh.translate(coordinates, relative=True)
return mesh
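# Usage sketch (illustrative; `PartType.PIPE` is a hypothetical enum member, since
# the concrete PartType values are defined elsewhere in the project):
#
#     constructor = PipelineConstructor()
#     mesh = constructor.create_mesh(PartType.PIPE,
#                                    np.array([0.0, 0.0, 0.0]),   # translation
#                                    np.array([0.0, 0.0, 0.0]))   # Euler angles (radians)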
|
the-stack_106_22614 | import view.ui.configuration as configuration
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtCore import pyqtSlot
from model.config import Config
class Configuration(QtWidgets.QWidget):
def __init__(self, window):
super().__init__()
self.MainWindow = window
ui = configuration.Ui_Form()
ui.setupUi(self)
self.findChild(QtWidgets.QPushButton, "saveButton").clicked.connect(self.configSave)
self.findChild(QtWidgets.QLineEdit, "threadcountLineEdit").setValidator(QtGui.QIntValidator(1, 100))
self.findChild(QtWidgets.QLineEdit, "plasterintervalLineEdit").setValidator(QtGui.QDoubleValidator(1, 100, 3))
@pyqtSlot()
def configSave(self):
threadCount = int(self.findChild(QtWidgets.QLineEdit, "threadcountLineEdit").text())
if threadCount == 0:
self.findChild(QtWidgets.QLineEdit, "threadcountLineEdit").setText("1")
threadCount = 1
Config.All.threadCount = threadCount
Config.Delete.printIdFlag = self.findChild(QtWidgets.QCheckBox, "printidCheckBox").isChecked()
Config.Delete.printTextFlag = self.findChild(QtWidgets.QCheckBox, "printtextCheckBox").isChecked()
Config.Delete.printOriginFlag = self.findChild(QtWidgets.QCheckBox, "printoriginCheckBox").isChecked()
Config.Search.printBoardSearchEndFlag = self.findChild(QtWidgets.QCheckBox, "printboardsearchendCheckBox").isChecked()
        plasterInterval = float(self.findChild(QtWidgets.QLineEdit, "plasterintervalLineEdit").text())
if plasterInterval == 0:
self.MainWindow.messageDialog("failed", "도배 간격은 0초가 될 수 없습니다")
return
else:
Config.Plaster.plasterInterval = plasterInterval
Config.Plaster.printPlasterFlag = self.findChild(QtWidgets.QCheckBox, "printplasterCheckBox").isChecked()
self.MainWindow.messageDialog("save", "저장되었습니다") |
the-stack_106_22615 | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2016-09-12 13:44:24 +0200 (Mon, 12 Sep 2016)
#
# https://github.com/harisekhon/devops-python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
# NOTE: 'flush' is not supported in Happybase as it's not in the Thrift API,
# only available in the Java API, see HBaseAdmin.flush(table)
# I'll write a JVM native alternative to this later as I needed this quickly
"""
Tool to iterate on and flush all HBase tables
Written for flushing bulk imports skipping the WAL, for example OpenTSDB bulk import.
The Thrift API doesn't support this action so it uses the HBase shell locally which must be in the $PATH
There is also a shell script version of this in the adjacent DevOps-Perl-Tools repo
Tested on Hortonworks HDP 2.3 (HBase 1.1.2) and Apache HBase 1.0.3, 1.1.6, 1.2.1, 1.2.2, 1.3.1
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import logging
import os
import re
import sys
import traceback
import subprocess
PIPE = subprocess.PIPE
libdir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'pylib'))
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import log, die, autoflush
from harisekhon.utils import validate_regex
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.1'
class HBaseFlushTables(CLI):
def __init__(self):
# Python 2.x
super(HBaseFlushTables, self).__init__()
# Python 3.x
# super().__init__()
self.table_list_header_regex = re.compile('TABLE')
self.table_list_end_regex = re.compile(r'row.*\sin\s.*\sseconds')
self.table_regex = None
self.timeout_default = 6 * 3600
autoflush()
def add_options(self):
self.add_opt('-r', '--regex', help='Regex of tables to flush')
self.add_opt('-l', '--list-tables', action='store_true', help='List tables and exit')
def process_args(self):
log.setLevel(logging.INFO)
self.no_args()
regex = self.get_opt('regex')
if regex:
validate_regex(regex)
self.table_regex = re.compile(regex, re.I)
log.info('filtering to flush only tables matching regex \'{0}\''.format(regex))
def get_tables(self):
log.info('getting table list')
try:
process = subprocess.Popen(['hbase', 'shell'], stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT)
(stdout, _) = process.communicate('list')
process.wait()
if process.returncode != 0:
print('ERROR:', end='')
die(stdout)
lines = stdout.split('\n')
lineno = 1
for line in lines:
if self.table_list_header_regex.search(line):
break
lineno += 1
if lineno > len(lines):
die("Failed to parse table list output (couldn't find the starting line TABLE)")
tables = set()
for line in lines[lineno:]:
if self.table_list_end_regex.search(line):
break
line = line.strip()
if not line:
continue
tables.add(line)
return tables
except OSError as _:
die("OSError running hbase shell to list tables: {0}".format(_))
except subprocess.CalledProcessError as _:
print('Failed to get tables using HBase shell:\n')
print(_.output)
sys.exit(_.returncode)
def run(self):
tables = self.get_tables()
if not tables:
die('No Tables Found')
if self.get_opt('list_tables'):
print('Tables:\n\n' + '\n'.join(tables))
sys.exit(3)
tables_to_flush = set()
if self.table_regex:
log.info('filtering tables based on regex')
for table in sorted(list(tables)):
if self.table_regex.search(table):
tables_to_flush.add(table)
else:
tables_to_flush = sorted(list(tables))
if log.isEnabledFor(logging.INFO):
log.info('Flushing tables:\n\n%s\n', '\n'.join(tables_to_flush))
flush_commands = '\n'.join(["flush '{0}'".format(table) for table in tables_to_flush])
try:
# by having stdout and stderr go to the same place more likely the output will be in a sane order
process = subprocess.Popen(['hbase', 'shell'], stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT)
(stdout, _) = process.communicate(input=flush_commands)
process.wait()
if process.returncode != 0:
print('ERROR:', end='')
die(stdout)
print(stdout)
except OSError as _:
die("OSError running hbase shell to flush tables: {0}".format(_))
except subprocess.CalledProcessError as _:
print('Failed to get tables using HBase shell:\n')
print(_.output)
sys.exit(_.returncode)
if __name__ == '__main__':
HBaseFlushTables().main()
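# Command-line sketch (assuming the script is saved as hbase_flush_tables.py and the
# hbase shell is available in $PATH):
#     ./hbase_flush_tables.py --list-tables         # list tables and exit
#     ./hbase_flush_tables.py --regex '^tsdb'       # flush only the OpenTSDB tables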
|
the-stack_106_22616 | import os
import yaml
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.uix.floatlayout import FloatLayout
from kivymd.app import MDApp
from gui.core import Navigation
from core.connectors import HomeAssistant
from core.platform import Shpi
if os.path.exists('config.yaml'):
config_file = 'config.yaml'
else:
config_file = 'config.example.yaml'
with open(config_file, 'r') as stream:
config = yaml.safe_load(stream)
class MainScreen(FloatLayout):
def __init__(self, config, **kwargs):
super(MainScreen, self).__init__(**kwargs)
self.app = App.get_running_app()
self.navigation = Navigation(config['rooms'])
self.size = Window.size
self.backlight_trigger = Clock.schedule_once(self.turn_display_off, 5)
self.backlight = True
self.add_widget(self.navigation)
def on_touch_down(self, touch):
if not self.backlight:
print(touch)
self.turn_display_on()
print(self.backlight)
Clock.unschedule(self.backlight_trigger)
self.backlight_trigger = Clock.schedule_once(self.turn_display_off, 5)
return super().on_touch_down(touch)
def turn_display_off(self, dt):
print('display is off')
self.backlight = False
if self.app.platform:
self.app.platform.turn_display_off()
def turn_display_on(self):
print('display is on')
self.backlight = True
if self.app.platform:
self.app.platform.turn_display_on()
class MainApp(MDApp):
def __init__(self, **kwargs):
self.register_event_type('on_state_changed')
# self.theme_cls.theme_style = "Dark"
# self.theme_cls.primary_palette = "Gray"
super().__init__(**kwargs)
self.config = config
self.connectors = {}
self.platform = None
if 'homeassistant' in config:
self.connectors['homeassistant'] = HomeAssistant(self, self.config['homeassistant'])
if 'shpi' in config:
self.platform = Shpi(self, self.config['shpi'])
def build(self):
self.root = MainScreen(config)
Window.size = (800, 480)
for connector in self.connectors:
self.connectors[connector].connect_to_server()
return self.root
def change_state(self, connector, entity_id, state):
if connector in self.connectors:
self.connectors[connector].change_state(entity_id, state)
def change_temperature(self, connector, entity_id, temperature):
if connector in self.connectors:
self.connectors[connector].change_temperature(entity_id, temperature)
def on_state_changed(self, *args, **kwargs):
pass
if __name__ == "__main__":
MainApp().run()
|
the-stack_106_22617 | #!/usr/bin/env python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the s-crow data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/s-CROW/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "s-CROW")
return os.path.expanduser("~/.s-crow")
def read_bitcoin_config(dbdir):
"""Read the s-crow.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "s-crow.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a s-crow JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19712 if testnet else 9712
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        # The implied fee is whatever portion of the inputs is not paid out.
        fee = total_in-total_out
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get s-crows from")
parser.add_option("--to", dest="to", default=None,
                      help="address to send s-crows to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of s-crow.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_106_22628 | import sys , random
from functions import split , listToString
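# NOTE (assumption): `split` and `listToString` come from the local functions.py
# helper module; split() is taken to break the address string into a list of
# characters and listToString() to join such a list back into a single string.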
def Main():
    # Guard against a missing command-line argument instead of letting
    # sys.argv[1] raise an IndexError.
    if len(sys.argv) < 2:
        print("Error - No argument Email Provided")
        return
    argv1 = str(sys.argv[1])
    # Split the address into characters, insert a "." at a random interior
    # position, and join it back together.
    Email = split(argv1)
    numOfElementInList = len(Email)
    RandomNumberGenerator = random.randint(1, (numOfElementInList-1))
    Email.insert(RandomNumberGenerator, ".")
    Email = listToString(list=Email)
with open ("output.env", "r") as fileRead:
data=fileRead.read()
with open ("output.env", 'a') as fileWrite:
        if Email in data:
            # A duplicate address means the variants are exhausted; stop here.
            print("End")
        else:
            fileWrite.write(Email)
            fileWrite.write("@gmail.com")
            fileWrite.write("\n")
            # Recurse to keep generating addresses until a duplicate shows up
            # (very long runs may hit Python's recursion limit).
            Main()
Main()
|
the-stack_106_22629 | import os
from PIL import Image
# Fill these variables before running this tool
image_path = r"./crops.png"
image_id = 7
image = Image.open(image_path)
os.makedirs(r'./output_images', exist_ok=True)
if image.width % 16 != 0 or image.height % 16 != 0:
print('Error: image could not be cropped to 16x16 files exactly.')
for i in range(0, image.width // 16):
for j in range(0, image.height // 16):
cropped_image = image.crop((i * 16, j * 16, i * 16 + 16, j * 16 + 16))
all_blank = True
for x in range(0, 16):
for y in range(0, 16):
if cropped_image.load()[x, y] != (0, 0, 0, 0):
all_blank = False
# Only blocks with images could be saved into files.
if not all_blank:
cropped_image.save(open(r'./output_images/' + str(image_id) + '_' + str(i) + '_' + str(j) + '.png', 'wb'))
pixels = image.load()
for i in range(0, image.width // 16):
for j in range(0, image.height // 16):
# Draw this block
for x in range(0, 16):
pixels[i * 16 + 15, j * 16 + x] = (255, 255, 255, 255)
pixels[i * 16 + x, j * 16 + 15] = (255, 255, 255, 255)
# Draw number for this block
image.save(open(r'proc_crops_tiles.png', 'wb'))
|
the-stack_106_22630 | # Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import os
from unittest import mock
import pytest
from click.testing import Result
from vdk.plugin.snowflake import snowflake_plugin
from vdk.plugin.snowflake.snowflake_connection import SnowflakeConnection
from vdk.plugin.test_utils.util_funcs import cli_assert_equal
from vdk.plugin.test_utils.util_funcs import CliEntryBasedTestRunner
@pytest.fixture
def mocked_connection(monkeypatch):
def mock_execute_query(*args, **kwargs):
return [["Query successfully executed."]]
monkeypatch.delattr(
"vdk.plugin.snowflake.snowflake_connection.SnowflakeConnection._connect"
)
monkeypatch.setattr(SnowflakeConnection, "execute_query", mock_execute_query)
def test_snowflake_plugin(mocked_connection):
"""
Test if the configuration of the Snowflake plugin
and its general setup work as expected.
"""
with mock.patch.dict(
os.environ,
{
"VDK_DB_DEFAULT_TYPE": "SNOWFLAKE",
"VDK_SNOWFLAKE_ACCOUNT": "testaccount",
"VDK_SNOWFLAKE_USER": "testuser",
"VDK_SNOWFLAKE_PASSWORD": "testpassword",
},
):
runner = CliEntryBasedTestRunner(snowflake_plugin)
query_result: Result = runner.invoke(
["snowflake-query", "--query", f"SELECT 1"]
)
cli_assert_equal(0, query_result)
assert "Query successfully executed." in query_result.output
|
the-stack_106_22631 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting tools/perf/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import os
USE_PYTHON3 = True
def _CommonChecks(input_api, output_api, block_on_failure=False):
"""Performs common checks, which includes running pylint.
block_on_failure: For some failures, we would like to warn the
user but still allow them to upload the change. However, we
don't want them to commit code with those failures, so we
need to block the change on commit.
"""
results = []
results.extend(_CheckExpectations(input_api, output_api))
results.extend(_CheckJson(input_api, output_api))
results.extend(
_CheckPerfDataCurrentness(input_api, output_api, block_on_failure))
results.extend(
_CheckPerfJsonConfigs(input_api, output_api, block_on_failure))
results.extend(_CheckWprShaFiles(input_api, output_api))
results.extend(_CheckShardMaps(input_api, output_api, block_on_failure))
results.extend(_CheckVersionsInSmokeTests(input_api, output_api))
results.extend(input_api.RunTests(input_api.canned_checks.GetPylint(
input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
pylintrc='pylintrc')))
return results
def _GetPathsToPrepend(input_api):
perf_dir = input_api.PresubmitLocalPath()
chromium_src_dir = input_api.os_path.join(perf_dir, '..', '..')
telemetry_dir = input_api.os_path.join(
chromium_src_dir, 'third_party', 'catapult', 'telemetry')
typ_dir = input_api.os_path.join(
chromium_src_dir, 'third_party', 'catapult', 'third_party', 'typ')
experimental_dir = input_api.os_path.join(
chromium_src_dir, 'third_party', 'catapult', 'experimental')
tracing_dir = input_api.os_path.join(
chromium_src_dir, 'third_party', 'catapult', 'tracing')
py_utils_dir = input_api.os_path.join(
chromium_src_dir, 'third_party', 'catapult', 'common', 'py_utils')
android_pylib_dir = input_api.os_path.join(
chromium_src_dir, 'build', 'android')
testing_dir = input_api.os_path.join(chromium_src_dir, 'testing')
return [
telemetry_dir,
typ_dir,
input_api.os_path.join(telemetry_dir, 'third_party', 'mock'),
experimental_dir,
tracing_dir,
py_utils_dir,
android_pylib_dir,
testing_dir,
]
def _RunArgs(args, input_api):
p = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.STDOUT)
out, _ = p.communicate()
return (out, p.returncode)
def _RunValidationScript(
input_api,
output_api,
script_path,
extra_args = None,
block_on_failure = None):
results = []
vpython = 'vpython3.bat' if input_api.is_windows else 'vpython3'
perf_dir = input_api.PresubmitLocalPath()
script_abs_path = input_api.os_path.join(perf_dir, script_path)
extra_args = extra_args if extra_args else []
args = [vpython, script_abs_path] + extra_args
out, return_code = _RunArgs(args, input_api)
if return_code:
error_msg = 'Script ' + script_path + ' failed.'
if block_on_failure is None or block_on_failure:
results.append(output_api.PresubmitError(
error_msg, long_text=out))
else:
results.append(output_api.PresubmitPromptWarning(
error_msg, long_text=out))
return results
def _CheckExpectations(input_api, output_api):
return _RunValidationScript(
input_api,
output_api,
'validate_story_expectation_data',
)
def _CheckPerfDataCurrentness(input_api, output_api, block_on_failure):
return _RunValidationScript(
input_api,
output_api,
'generate_perf_data',
['--validate-only'],
block_on_failure
)
def _CheckPerfJsonConfigs(input_api, output_api, block_on_failure):
return _RunValidationScript(
input_api,
output_api,
'validate_perf_json_config',
['--validate-only'],
block_on_failure
)
def _CheckWprShaFiles(input_api, output_api):
"""Check whether the wpr sha files have matching URLs."""
wpr_archive_shas = []
for affected_file in input_api.AffectedFiles(include_deletes=False):
filename = affected_file.AbsoluteLocalPath()
if not filename.endswith('.sha1'):
continue
wpr_archive_shas.append(filename)
return _RunValidationScript(
input_api,
output_api,
'validate_wpr_archives',
wpr_archive_shas
)
def _CheckShardMaps(input_api, output_api, block_on_failure):
return _RunValidationScript(
input_api,
output_api,
'generate_perf_sharding.py',
['validate'],
block_on_failure
)
def _CheckJson(input_api, output_api):
"""Checks whether JSON files in this change can be parsed."""
for affected_file in input_api.AffectedFiles(include_deletes=False):
filename = affected_file.AbsoluteLocalPath()
if os.path.splitext(filename)[1] != '.json':
continue
    try:
      with open(filename) as json_file:
        input_api.json.load(json_file)
    except ValueError:
      return [output_api.PresubmitError('Error parsing JSON in %s!' % filename)]
return []
def _CheckVersionsInSmokeTests(input_api, output_api):
return _RunValidationScript(
input_api,
output_api,
input_api.os_path.join(
'benchmarks', 'system_health_load_tests_smoke_test.py'),
)
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api, block_on_failure=True))
return report
|
the-stack_106_22632 | #!/usr/bin/env python
import os
import argparse
## code adapted from PrepareAA.py commit 38e758c19776f02cd0e920bde2513536cccc489f
## https://github.com/jluebeck/PrepareAA
# Read the CNVkit .cns files
def convert_cnvkit_cnv_to_seeds(cnvkit_output_directory, bam):
base = os.path.splitext(os.path.basename(bam))[0]
with open(cnvkit_output_directory + base + ".segment.cns") as infile, open(cnvkit_output_directory + base + "_CNV_GAIN.bed",
'w') as outfile:
head = next(infile).rstrip().rsplit("\t")
for line in infile:
fields = line.rstrip().rsplit("\t")
s, e = int(fields[1]), int(fields[2])
cn_r = float(fields[4])
cn = 2 ** (cn_r + 1)
if cn >= args.cngain and e - s >= args.cnsize_min:
outline = "\t".join(fields[0:3] + ["CNVkit", str(cn)]) + "\n"
outfile.write(outline)
return cnvkit_output_directory + base + "_CNV_GAIN.bed"
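# Assumed CNVkit .cns layout: tab-separated columns of chromosome, start, end,
# gene, log2 (plus depth/probes/weight), so fields[4] above is the log2 copy
# ratio and cn = 2 ** (log2 + 1) converts it to an absolute copy number
# relative to a diploid genome before applying the gain/size thresholds.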
# MAIN #
if __name__ == '__main__':
# Parses the command line arguments
parser = argparse.ArgumentParser(
        description="A simple pipeline wrapper for AmpliconArchitect, invoking alignment, variant calling, "
                    "and CNV calling prior to AA. The CNV calling is necessary for running AA")
#parser.add_argument("-s", "--sample_name", help="sample name", required=True)
parser.add_argument("-o", "--output_directory", help="output directory names (will create if not already created)", required=True)
parser.add_argument("--sorted_bam", help="Sorted BAM file (aligned to an AA-supported reference.)", required=True)
parser.add_argument("--cngain", type=float, help="CN gain threshold to consider for AA seeding", default=4.5)
parser.add_argument("--cnsize_min", type=int, help="CN interval size (in bp) to consider for AA seeding",
default=50000)
args = parser.parse_args()
args.cnv_bed = convert_cnvkit_cnv_to_seeds(args.output_directory + "/", args.sorted_bam)
|
the-stack_106_22634 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
import llnl.util.tty as tty
import spack.compiler
import spack.util.executable
class Fj(Package):
"""The Fujitsu compiler system is a high performance, production quality
code generation tool designed for high performance parallel
computing workloads.
"""
homepage = "https://www.fujitsu.com/us/"
maintainers = ['t-karatsu']
def install(self, spec, prefix):
raise InstallError(
'Fujitsu compilers are not installable yet, but can be '
'detected on a system where they are supplied by vendor'
)
executables = ['^fcc', '^FCC', '^frt']
@classmethod
def determine_version(cls, exe):
version_regex = re.compile(r'\((?:FCC|FRT)\) ([a-z\d.]+)')
try:
output = spack.compiler.get_compiler_version_output(
exe, '--version'
)
match = version_regex.search(output)
if match:
return match.group(1)
except spack.util.executable.ProcessError:
pass
except Exception as e:
tty.debug(e)
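    # The regex above pulls the version out of strings like the following
    # (illustrative) ``fcc --version`` / ``frt --version`` output:
    #   fcc (FCC) 4.5.0 20210420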
@classmethod
def determine_variants(cls, exes, version_str):
compilers = {}
for exe in exes:
if 'fcc' in exe:
compilers['c'] = exe
if 'FCC' in exe:
compilers['cxx'] = exe
if 'frt' in exe:
compilers['fortran'] = exe
return '', {'compilers': compilers}
|
the-stack_106_22635 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue698-base", "issue698-v1"]
CONFIGS = [
IssueConfig(
"blind",
["--search", "astar(blind())"],
driver_options=["--search-time-limit", "60s"]
)
]
sys.path.append(BENCHMARKS_DIR)
import suites
SUITE = suites.suite_optimal_strips()
ENVIRONMENT = MaiaEnvironment(
priority=0, email="[email protected]")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_command("parser", ["custom-parser.py"])
exp.add_comparison_table_step(
attributes=exp.DEFAULT_TABLE_ATTRIBUTES +
["successor_generator_time", "reopened_until_last_jump"])
exp.add_scatter_plot_step(attributes=["successor_generator_time"])
exp()
|
the-stack_106_22636 | from consoleme.config import config
from consoleme.handlers.base import BaseAPIV1Handler
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme.lib.auth import (
can_admin_policies,
can_create_roles,
can_delete_roles,
can_edit_dynamic_config,
)
from consoleme.lib.generic import get_random_security_logo, is_in_group
from consoleme.lib.plugins import get_plugin_by_name
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
log = config.get_logger()
class UserProfileHandler(BaseAPIV1Handler):
async def get(self):
"""
Provide information about site configuration for the frontend
:return:
"""
is_contractor = config.config_plugin().is_contractor(self.user)
site_config = {
"consoleme_logo": await get_random_security_logo(),
"google_tracking_uri": config.get("google_analytics.tracking_url"),
"documentation_url": config.get("documentation_page"),
"support_contact": config.get("support_contact"),
"support_chat_url": config.get("support_chat_url"),
"security_logo": config.get("security_logo.image"),
"security_url": config.get("security_logo.url"),
}
user_profile = {
"site_config": site_config,
"user": self.user,
"can_logout": config.get("auth.set_auth_cookie"),
"is_contractor": is_contractor,
"employee_photo_url": config.config_plugin().get_employee_photo_url(
self.user
),
"employee_info_url": config.config_plugin().get_employee_info_url(
self.user
),
"authorization": {
"can_edit_policies": can_admin_policies(self.user, self.groups),
"can_create_roles": can_create_roles(self.user, self.groups),
"can_delete_roles": can_delete_roles(self.user, self.groups),
},
"pages": {
"header": {
"custom_header_message_title": config.get(
"headers.custom_header_message.title", ""
),
"custom_header_message_text": config.get(
"headers.custom_header_message.text", ""
),
},
"groups": {
"enabled": config.get("headers.group_access.enabled", False)
},
"users": {"enabled": config.get("headers.group_access.enabled", False)},
"policies": {
"enabled": config.get("headers.policies.enabled", True)
and not is_contractor
},
"self_service": {
"enabled": config.get("enable_self_service", True)
and not is_contractor
},
"api_health": {
"enabled": is_in_group(
self.user,
self.groups,
config.get("groups.can_edit_health_alert", []),
)
},
"audit": {
"enabled": is_in_group(
self.user, self.groups, config.get("groups.can_audit", [])
)
},
"config": {"enabled": can_edit_dynamic_config(self.user, self.groups)},
},
"accounts": await get_account_id_to_name_mapping(),
}
self.set_header("Content-Type", "application/json")
self.write(user_profile)
|
the-stack_106_22637 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""For storing and accessing flags."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import json
import absl.flags
absl.flags.DEFINE_string("nn_flags", None, "Flags dict as b64-encoded JSON")
class Flags(dict):
"""For storing and accessing flags."""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def print_values(self, indent=1):
"""Print the values in this flags object."""
for k, v in sorted(self.items()):
if isinstance(v, Flags):
print("{}{}:".format("\t" * indent, k))
v.print_values(indent=indent + 1)
else:
print("{}{}: {}".format("\t" * indent, k, v))
def load(self, other):
"""Recursively copy values from another flags object into this one."""
def recursive_update(flags, dict_):
for k in dict_:
if isinstance(dict_[k], dict):
flags[k] = Flags()
recursive_update(flags[k], dict_[k])
else:
flags[k] = dict_[k]
recursive_update(self, other)
def load_json(self, json_):
"""Copy values from a JSON string into this flags object."""
if json_.startswith("b64"):
json_ = base64.b64decode(json_[3:])
other = json.loads(json_)
self.load(other)
def load_from_cmdline(self):
self.load_json(absl.flags.FLAGS.nn_flags)
def set_if_empty(self, key, val):
"""If there's no current value for the given key, assign the given value."""
if key not in self:
self[key] = val
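# Illustrative usage (keys and values hypothetical):
#   flags = Flags()
#   flags.load_json('{"model": {"hidden_units": 128}}')
#   print(flags.model.hidden_units)  # -> 128
#   flags.set_if_empty("seed", 0)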
|
the-stack_106_22638 | #!/bin/python
# -*- coding: utf-8 -*-
"""contains functions related to (re)compiling the model with different parameters
"""
import numpy as np
import time
from .stats import post_mean
def posterior_sampler(self, nsamples, seed=0, verbose=True):
"""Draw parameters from the posterior.
Parameters
----------
nsamples : int
Size of the sample
Returns
-------
array
Numpy array of parameters
"""
import random
random.seed(seed)
sample = self.get_chain()[-self.get_tune:]
sample = sample.reshape(-1, sample.shape[(-1)])
sample = random.choices(sample, k=nsamples)
return sample
def sample_box(self, dim0, dim1=None, bounds=None, lp_rule=None, verbose=False):
"""Sample from a hypercube
"""
import chaospy
bnd = bounds or np.array(self.fdict['prior_bounds'])
dim1 = dim1 or self.ndim
rule = lp_rule or 'S'
res = chaospy.Uniform(0, 1).sample(size=(dim0, dim1), rule=rule)
res = (bnd[1] - bnd[0]) * res + bnd[0]
return res
def prior_sampler(self, nsamples, seed=0, test_lprob=False, lks=None, verbose=True, debug=False, **args):
"""Draw parameters from prior.
Parameters
----------
nsamples : int
Size of the prior sample
seed : int, optional
Set the random seed (0 by default)
test_lprob : bool, optional
Whether to ensure that drawn parameters have a finite likelihood (False by default)
verbose : bool, optional
debug : bool, optional
Returns
-------
array
Numpy array of parameters
"""
import tqdm
from grgrlib.core import map2arr
from grgrlib.multiprocessing import serializer
l_max, k_max = lks or (None, None)
if test_lprob and not hasattr(self, 'ndim'):
self.prep_estim(load_R=True, verbose=verbose > 2)
frozen_prior = self.fdict.get('frozen_prior')
if not np.any(frozen_prior):
from .stats import get_prior
frozen_prior = get_prior(self.prior, verbose=verbose)[0]
self.debug |= debug
if hasattr(self, 'pool'):
from .estimation import create_pool
create_pool(self)
set_par = serializer(self.set_par)
get_par = serializer(self.get_par)
lprob = serializer(self.lprob) if test_lprob else None
def runner(locseed):
np.random.seed(seed+locseed)
done = False
no = 0
while not done:
no += 1
with np.warnings.catch_warnings(record=False):
try:
np.warnings.filterwarnings('error')
rst = np.random.randint(2**31) # win explodes with 2**32
pdraw = [pl.rvs(random_state=rst+sn)
for sn, pl in enumerate(frozen_prior)]
if test_lprob:
draw_prob = lprob(pdraw, linear=None,
verbose=verbose > 1)
done = not np.isinf(draw_prob)
else:
set_par(pdraw)
done = True
except Exception as e:
if verbose > 1:
print(str(e)+' (%s) ' % no)
return pdraw, no
if verbose > 1:
        print('[prior_sample:]'.ljust(15, ' ') + ' Sampling from the prior...')
wrapper = tqdm.tqdm if verbose < 2 else (lambda x, **kwarg: x)
pmap_sim = wrapper(self.mapper(runner, range(nsamples)), total=nsamples)
draws, nos = map2arr(pmap_sim)
if verbose:
smess = ''
if test_lprob:
smess = 'of zero likelihood, '
print('[prior_sample:]'.ljust(
15, ' ') + ' Sampling done. %2.2f%% of the prior is either %sindetermined or explosive.' % (100*(sum(nos)-nsamples)/sum(nos), smess))
return draws
def get_par(self, dummy=None, npar=None, asdict=False, full=True, nsamples=1, verbose=False, roundto=5, debug=False, **args):
"""Get parameters. Tries to figure out what you want.
Parameters
----------
dummy : str, optional
Can be `None`, a parameter name, a parameter set out of {'calib', 'init', 'prior_mean', 'best', 'mode', 'mcmc_mode', 'post_mean', 'posterior_mean'} or one of {'prior', 'post', 'posterior'}.
If `None`, returns the current parameters (default). If there are no current parameters, this defaults to 'best'.
'calib' will return the calibration in the main body of the *.yaml (`parameters`).
'init' are the initial values (first column) in the `prior` section of the *.yaml.
'mode' is the highest known mode from any sort of parameter estimation.
'best' will default to 'mode' if it exists and otherwise fall back to 'init'.
'posterior_mean' and 'post_mean' are the same thing.
'posterior_mode', 'post_mode' and 'mcmc_mode' are the same thing.
'prior' or 'post'/'posterior' will draw random samples. Obviously, 'posterior', 'mode' etc are only available if a posterior/chain exists.
        NOTE: calling get_par with a set of parameters is the only way to recover the calibrated parameters that are not included in the prior (if you have changed them). All other options will work incrementally on (potential) previous edits of these parameters.
asdict : bool, optional
Returns a dict of the values if `True` and an array otherwise (default is `False`).
full : bool, optional
Whether to return all parameters or the estimated ones only. (default: True)
nsamples : int, optional
Size of the sample. Defaults to 1
verbose : bool, optional
        Print additional output information (default is `False`)
roundto : int, optional
Rounding of additional output if verbose, defaults to 5
args : various, optional
        Auxiliary arguments passed to `gen_sys` calls
Returns
-------
array or dict
Numpy array of parameters or dict of parameters
"""
from .gensys import gen_sys_from_yaml as gen_sys
if not hasattr(self, 'par'):
gen_sys(self, verbose=verbose, **args)
pfnames, pffunc = self.parafunc
pars_str = [str(p) for p in self.parameters]
pars = np.array(self.par) if hasattr(
self, 'par') else np.array(self.par_fix)
if npar is not None:
if len(npar) != len(self.par_fix):
pars[self.prior_arg] = npar
else:
pars = npar
if dummy is None:
try:
par_cand = np.array(pars)[self.prior_arg]
except:
par_cand = get_par(self, 'best', asdict=False, full=False,
verbose=verbose, **args)
elif not isinstance(dummy, str) and len(dummy) == len(self.par_fix):
par_cand = dummy[self.prior_arg]
elif not isinstance(dummy, str) and len(dummy) == len(self.prior_arg):
par_cand = dummy
else:
if dummy in pars_str:
p = pars[pars_str.index(dummy)]
if verbose:
print('[get_par:]'.ljust(15, ' ') + '%s = %s' % (dummy, p))
return p
if dummy in pfnames:
p = pffunc(pars)[pfnames.index(dummy)]
if verbose:
print('[get_par:]'.ljust(15, ' ') + '%s = %s' % (dummy, p))
return p
if dummy == 'cov_mat':
gen_sys(self, pars)
p = self.QQ(self.ppar)
if verbose:
print('[get_par:]'.ljust(15, ' ') + '%s = %s' % (dummy, p))
return p
if dummy == 'best':
try:
par_cand = get_par(self, 'mode', asdict=False, full=False,
verbose=verbose, **args)
except:
par_cand = get_par(self, 'init', asdict=False, full=False,
verbose=verbose, **args)
else:
old_par = self.par
pars = self.par_fix
self.par = self.par_fix
if dummy == 'prior':
par_cand = prior_sampler(self, nsamples=nsamples,
verbose=verbose, debug=debug, **args)
elif dummy in ('post', 'posterior'):
par_cand = posterior_sampler(self, nsamples=nsamples,
verbose=verbose, **args)
elif dummy == 'posterior_mean' or dummy == 'post_mean':
par_cand = post_mean(self)
elif dummy == 'mode':
par_cand = self.fdict['mode_x']
elif dummy in ('mcmc_mode', 'mode_mcmc', 'posterior_mode', 'post_mode'):
par_cand = self.fdict['mcmc_mode_x']
elif dummy == 'calib':
par_cand = self.par_fix[self.prior_arg].copy()
elif dummy == 'prior_mean':
par_cand = []
for pp in self.prior.keys():
if self.prior[pp][3] == 'uniform':
par_cand.append(
0.5 * self.prior[pp][(-2)] + 0.5 * self.prior[pp][(-1)])
else:
par_cand.append(self.prior[pp][(-2)])
elif dummy == 'adj_prior_mean':
par_cand = []
for pp in self.prior.keys():
if self.prior[pp][3] == 'inv_gamma_dynare':
par_cand.append(self.prior[pp][(-2)] * 10)
else:
if self.prior[pp][3] == 'uniform':
par_cand.append(
0.5 * self.prior[pp][(-2)] + 0.5 * self.prior[pp][(-1)])
else:
par_cand.append(self.prior[pp][(-2)])
elif dummy == 'init':
par_cand = self.fdict['init_value']
for i in range(self.ndim):
if par_cand[i] is None:
par_cand[i] = self.par_fix[self.prior_arg][i]
else:
self.par = old_par
raise KeyError(
"Parameter or parametrization '%s' does not fit/exist." % dummy)
if full:
if isinstance(dummy, str) and dummy in ('prior', 'post', 'posterior'):
par = np.tile(pars, (nsamples, 1))
for i in range(nsamples):
par[i][self.prior_arg] = par_cand[i]
else:
par = np.array(pars)
par[self.prior_arg] = par_cand
if not asdict:
return par
pdict = dict(zip(pars_str, np.round(par, roundto)))
pfdict = dict(zip(pfnames, np.round(pffunc(par), roundto)))
return (
pdict, pfdict)
if asdict:
return dict(zip(np.array(pars_str)[self.prior_arg], np.round(par_cand, roundto)))
if nsamples > 1:
if dummy not in ('prior', 'post', 'posterior'):
par_cand = par_cand * \
(1 + 0.001 * np.random.randn(nsamples, len(par_cand)))
return par_cand
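# Illustrative calls, assuming these functions are bound as methods on a pydsge
# model instance (the name `mod` and the parameter set are hypothetical, and a
# stored mode/posterior chain must exist for the respective options):
#   mod.get_par('mode')                           # full parameter vector at the best known mode
#   mod.get_par('post', nsamples=200)             # 200 posterior draws (full parameter vectors)
#   mod.get_par('calib', full=False, asdict=True) # estimated parameters as a {name: value} dict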
def get_cov(self, npar=None, **args):
"""get the covariance matrix"""
return get_par(self, dummy='cov_mat', npar=npar, **args)
def set_par(self, dummy=None, setpar=None, npar=None, verbose=False, return_vv=False, roundto=5, **args):
"""Set the current parameter values.
In essence, this is a wrapper around `get_par` which also compiles the transition function with the desired parameters.
Parameters
----------
dummy : str or array, optional
If an array, sets all parameters. If a string and a parameter name,`setpar` must be provided to define the value of this parameter. Otherwise, `dummy` is forwarded to `get_par` and the returning value(s) are set as parameters.
setpar : float, optional
        Parameter value to be set. Of course, only if `dummy` is a parameter name.
    npar : array, optional
        Vector of parameters. If given, this vector will be altered and returned without recompiling the model. THIS WILL ALTER THE PARAMETER WITHOUT MAKING A COPY!
verbose : bool
Whether to output more or less informative messages (defaults to False)
roundto : int
Define output precision if output is verbose. (default: 5)
args : keyword args
Keyword arguments forwarded to the `gen_sys` call.
"""
from .gensys import gen_sys_from_yaml as gen_sys
pfnames, pffunc = self.parafunc
pars_str = [str(p) for p in self.parameters]
par = np.array(self.par) if hasattr(
self, 'par') else np.array(self.par_fix)
if setpar is None:
if dummy is None:
par = get_par(self, dummy=dummy, asdict=False, full=True,
verbose=verbose, **args)
elif len(dummy) == len(self.par_fix):
par = dummy
elif len(dummy) == len(self.prior_arg):
par[self.prior_arg] = dummy
else:
par = get_par(self, dummy=dummy, asdict=False, full=True,
verbose=verbose, **args)
elif dummy in pars_str:
if npar is not None:
npar = npar.copy()
if len(npar) == len(self.prior_arg):
npar[self.prior_names.index(dummy)] = setpar
else:
npar[pars_str.index(dummy)] = setpar
if return_vv:
return npar, self.vv
return npar
par[pars_str.index(dummy)] = setpar
elif dummy in pfnames:
raise SyntaxError(
"Can not set parameter '%s' that is a function of other parameters." % dummy)
else:
raise SyntaxError(
"Parameter '%s' is not defined for this model." % dummy)
gen_sys(self, par=list(par), verbose=verbose, **args)
if hasattr(self, 'filter'):
Q = self.QQ(self.ppar) @ self.QQ(self.ppar)
self.filter.Q = Q
if verbose > 1:
pdict = dict(zip(pars_str, np.round(self.par, roundto)))
pfdict = dict(zip(pfnames, np.round(pffunc(self.par), roundto)))
print('[set_par:]'.ljust(15, ' ') +
' Parameter(s):\n%s\n%s' % (pdict, pfdict))
if return_vv:
return get_par(self), self.vv
return get_par(self)
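# Illustrative calls (instance and parameter names hypothetical):
#   mod.set_par('sigma_z', 0.3)   # set a single parameter and recompile the system
#   mod.set_par('mode')           # reload the stored mode and recompile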
def box_check(self, par=None):
"""Check if parameterset lies outside the box constraints
Parameters
----------
par : array or list, optional
The parameter set to check
"""
if par is None:
par = self.par
for i, name in enumerate(self.fdict['prior_names']):
lb, ub = self.fdict['prior_bounds']
if par[i] < lb[i]:
print('[box_check:]'.ljust(
15, ' ') + ' Parameter %s of %s lower than lb of %s.' % (name, par[i].round(5), lb[i]))
if par[i] > ub[i]:
print('[box_check:]'.ljust(
15, ' ') + ' Parameter %s of %s higher than ub of %s.' % (name, par[i].round(5), ub[i]))
return
|
the-stack_106_22640 | import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import AUC
from tensorflow.keras.callbacks import EarlyStopping
from mtrec.models import MMoE
from utils import build_census
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def main():
"""
========================= Hyper Parameters =======================
"""
train_file = 'data/census/census-income.data.gz'
test_file = 'data/census/census-income.test.gz'
embed_dim = 4
num_experts = 8
expert_units = [16, 8]
gate_units = [16, 8, 2]
tower_units = [8]
epochs = 10
batch_size = 1024
learning_rate = 0.001
"""
========================= Create dataset =======================
"""
sparse_feature_columns, train, test = build_census(train_file, test_file, embed_dim)
train_X, train_y = train
test_X, test_y = test
task_names = list(train_y.keys())
"""
========================= Build Model =======================
"""
model = MMoE(task_names, num_experts, sparse_feature_columns, expert_units,
gate_units, tower_units)
model.summary()
# tf.keras.utils.plot_model(model, "model_with_shape_info.png", show_shapes=True)
"""
============================Compile============================
"""
model.compile(loss={'income': 'binary_crossentropy', 'marital': 'binary_crossentropy'},
optimizer=Adam(learning_rate=learning_rate),
metrics=[AUC()])
"""
==============================Fit==============================
"""
model.fit(
train_X,
train_y,
epochs=epochs,
callbacks=[EarlyStopping(monitor='val_marital_loss', patience=2, restore_best_weights=True)], # checkpoint,
batch_size=batch_size,
validation_split=0.1
)
"""
===========================Test==============================
"""
test_metric = model.evaluate(test_X, test_y, batch_size=batch_size)
print('test income AUC: %f, marital AUC: %f' % (test_metric[3], test_metric[4]))
if __name__ == '__main__':
main()
|
the-stack_106_22641 | # Ran Jan 16 to simulate reads with RSEM from the monocle model
# The monocle model is a tobit model on the cluster1 log tpm
# perturbations made on 20% of transcripts with at least 2 fold change, simulated from a log normal effect size distribution
# also used to simulate nonperturb
import os
import glob
import numpy
import sys
RSEM_path="/home/lynnyi/RSEM-1.3.0"
RSEM_command= RSEM_path + "/rsem-simulate-reads"
RSEM_ref_path= RSEM_path + "/ref/human"
RSEM_model= RSEM_path + "/exp/Trapnell_cluster1.stat/Trapnell_cluster1.model"
theta = "0.2"
i = sys.argv[1]
o = sys.argv[2]
reads_logmean = 14.42
reads_logsd = 0.3336
with open('/home/lynnyi/dirichlet/log.txt', 'w') as logfile:
reads = int(numpy.random.lognormal(mean=reads_logmean, sigma = reads_logsd))
#reads = 2000000
cmd = RSEM_command + " " + RSEM_ref_path + " " + RSEM_model + " " + i + " " + theta + " " + str(reads) + " " + o + " --seed 0 "
logfile.write(cmd + '\n')
print(cmd)
os.system(cmd)
|
the-stack_106_22642 | import logging
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.db import models
from mayan.apps.acls.models import AccessControlList
from mayan.apps.common.serialization import yaml_load
from .classes import Layer
from .transformations import BaseTransformation
logger = logging.getLogger(name=__name__)
class LayerTransformationManager(models.Manager):
def get_for_object(
self, obj, as_classes=False, maximum_layer_order=None,
only_stored_layer=None, user=None
):
"""
as_classes == True returns the transformation classes from .classes
ready to be feed to the converter class
"""
Layer.update()
StoredLayer = apps.get_model(
app_label='converter', model_name='StoredLayer'
)
content_type = ContentType.objects.get_for_model(model=obj)
transformations = self.filter(
enabled=True, object_layer__content_type=content_type,
object_layer__object_id=obj.pk, object_layer__enabled=True
)
if maximum_layer_order is not None:
access_layers = StoredLayer.objects.filter(
order__lte=maximum_layer_order
)
exclude_layers = StoredLayer.objects.filter(
order__gt=maximum_layer_order
)
else:
access_layers = StoredLayer.objects.all()
exclude_layers = StoredLayer.objects.none()
for stored_layer in access_layers:
try:
layer_class = stored_layer.get_layer()
except KeyError:
"""
This was a class defined but later erased. Ignore it.
"""
else:
access_permission = layer_class.permissions.get(
'access_permission', None
)
if access_permission:
try:
AccessControlList.objects.check_access(
obj=obj, permissions=(access_permission,), user=user
)
except PermissionDenied:
access_layers = access_layers.exclude(pk=stored_layer.pk)
for stored_layer in exclude_layers:
exclude_permission = stored_layer.get_layer().permissions.get(
'exclude_permission', None
)
if exclude_permission:
try:
AccessControlList.objects.check_access(
obj=obj, permissions=(exclude_permission,), user=user
)
except PermissionDenied:
pass
else:
exclude_layers = exclude_layers.exclude(pk=stored_layer.pk)
if only_stored_layer:
transformations = transformations.filter(
object_layer__stored_layer=only_stored_layer
)
transformations = transformations.filter(
object_layer__stored_layer__in=access_layers
)
transformations = transformations.exclude(
object_layer__stored_layer__in=exclude_layers
)
if as_classes:
result = []
for transformation in transformations:
try:
transformation_class = BaseTransformation.get(
transformation.name
)
except KeyError:
                    # Non-existent transformation; log it but don't raise an error.
                    logger.error(
                        'Non-existent transformation: %s for %s',
transformation.name, obj
)
else:
try:
# Some transformations don't require arguments
# return an empty dictionary as ** doesn't allow None
if transformation.arguments:
kwargs = yaml_load(
stream=transformation.arguments,
)
else:
kwargs = {}
result.append(
transformation_class(
**kwargs
)
)
except Exception as exception:
logger.error(
'Error while parsing transformation "%s", '
'arguments "%s", for object "%s"; %s',
transformation, transformation.arguments, obj,
exception, exc_info=True
)
return result
else:
return transformations
class ObjectLayerManager(models.Manager):
def get_for(self, layer, obj):
content_type = ContentType.objects.get_for_model(model=obj)
return self.get_or_create(
content_type=content_type, object_id=obj.pk,
stored_layer=layer.stored_layer
)
|
the-stack_106_22643 | from pylab import *
from scipy.stats import norm
# NOTE: scipy.linalg.matfuncs.expm2 has been removed from SciPy; it was only
# needed by the commented-out propagator-based evolve() below, so the import
# is left disabled here.
# from scipy.linalg.matfuncs import expm2
import os
def phi(x):
k = 5
#return norm.pdf(x-4) * exp(1j * k * x) + norm.pdf(x-40) * exp(-1j * k * x)
return norm.pdf(x-4) + norm.pdf(x-40)
def phi2(x):
k = 5
#return norm.pdf(x-4.1) * exp(1j * k * x) + norm.pdf(x-39.9) * exp(-1j * k * x)
return norm.pdf(x-4.1) + norm.pdf(x-39.9)
X = arange(-200,200,.5)
phis = [phi(X),phi2(X)]
#def evolve(initial,H,tfinal,timestep):
#def propagator(H,t):
#return expm2(-1j*H*t) #expm2: compute the matrix exponential using eigenvalue decomposition
#time = arange(0,tfinal,timestep)
#states = [initial]
#U = propagator(H,timestep)
##evolve the system
#for i in range(len(time)-1):
#states.append(array(dot(U,states[-1])))
#return time, states
def laplacematrix(dx,nx):
return matrix((diag(ones(nx-1),1) + diag(ones(nx-1),-1) + diag(-2*ones(nx))) / (dx*dx))
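# evolve() below advances the discretised wave equation with a leapfrog step,
#     u_{t+1} = 2*u_t - u_{t-1} + (c*dt)**2 * L @ u_t,
# where L is the matrix built above (second difference divided by dx**2).
# With dt = dx = 0.1 and c = 1, the 0.1 * 0.1 factor in evolve() is (c*dt)**2
# and the Courant number c*dt/dx equals 1, the stability limit of this scheme.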
def evolve(phis):
lastphi = phis[-1]
size = len(lastphi)
#newphi = array(dot(laplacematrix(.1, size), lastphi)).flatten() + 2 * lastphi - phis[-2]
newphi = .1 * .1 *array(dot(laplacematrix(.1, size), lastphi)).flatten() + 2 * lastphi - phis[-2]
return array(newphi)
for i in range(30):
phis.append(evolve(phis))
def createvideo(spectrums, plotter):
#http://dawes.wordpress.com/2007/12/04/animating-png-files/
#http://stackoverflow.com/questions/4092927/generating-movie-from-python-without-saving-individual-frames-to-files
#http://www.scipy.org/Cookbook/Matplotlib/Animations
import tempfile
directory = tempfile.gettempdir()
command = ('ffmpeg','-i', directory + '/%03d.png', 'out.avi', '-r', '5')
#convert -delay 50 Th*.JPG anim.mpg
ymin, ymax = min(spectrums[0]), max(spectrums[0])
for i in range(len(spectrums)):
figure()
ylim(ymin, ymax)
plotter(spectrums[i])
filename = directory + '/%03d.png'%i
savefig(filename)
print('Wrote file '+ filename)
clf()
os.spawnvp(os.P_WAIT, 'ffmpeg', command)
createvideo(phis,plot)
#show()
#for i in phis:
#figure()
#plot(X,i)
#show()
|
the-stack_106_22645 | from collections import namedtuple
import threading
import time
import wrapt
from neotiles.exceptions import NeoTilesError
from neotiles.pixelcolor import PixelColor
MatrixSize = namedtuple('MatrixSize', 'cols rows')
TileSize = namedtuple('TileSize', 'cols rows')
TilePosition = namedtuple('TilePosition', 'x y')
PixelPosition = namedtuple('PixelPosition', 'x y')
class StoppableThread(threading.Thread):
"""
Thread class with a stop() method. The thread itself has to check regularly
for the stopped() condition.
http://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread-in-python
"""
def __init__(self, **kwargs):
super(StoppableThread, self).__init__(**kwargs)
self._stop_requested = threading.Event()
def stop(self):
self._stop_requested.set()
def stopped(self):
return self._stop_requested.isSet()
class TileManager(object):
"""
Manages all the tiles displayed on a hardware matrix (either a neopixel
matrix or an RGB matrix).
TileManager is the only class in neotiles which affects the actual hardware
matrix.
Example usage: ::
from neotiles import TileManager
from neotiles.matrixes import NTNeoPixelMatrix
tiles = TileManager(NTNeoPixelMatrix(size=(8, 8), led_pin=18))
Or with an RGB matrix: ::
from neotiles import TileManager
from neotiles.matrixes import NTRGBMatrix
tiles = TileManager(NTRGBMatrix(rows=32, chain=2))
**Animation**:
The ``draw_fps`` (draw frames per second) parameter controls how many times
per second the animation loop -- which runs in a separate thread -- will
call :meth:`draw_hardware_matrix` which in turn calls all the tiles'
:meth:`Tile.draw` methods. If ``draw_fps=None`` then the matrix will not
be drawn automatically and you must call :meth:`draw_hardware_matrix`
manually.
The animation loop will attempt to re-draw the matrix at a rate of
``draw_fps`` times per second. This rate may or may not be achieved
depending on whatever else the CPU is doing, including the compute load
created by the tiles' :meth:`Tile.draw` methods.
The animation loop assumes that something else will be sending data to the
tiles via the :attr:`Tile.data` attribute or the
:meth:`TileManager.send_data_to_tiles` method. If that isn't happening
then the animation loop will likely keep re-drawing the matrix with the
same unchanging pixel colors.
:param matrix: (:class:`~neotiles.matrixes.NTNeoPixelMatrix` |
:class:`~neotiles.matrixes.NTRGBMatrix`) The matrix being managed.
:param draw_fps: (int|None) The frame rate for the drawing animation loop.
"""
def __init__(self, matrix, draw_fps=10):
self.hardware_matrix = matrix
self._draw_fps = draw_fps
self._animation_thread = None
self._pixels = None
self._clear_pixels()
# List of tiles we'll be displaying inside the matrix.
self._managed_tiles = []
def __repr__(self):
return '{}(matrix={}, draw_fps={})'.format(
self.__class__.__name__,
repr(self.hardware_matrix),
self._draw_fps
)
def __str__(self):
matrix = self.pixels
matrix_string = ''
pixel_num = 0
# Do a scan to see if any pixels have a white component. If any do
# then we'll display the white component for all of them; otherwise
# we'll suppress it in the interest of space.
display_white = False
for row_num in range(len(matrix)):
for col_num in range(len(matrix[row_num])):
color = matrix[row_num][col_num]
if color.white is not None:
display_white = True
# Display a 2-dimensional grid of pixel values.
for row_num in range(len(matrix)):
for col_num in range(len(matrix[row_num])):
color = matrix[row_num][col_num]
denormalized = color.hardware_components
                if len(denormalized) == 3 and display_white:
                    # Pad a zero white component so the 4-value format below
                    # lines up with RGB-only pixels.
                    display_components = list(denormalized) + [0]
                else:
                    display_components = denormalized
if display_white:
matrix_string += (
'[{:2d}] {:-3d},{:-3d},{:-3d},{:-3d} '.format(
pixel_num, *display_components)
)
else:
matrix_string += '[{:2d}] {:-3d},{:-3d},{:-3d} '.format(
pixel_num, *display_components)
pixel_num += 1
matrix_string += '\n'
return matrix_string.rstrip()
def _clear_pixels(self):
"""
Generate a 2D matrix for the given size of the neopixel matrix where
        all the pixels are set to off (0, 0, 0, 0).
:return: ([[:class:`PixelColor`]]) 2D matrix of PixelColor objects all
set to (0, 0, 0, 0).
"""
self._pixels = [
[PixelColor(0, 0, 0, 0) for col in range(self.matrix_size.cols)]
for row in range(self.matrix_size.rows)
]
@wrapt.synchronized
def _set_pixels_from_tiles(self):
"""
Create a 2D matrix representing the entire pixel matrix, made up of
each of the individual tiles' colors for each tile pixel.
:return: ([[matrix]]) 2D list of :class:`PixelColor` objects.
:raises: :class:`NeoTilesError` if an attempt is made to render a
pixel outside of the neopixel matrix's dimensions.
"""
self._clear_pixels()
# Set the matrix pixels to the colors of each tile in turn. If any
# tiles happen to overlap then the last one processed will win.
for managed_tile in self._managed_tiles:
tile_object = managed_tile['tile_object']
if not tile_object.visible:
continue
# Call the draw() method of any tile which is flagged as animating.
if tile_object.animate:
tile_object.draw()
# Retrieve the pixel colors of the tile.
tile_matrix = tile_object.pixels
# Draw the tile's pixels in the right place on the matrix
# (determined by the tile's root position).
for tile_row_num in range(len(tile_matrix)):
for tile_col_num in range(len(tile_matrix[tile_row_num])):
pixel_color = tile_matrix[tile_row_num][tile_col_num]
matrix_row = managed_tile['root'].y + tile_row_num
matrix_col = managed_tile['root'].x + tile_col_num
try:
self._pixels[matrix_row][matrix_col] = pixel_color
except IndexError:
raise NeoTilesError(
'Cannot render tile {}: pixel position ({}, {}) '
'is invalid for {}x{} matrix'.format(
managed_tile, matrix_col, matrix_row,
self.matrix_size.cols, self.matrix_size.rows
))
@wrapt.synchronized
def _draw_hardware_matrix(self):
"""
Displays the current state of the matrix pixels on the neopixel
hardware.
"""
pixels = self.pixels
# Walk through the matrix from the top left to the bottom right,
# painting pixels as we go.
for row_num in range(len(pixels)):
for col_num in range(len(pixels[row_num])):
color = pixels[row_num][col_num]
self.hardware_matrix.setPixelColor(col_num, row_num, color)
self.hardware_matrix.show()
def _animate(self):
"""
Internal animation method. Spawns a new thread to manage the drawing
of the matrix at the (hoped-for) frame rate.
"""
frame_delay_millis = int(1000 / self._draw_fps)
current_time = int(round(time.time() * 1000))
next_frame_time = current_time + frame_delay_millis
# Draw the first frame. Not really necessary except perhaps for super
# slow frame rates.
self._set_pixels_from_tiles()
self._draw_hardware_matrix()
while True:
current_time = int(round(time.time() * 1000))
if current_time > next_frame_time:
next_frame_time = current_time + frame_delay_millis
self._set_pixels_from_tiles()
self._draw_hardware_matrix()
# The sleep time needs to be long enough that we're not churning
# through CPU cycles checking whether it's time to render the next
# frame or not; but short enough to allow us to render the next
# frame as soon as possible once we're past our next-frame wait
# time. This is a bit of a cheap-and-cheerful animation loop, and
# this sleep duration may not be ideal.
time.sleep(0.005)
if self._animation_thread.stopped():
return
def register_tile(
self, tile, size=None, root=None):
"""
Registers a tile with the TileManager. Registering a tile allows
its pixels to be drawn by the TileManager to the hardware matrix.
:param tile: (:class:`Tile`) The tile to register.
:param size: (:class:`TileSize`) Size of the tile (in cols and rows).
:param root: (:class:`TilePosition`) Position of the top left corner
of the tile within the hardware matrix.
"""
tile.size = TileSize(*size)
self._managed_tiles.append({
'root': TilePosition(*root),
'tile_object': tile,
})
# Set the tile manager's pixels based on this new tile. A future
# optimization would be to only render the new tile onto the manager's
# pixels.
self._set_pixels_from_tiles()
def deregister_tile(self, tile):
"""
Deregisters a tile from the tile manager. Deregistered tiles will
no longer be drawn to the hardware matrix.
If deregistering the tile results in no tiles being registered with
the manager then the matrix-drawing animation loop will be stopped
automatically.
:param tile: (:class:`Tile`) The tile being deregistered.
:return: (int) The number of tiles removed.
"""
removed = 0
for i, managed_tile in enumerate(self._managed_tiles):
if managed_tile['tile_object'] == tile:
del self._managed_tiles[i]
removed += 1
if len(self._managed_tiles) == 0:
self.draw_stop()
return removed
def send_data_to_tiles(self, data):
"""
Sends ``data`` to all registered tiles. The data will not be sent to
any tile which has its :attr:`Tile.is_accepting_data` attribute set to
``False``.
:param data: (any) Input data.
"""
for managed_tile in self._managed_tiles:
tile_object = managed_tile['tile_object']
if tile_object.is_accepting_data:
tile_object.data = data
def draw_hardware_matrix(self):
"""
Calls each tile's :meth:`Tile.draw` method to ensure that each tile's
pixels are up to date, with the result being displayed on the hardware
matrix.
If the TileManager's ``draw_fps`` is not ``None`` then this method will
also trigger the animation loop if it's not already running. This
means that you only need to call ``draw_hardware_matrix`` once if
you've enabled animation, as the animation loop will ensure that the
matrix is updated via each tile's :meth:`Tile.draw` method once per
animation frame.
"""
if self._draw_fps is None:
self._set_pixels_from_tiles()
self._draw_hardware_matrix()
return
if self._animation_thread is None:
self._animation_thread = StoppableThread(target=self._animate)
self._animation_thread.start()
def draw_stop(self):
"""
Stop the matrix-drawing animation loop.
"""
if self._animation_thread is not None:
self._animation_thread.stop()
self._animation_thread.join()
self._animation_thread = None
def clear_hardware_matrix(self):
"""
Clears the hardware matrix (sets all pixels to
``PixelColor(0, 0, 0, 0)``).
"""
pixels = self.pixels
black_pixel = PixelColor(0, 0, 0)
for row_num in range(len(pixels)):
for col_num in range(len(pixels[row_num])):
self.hardware_matrix.setPixelColor(
col_num, row_num, black_pixel)
self.hardware_matrix.show()
@property
def brightness(self):
"""
(int) Get or set the brightness of the matrix display. Range of
acceptable values will depend on the matrix type.
"""
return self.hardware_matrix.brightness
@brightness.setter
def brightness(self, val):
self.hardware_matrix.brightness = val
@property
def matrix_size(self):
"""
(:class:`MatrixSize`) Get the size of the matrix.
"""
return self.hardware_matrix.size
@property
def tiles(self):
"""
Get all registered tiles as a list of :class:`Tile` objects.
"""
return [tile['tile_object'] for tile in self._managed_tiles]
@property
def tiles_meta(self):
"""
Get all information on all registered tiles.
Tile information is returned as a list of dictionaries which contain
the ``root`` and ``tile_object`` keys (:class:`TilePosition` and
:class:`Tile` objects respectively).
If you just want the registered Tile instances then use :attr:`tiles`
instead.
"""
return self._managed_tiles
@property
def pixels(self):
"""
Get the tile manager's current pixel colors, which is a combination of
the current pixel colors of all the tiles being managed by the
TileManager.
The colors are returned as a two-dimensional list (with the same
dimensions as :attr:`matrix_size`) of :class:`~PixelColor` objects.
"""
return self._pixels
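# Illustrative follow-up to the class docstring example (the tile class, size
# and root position are hypothetical):
#   tiles.register_tile(ClockTile(), size=(4, 4), root=(0, 0))
#   tiles.draw_hardware_matrix()  # starts the animation loop when draw_fps is set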
|
the-stack_106_22646 | #!/usr/bin/env python
"""Safe(ish) evaluation of mathematical expression using Python's ast
module.
This module provides an Interpreter class that compiles a restricted set of
Python expressions and statements to Python's AST representation, and then
executes that representation using values held in a symbol table.
The symbol table is a simple dictionary, giving a simple, flat namespace.
This comes pre-loaded with many functions from Python's builtin and math
module. If numpy is installed, many numpy functions are also included.
Additional symbols can be added when an Interpreter is created, but the
user of that interpreter will not be able to import additional modules.
Expressions, including loops, conditionals, and function definitions can be
compiled into ast nodes and then evaluated later, using the current values
in the symbol table.
The result is a restricted, simplified version of Python meant for
numerical calculations that is somewhat safer than 'eval' because many
unsafe operations (such as 'import' and 'eval') are simply not allowed.
Many parts of Python syntax are supported, including:
for loops, while loops, if-then-elif-else conditionals
try-except (including 'finally')
function definitions with def
advanced slicing: a[::-1], array[-3:, :, ::2]
if-expressions: out = one_thing if TEST else other
list comprehension out = [sqrt(i) for i in values]
The following Python syntax elements are not supported:
Import, Exec, Lambda, Class, Global, Generators,
Yield, Decorators
In addition, while many builtin functions are supported, several builtin
functions that are considered unsafe are missing ('eval', 'exec', and
'getattr' for example)
"""
from __future__ import division, print_function
import ast
import time
import inspect
from sys import exc_info, stdout, stderr, version_info
import six
from .astutils import (UNSAFE_ATTRS, HAS_NUMPY, make_symbol_table, numpy,
op2func, ExceptionHolder, ReturnedNone,
valid_symbol_name)
builtins = __builtins__
if not isinstance(builtins, dict):
builtins = builtins.__dict__
ALL_NODES = ['arg', 'assert', 'assign', 'attribute', 'augassign', 'binop',
'boolop', 'break', 'call', 'compare', 'continue', 'delete',
'dict', 'ellipsis', 'excepthandler', 'expr', 'extslice',
'for', 'functiondef', 'if', 'ifexp', 'index', 'interrupt',
'list', 'listcomp', 'module', 'name', 'nameconstant', 'num',
'pass', 'print', 'raise', 'repr', 'return', 'slice', 'str',
'subscript', 'try', 'tuple', 'unaryop', 'while']
ERR_MAX_TIME = "Execution exceeded time limit, max runtime is {}s"
class Interpreter(object):
"""create an asteval Interpreter: a restricted, simplified interpreter
of mathematical expressions using Python syntax.
Parameters
----------
symtable : dict or `None`
dictionary to use as symbol table (if `None`, one will be created).
usersyms : dict or `None`
dictionary of user-defined symbols to add to symbol table.
writer : file-like or `None`
callable file-like object where standard output will be sent.
err_writer : file-like or `None`
callable file-like object where standard error will be sent.
use_numpy : bool
whether to use functions from numpy.
minimal : bool
create a minimal interpreter: disable all options (see Note 1).
no_if : bool
whether to support `if` blocks
no_for : bool
whether to support `for` blocks.
no_while : bool
whether to support `while` blocks.
no_try : bool
whether to support `try` blocks.
no_functiondef : bool
whether to support user-defined functions.
no_ifexp : bool
whether to support if expressions.
no_listcomp : bool
whether to support list comprehension.
no_augassign : bool
        whether to support augmented assignments (`a += 1`, etc).
no_assert : bool
whether to support `assert`.
no_delete : bool
whether to support `del`.
no_raise : bool
whether to support `raise`.
no_print : bool
whether to support `print`.
max_time : float
deprecated, unreliable. max_time will be dropped soon. (default 86400)
readonly_symbols : iterable or `None`
symbols that the user can not assign to
builtins_readonly : bool
whether to blacklist all symbols that are in the initial symtable
Notes
-----
1. setting `minimal=True` is equivalent to setting all
`no_***` options to `True`.
2. max_time is not reliable and no longer supported -- the keyword will be dropped soon.
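    Examples
    --------
    A minimal, illustrative session (variable names hypothetical)::

        aeval = Interpreter()
        aeval('x = sqrt(2)')
        aeval('y = x if x > 1 else 0')
        print(aeval.symtable['x'], aeval.symtable['y'])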
"""
def __init__(self, symtable=None, usersyms=None, writer=None,
err_writer=None, use_numpy=True, minimal=False,
no_if=False, no_for=False, no_while=False, no_try=False,
no_functiondef=False, no_ifexp=False, no_listcomp=False,
no_augassign=False, no_assert=False, no_delete=False,
no_raise=False, no_print=False, max_time=86400,
readonly_symbols=None, builtins_readonly=False):
self.writer = writer or stdout
self.err_writer = err_writer or stderr
if symtable is None:
if usersyms is None:
usersyms = {}
symtable = make_symbol_table(use_numpy=use_numpy, **usersyms)
symtable['print'] = self._printer
self.symtable = symtable
self._interrupt = None
self.error = []
self.error_msg = None
self.expr = None
self.retval = None
self.lineno = 0
self.start_time = time.time()
self.max_time = max_time
self.use_numpy = HAS_NUMPY and use_numpy
nodes = ALL_NODES[:]
if minimal or no_if:
nodes.remove('if')
if minimal or no_for:
nodes.remove('for')
if minimal or no_while:
nodes.remove('while')
if minimal or no_try:
nodes.remove('try')
if minimal or no_functiondef:
nodes.remove('functiondef')
if minimal or no_ifexp:
nodes.remove('ifexp')
if minimal or no_assert:
nodes.remove('assert')
if minimal or no_delete:
nodes.remove('delete')
if minimal or no_raise:
nodes.remove('raise')
if minimal or no_print:
nodes.remove('print')
if minimal or no_listcomp:
nodes.remove('listcomp')
if minimal or no_augassign:
nodes.remove('augassign')
self.node_handlers = {}
for node in nodes:
self.node_handlers[node] = getattr(self, "on_%s" % node)
# to rationalize try/except try/finally for Python2.6 through Python3.3
if 'try' in self.node_handlers:
self.node_handlers['tryexcept'] = self.node_handlers['try']
self.node_handlers['tryfinally'] = self.node_handlers['try']
if readonly_symbols is None:
self.readonly_symbols = set()
else:
self.readonly_symbols = set(readonly_symbols)
if builtins_readonly:
self.readonly_symbols |= set(self.symtable)
self.no_deepcopy = [key for key, val in symtable.items()
if (callable(val)
or 'numpy.lib.index_tricks' in repr(val)
or inspect.ismodule(val))]
def remove_nodehandler(self, node):
"""remove support for a node
returns current node handler, so that it
might be re-added with add_nodehandler()
"""
out = None
if node in self.node_handlers:
out = self.node_handlers.pop(node)
return out
def set_nodehandler(self, node, handler):
"""set node handler"""
self.node_handlers[node] = handler
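# Usage sketch (illustrative addition): temporarily disable `while` loops, then restore them.
#
#     saved = interp.remove_nodehandler("while")
#     interp("while True: pass")              # now reports that 'while' is not supported
#     interp.set_nodehandler("while", saved)  # restore the original handler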
def user_defined_symbols(self):
"""Return a set of symbols that have been added to symtable after
construction.
I.e., the symbols from self.symtable that are not in
self.no_deepcopy.
Returns
-------
unique_symbols : set
symbols in symtable that are not in self.no_deepcopy
"""
sym_in_current = set(self.symtable.keys())
sym_from_construction = set(self.no_deepcopy)
unique_symbols = sym_in_current.difference(sym_from_construction)
return unique_symbols
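# For example (illustrative addition), a symbol assigned after construction shows up here:
#
#     interp("a = 3")
#     "a" in interp.user_defined_symbols()    # -> True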
def unimplemented(self, node):
"""Unimplemented nodes."""
self.raise_exception(node, exc=NotImplementedError,
msg="'%s' not supported" %
(node.__class__.__name__))
def raise_exception(self, node, exc=None, msg='', expr=None,
lineno=None):
"""Add an exception."""
if self.error is None:
self.error = []
if expr is None:
expr = self.expr
if len(self.error) > 0 and not isinstance(node, ast.Module):
msg = '%s' % msg
err = ExceptionHolder(node, exc=exc, msg=msg, expr=expr, lineno=lineno)
self._interrupt = ast.Break()
self.error.append(err)
if self.error_msg is None:
self.error_msg = "at expr='%s'" % (self.expr)
elif len(msg) > 0:
self.error_msg = msg
if exc is None:
try:
exc = self.error[0].exc
except:
exc = RuntimeError
raise exc(self.error_msg)
# main entry point for Ast node evaluation
# parse: text of statements -> ast
# run: ast -> result
# eval: string statement -> result = run(parse(statement))
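# For example (illustrative addition), the following calls are equivalent:
#
#     tree = interp.parse("1 + 2")   # text -> ast.Module
#     interp.run(tree)               # ast -> 3
#     interp.eval("1 + 2")           # string -> 3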
def parse(self, text):
"""Parse statement/expression to Ast representation."""
self.expr = text
try:
out = ast.parse(text)
except SyntaxError:
self.raise_exception(None, msg='Syntax Error', expr=text)
except:
self.raise_exception(None, msg='Runtime Error', expr=text)
return out
def run(self, node, expr=None, lineno=None, with_raise=True):
"""Execute parsed Ast representation for an expression."""
# Note: keep the 'node is None' test: internal code here may run
# run(None) and expect a None in return.
if time.time() - self.start_time > self.max_time:
raise RuntimeError(ERR_MAX_TIME.format(self.max_time))
out = None
if len(self.error) > 0:
return out
if node is None:
return out
if isinstance(node, str):
node = self.parse(node)
if lineno is not None:
self.lineno = lineno
if expr is not None:
self.expr = expr
# get handler for this node:
# on_xxx will handle nodes of type 'xxx', etc.
try:
handler = self.node_handlers[node.__class__.__name__.lower()]
except KeyError:
return self.unimplemented(node)
# run the handler: this will likely generate
# recursive calls into this run method.
try:
ret = handler(node)
if isinstance(ret, enumerate):
ret = list(ret)
return ret
except:
if with_raise:
self.raise_exception(node, expr=expr)
def __call__(self, expr, **kw):
"""Call class instance as function."""
return self.eval(expr, **kw)
def eval(self, expr, lineno=0, show_errors=True):
"""Evaluate a single statement."""
self.lineno = lineno
self.error = []
self.start_time = time.time()
try:
node = self.parse(expr)
except:
errmsg = exc_info()[1]
if len(self.error) > 0:
errmsg = "\n".join(self.error[0].get_error())
if not show_errors:
try:
exc = self.error[0].exc
except:
exc = RuntimeError
raise exc(errmsg)
print(errmsg, file=self.err_writer)
return
try:
return self.run(node, expr=expr, lineno=lineno)
except:
errmsg = exc_info()[1]
if len(self.error) > 0:
errmsg = "\n".join(self.error[0].get_error())
if not show_errors:
try:
exc = self.error[0].exc
except:
exc = RuntimeError
raise exc(errmsg)
print(errmsg, file=self.err_writer)
return
@staticmethod
def dump(node, **kw):
"""Simple ast dumper."""
return ast.dump(node, **kw)
# handlers for ast components
def on_expr(self, node):
"""Expression."""
return self.run(node.value) # ('value',)
def on_index(self, node):
"""Index."""
return self.run(node.value) # ('value',)
def on_return(self, node): # ('value',)
"""Return statement: look for None, return special sentinal."""
self.retval = self.run(node.value)
if self.retval is None:
self.retval = ReturnedNone
return
def on_repr(self, node):
"""Repr."""
return repr(self.run(node.value)) # ('value',)
def on_module(self, node): # ():('body',)
"""Module def."""
out = None
for tnode in node.body:
out = self.run(tnode)
return out
def on_expression(self, node):
"basic expression"
return self.on_module(node) # ():('body',)
def on_pass(self, node):
"""Pass statement."""
return None # ()
def on_ellipsis(self, node):
"""Ellipses."""
return Ellipsis
# for break and continue: set the instance variable _interrupt
def on_interrupt(self, node): # ()
"""Interrupt handler."""
self._interrupt = node
return node
def on_break(self, node):
"""Break."""
return self.on_interrupt(node)
def on_continue(self, node):
"""Continue."""
return self.on_interrupt(node)
def on_assert(self, node): # ('test', 'msg')
"""Assert statement."""
if not self.run(node.test):
self.raise_exception(node, exc=AssertionError, msg=node.msg)
return True
def on_list(self, node): # ('elts', 'ctx')
"""List."""
return [self.run(e) for e in node.elts]
def on_tuple(self, node): # ('elts', 'ctx')
"""Tuple."""
return tuple(self.on_list(node))
def on_dict(self, node): # ('keys', 'values')
"""Dictionary."""
return dict([(self.run(k), self.run(v)) for k, v in
zip(node.keys, node.values)])
def on_num(self, node): # ('n',)
"""Return number."""
return node.n
def on_str(self, node): # ('s',)
"""Return string."""
return node.s
def on_nameconstant(self, node): # ('value',)
"""named constant"""
return node.value
def on_name(self, node): # ('id', 'ctx')
"""Name node."""
ctx = node.ctx.__class__
if ctx in (ast.Param, ast.Del):
return str(node.id)
else:
if node.id in self.symtable:
return self.symtable[node.id]
else:
msg = "name '%s' is not defined" % node.id
self.raise_exception(node, exc=NameError, msg=msg)
def on_nameconstant(self, node):
""" True, False, None in python >= 3.4 """
return node.value
def node_assign(self, node, val):
"""Assign a value (not the node.value object) to a node.
This is used by on_assign, but also by for, list comprehension,
etc.
"""
if node.__class__ == ast.Name:
if not valid_symbol_name(node.id) or node.id in self.readonly_symbols:
errmsg = "invalid symbol name (reserved word?) %s" % node.id
self.raise_exception(node, exc=NameError, msg=errmsg)
self.symtable[node.id] = val
if node.id in self.no_deepcopy:
self.no_deepcopy.remove(node.id)
elif node.__class__ == ast.Attribute:
if node.ctx.__class__ == ast.Load:
msg = "cannot assign to attribute %s" % node.attr
self.raise_exception(node, exc=AttributeError, msg=msg)
setattr(self.run(node.value), node.attr, val)
elif node.__class__ == ast.Subscript:
sym = self.run(node.value)
xslice = self.run(node.slice)
if isinstance(node.slice, ast.Index):
sym[xslice] = val
elif isinstance(node.slice, ast.Slice):
sym[slice(xslice.start, xslice.stop)] = val
elif isinstance(node.slice, ast.ExtSlice):
sym[xslice] = val
elif node.__class__ in (ast.Tuple, ast.List):
if len(val) == len(node.elts):
for telem, tval in zip(node.elts, val):
self.node_assign(telem, tval)
else:
raise ValueError('too many values to unpack')
def on_attribute(self, node): # ('value', 'attr', 'ctx')
"""Extract attribute."""
ctx = node.ctx.__class__
if ctx == ast.Store:
msg = "attribute for storage: shouldn't be here!"
self.raise_exception(node, exc=RuntimeError, msg=msg)
sym = self.run(node.value)
if ctx == ast.Del:
return delattr(sym, node.attr)
# ctx is ast.Load
fmt = "cannnot access attribute '%s' for %s"
if node.attr not in UNSAFE_ATTRS:
fmt = "no attribute '%s' for %s"
try:
return getattr(sym, node.attr)
except AttributeError:
pass
# AttributeError or accessed unsafe attribute
obj = self.run(node.value)
msg = fmt % (node.attr, obj)
self.raise_exception(node, exc=AttributeError, msg=msg)
def on_assign(self, node): # ('targets', 'value')
"""Simple assignment."""
val = self.run(node.value)
for tnode in node.targets:
self.node_assign(tnode, val)
return
def on_augassign(self, node): # ('target', 'op', 'value')
"""Augmented assign."""
return self.on_assign(ast.Assign(targets=[node.target],
value=ast.BinOp(left=node.target,
op=node.op,
right=node.value)))
def on_slice(self, node): # ():('lower', 'upper', 'step')
"""Simple slice."""
return slice(self.run(node.lower),
self.run(node.upper),
self.run(node.step))
def on_extslice(self, node): # ():('dims',)
"""Extended slice."""
return tuple([self.run(tnode) for tnode in node.dims])
def on_subscript(self, node): # ('value', 'slice', 'ctx')
"""Subscript handling -- one of the tricky parts."""
val = self.run(node.value)
nslice = self.run(node.slice)
ctx = node.ctx.__class__
if ctx in (ast.Load, ast.Store):
if isinstance(node.slice, (ast.Index, ast.Slice, ast.Ellipsis)):
return val.__getitem__(nslice)
elif isinstance(node.slice, ast.ExtSlice):
return val[nslice]
else:
msg = "subscript with unknown context"
self.raise_exception(node, msg=msg)
def on_delete(self, node): # ('targets',)
"""Delete statement."""
for tnode in node.targets:
if tnode.ctx.__class__ != ast.Del:
break
children = []
while tnode.__class__ == ast.Attribute:
children.append(tnode.attr)
tnode = tnode.value
if tnode.__class__ == ast.Name and tnode.id not in self.readonly_symbols:
children.append(tnode.id)
children.reverse()
self.symtable.pop('.'.join(children))
else:
msg = "could not delete symbol"
self.raise_exception(node, msg=msg)
def on_unaryop(self, node): # ('op', 'operand')
"""Unary operator."""
return op2func(node.op)(self.run(node.operand))
def on_binop(self, node): # ('left', 'op', 'right')
"""Binary operator."""
return op2func(node.op)(self.run(node.left),
self.run(node.right))
def on_boolop(self, node): # ('op', 'values')
"""Boolean operator."""
val = self.run(node.values[0])
is_and = ast.And == node.op.__class__
if (is_and and val) or (not is_and and not val):
for n in node.values[1:]:
val = op2func(node.op)(val, self.run(n))
if (is_and and not val) or (not is_and and val):
break
return val
def on_compare(self, node): # ('left', 'ops', 'comparators')
"""comparison operators"""
lval = self.run(node.left)
out = True
for op, rnode in zip(node.ops, node.comparators):
rval = self.run(rnode)
out = op2func(op)(lval, rval)
lval = rval
if self.use_numpy and isinstance(out, numpy.ndarray) and out.any():
break
elif not out:
break
return out
def on_print(self, node): # ('dest', 'values', 'nl')
"""Note: implements Python2 style print statement, not print()
function.
May need improvement....
"""
dest = self.run(node.dest) or self.writer
end = ''
if node.nl:
end = '\n'
out = [self.run(tnode) for tnode in node.values]
if out and len(self.error) == 0:
self._printer(*out, file=dest, end=end)
def _printer(self, *out, **kws):
"""Generic print function."""
flush = kws.pop('flush', True)
fileh = kws.pop('file', self.writer)
sep = kws.pop('sep', ' ')
end = kws.pop('end', '\n')
print(*out, file=fileh, sep=sep, end=end)
if flush:
fileh.flush()
def on_if(self, node): # ('test', 'body', 'orelse')
"""Regular if-then-else statement."""
block = node.body
if not self.run(node.test):
block = node.orelse
for tnode in block:
self.run(tnode)
def on_ifexp(self, node): # ('test', 'body', 'orelse')
"""If expressions."""
expr = node.orelse
if self.run(node.test):
expr = node.body
return self.run(expr)
def on_while(self, node): # ('test', 'body', 'orelse')
"""While blocks."""
while self.run(node.test):
self._interrupt = None
for tnode in node.body:
self.run(tnode)
if self._interrupt is not None:
break
if isinstance(self._interrupt, ast.Break):
break
else:
for tnode in node.orelse:
self.run(tnode)
self._interrupt = None
def on_for(self, node): # ('target', 'iter', 'body', 'orelse')
"""For blocks."""
for val in self.run(node.iter):
self.node_assign(node.target, val)
self._interrupt = None
for tnode in node.body:
self.run(tnode)
if self._interrupt is not None:
break
if isinstance(self._interrupt, ast.Break):
break
else:
for tnode in node.orelse:
self.run(tnode)
self._interrupt = None
def on_listcomp(self, node): # ('elt', 'generators')
"""List comprehension."""
out = []
for tnode in node.generators:
if tnode.__class__ == ast.comprehension:
for val in self.run(tnode.iter):
self.node_assign(tnode.target, val)
add = True
for cond in tnode.ifs:
add = add and self.run(cond)
if add:
out.append(self.run(node.elt))
return out
def on_excepthandler(self, node): # ('type', 'name', 'body')
"""Exception handler..."""
return (self.run(node.type), node.name, node.body)
def on_try(self, node): # ('body', 'handlers', 'orelse', 'finalbody')
"""Try/except/else/finally blocks."""
no_errors = True
for tnode in node.body:
self.run(tnode, with_raise=False)
no_errors = no_errors and len(self.error) == 0
if len(self.error) > 0:
e_type, e_value, e_tback = self.error[-1].exc_info
for hnd in node.handlers:
htype = None
if hnd.type is not None:
htype = builtins.get(hnd.type.id, None)
if htype is None or isinstance(e_type(), htype):
self.error = []
if hnd.name is not None:
self.node_assign(hnd.name, e_value)
for tline in hnd.body:
self.run(tline)
break
break
if no_errors and hasattr(node, 'orelse'):
for tnode in node.orelse:
self.run(tnode)
if hasattr(node, 'finalbody'):
for tnode in node.finalbody:
self.run(tnode)
def on_raise(self, node): # ('type', 'inst', 'tback')
"""Raise statement: note difference for python 2 and 3."""
if version_info[0] == 3:
excnode = node.exc
msgnode = node.cause
else:
excnode = node.type
msgnode = node.inst
out = self.run(excnode)
msg = ' '.join(out.args)
msg2 = self.run(msgnode)
if msg2 not in (None, 'None'):
msg = "%s: %s" % (msg, msg2)
self.raise_exception(None, exc=out.__class__, msg=msg, expr='')
def on_call(self, node):
"""Function execution."""
# ('func', 'args', 'keywords'. Py<3.5 has 'starargs' and 'kwargs' too)
func = self.run(node.func)
if not hasattr(func, '__call__') and not isinstance(func, type):
msg = "'%s' is not callable!!" % (func)
self.raise_exception(node, exc=TypeError, msg=msg)
args = [self.run(targ) for targ in node.args]
starargs = getattr(node, 'starargs', None)
if starargs is not None:
args = args + self.run(starargs)
keywords = {}
if six.PY3 and func == print:
keywords['file'] = self.writer
for key in node.keywords:
if not isinstance(key, ast.keyword):
msg = "keyword error in function call '%s'" % (func)
self.raise_exception(node, msg=msg)
keywords[key.arg] = self.run(key.value)
kwargs = getattr(node, 'kwargs', None)
if kwargs is not None:
keywords.update(self.run(kwargs))
try:
return func(*args, **keywords)
except Exception as ex:
self.raise_exception(
node, msg="Error running function call '%s' with args %s and "
"kwargs %s: %s" % (func.__name__, args, keywords, ex))
def on_arg(self, node): # ('test', 'msg')
"""Arg for function definitions."""
return node.arg
def on_functiondef(self, node):
"""Define procedures."""
# ('name', 'args', 'body', 'decorator_list')
if node.decorator_list:
raise Warning("decorated procedures not supported!")
kwargs = []
if not valid_symbol_name(node.name) or node.name in self.readonly_symbols:
errmsg = "invalid function name (reserved word?) %s" % node.name
self.raise_exception(node, exc=NameError, msg=errmsg)
offset = len(node.args.args) - len(node.args.defaults)
for idef, defnode in enumerate(node.args.defaults):
defval = self.run(defnode)
keyval = self.run(node.args.args[idef+offset])
kwargs.append((keyval, defval))
if version_info[0] == 3:
args = [tnode.arg for tnode in node.args.args[:offset]]
else:
args = [tnode.id for tnode in node.args.args[:offset]]
doc = None
nb0 = node.body[0]
if isinstance(nb0, ast.Expr) and isinstance(nb0.value, ast.Str):
doc = nb0.value.s
varkws = node.args.kwarg
vararg = node.args.vararg
if version_info[0] == 3:
if isinstance(vararg, ast.arg):
vararg = vararg.arg
if isinstance(varkws, ast.arg):
varkws = varkws.arg
self.symtable[node.name] = Procedure(node.name, self, doc=doc,
lineno=self.lineno,
body=node.body,
args=args, kwargs=kwargs,
vararg=vararg, varkws=varkws)
if node.name in self.no_deepcopy:
self.no_deepcopy.remove(node.name)
class Procedure(object):
"""Procedure: user-defined function for asteval.
This stores the parsed ast nodes as from the 'functiondef' ast node
for later evaluation.
"""
def __init__(self, name, interp, doc=None, lineno=0,
body=None, args=None, kwargs=None,
vararg=None, varkws=None):
"""TODO: docstring in public method."""
self.__ininit__ = True
self.name = name
self.__name__ = self.name
self.__asteval__ = interp
self.raise_exc = self.__asteval__.raise_exception
self.__doc__ = doc
self.body = body
self.argnames = args
self.kwargs = kwargs
self.vararg = vararg
self.varkws = varkws
self.lineno = lineno
self.__ininit__ = False
def __setattr__(self, attr, val):
if not getattr(self, '__ininit__', True):
self.raise_exc(None, exc=TypeError,
msg="procedure is read-only")
self.__dict__[attr] = val
def __dir__(self):
return ['name']
def __repr__(self):
"""TODO: docstring in magic method."""
sig = ""
if len(self.argnames) > 0:
sig = "%s%s" % (sig, ', '.join(self.argnames))
if self.vararg is not None:
sig = "%s, *%s" % (sig, self.vararg)
if len(self.kwargs) > 0:
if len(sig) > 0:
sig = "%s, " % sig
_kw = ["%s=%s" % (k, v) for k, v in self.kwargs]
sig = "%s%s" % (sig, ', '.join(_kw))
if self.varkws is not None:
sig = "%s, **%s" % (sig, self.varkws)
sig = "<Procedure %s(%s)>" % (self.name, sig)
if self.__doc__ is not None:
sig = "%s\n %s" % (sig, self.__doc__)
return sig
def __call__(self, *args, **kwargs):
"""TODO: docstring in public method."""
symlocals = {}
args = list(args)
nargs = len(args)
nkws = len(kwargs)
nargs_expected = len(self.argnames)
# check for too few arguments, but the correct keyword given
if (nargs < nargs_expected) and nkws > 0:
for name in self.argnames[nargs:]:
if name in kwargs:
args.append(kwargs.pop(name))
nargs = len(args)
nargs_expected = len(self.argnames)
nkws = len(kwargs)
if nargs < nargs_expected:
msg = "%s() takes at least %i arguments, got %i"
self.raise_exc(None, exc=TypeError,
msg=msg % (self.name, nargs_expected, nargs))
# check for multiple values for named argument
if len(self.argnames) > 0 and kwargs is not None:
msg = "multiple values for keyword argument '%s' in Procedure %s"
for targ in self.argnames:
if targ in kwargs:
self.raise_exc(None, exc=TypeError,
msg=msg % (targ, self.name),
lineno=self.lineno)
# check more args given than expected, varargs not given
if nargs != nargs_expected:
msg = None
if nargs < nargs_expected:
msg = 'not enough arguments for Procedure %s()' % self.name
msg = '%s (expected %i, got %i)' % (msg, nargs_expected, nargs)
self.raise_exc(None, exc=TypeError, msg=msg)
if nargs > nargs_expected and self.vararg is None:
if nargs - nargs_expected > len(self.kwargs):
msg = 'too many arguments for %s() expected at most %i, got %i'
msg = msg % (self.name, len(self.kwargs)+nargs_expected, nargs)
self.raise_exc(None, exc=TypeError, msg=msg)
for i, xarg in enumerate(args[nargs_expected:]):
kw_name = self.kwargs[i][0]
if kw_name not in kwargs:
kwargs[kw_name] = xarg
for argname in self.argnames:
symlocals[argname] = args.pop(0)
try:
if self.vararg is not None:
symlocals[self.vararg] = tuple(args)
for key, val in self.kwargs:
if key in kwargs:
val = kwargs.pop(key)
symlocals[key] = val
if self.varkws is not None:
symlocals[self.varkws] = kwargs
elif len(kwargs) > 0:
msg = 'extra keyword arguments for Procedure %s (%s)'
msg = msg % (self.name, ','.join(list(kwargs.keys())))
self.raise_exc(None, msg=msg, exc=TypeError,
lineno=self.lineno)
except (ValueError, LookupError, TypeError,
NameError, AttributeError):
msg = 'incorrect arguments for Procedure %s' % self.name
self.raise_exc(None, msg=msg, lineno=self.lineno)
save_symtable = self.__asteval__.symtable.copy()
self.__asteval__.symtable.update(symlocals)
self.__asteval__.retval = None
retval = None
# evaluate script of function
for node in self.body:
self.__asteval__.run(node, expr='<>', lineno=self.lineno)
if len(self.__asteval__.error) > 0:
break
if self.__asteval__.retval is not None:
retval = self.__asteval__.retval
self.__asteval__.retval = None
if retval is ReturnedNone:
retval = None
break
self.__asteval__.symtable = save_symtable
symlocals = None
return retval
|
the-stack_106_22651 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from bottles_of_bear import BottlesOfBear
class HQ9Plus():
def __init__(self, tokens):
self.tokens = tf.constant(tokens)
self.cnt = tf.Variable(0, trainable=False)
with open(__file__) as fin:
self.src = fin.read()
self.graph = tf.while_loop(self.cond, self.body, [0, self.tokens, ''],
back_prop=False)
def run(self):
with tf.Session() as sess:
tf.global_variables_initializer().run()
return sess.run(self.graph)
def cond(self, i, x, _):
return tf.less(i, tf.size(self.tokens))
def body(self, i, _, output):
def inc():
return tf.cond(tf.equal(tf.assign(self.cnt, self.cnt +1), 0),
lambda: (''),
lambda: (''))
r = tf.cond(tf.equal(self.tokens[i], 'H'),
lambda: ('Hello, world!\n'),
lambda: tf.cond(tf.equal(self.tokens[i], 'Q'),
lambda: (self.src),
lambda: tf.cond(tf.equal(self.tokens[i], '9'),
lambda: (BottlesOfBear().graph[1]),
lambda: tf.cond(tf.equal(self.tokens[i], '+'),
lambda: inc(),
lambda: ('')))))
return tf.add(i, 1), self.tokens, output + r
if __name__ == '__main__':
tokens = list('HQ9+')
_, _, output = HQ9Plus(tokens).run()
print(output)
|
the-stack_106_22652 | from torch import nn
from torch_rl.utils import gauss_weights_init
from torch_rl.core import *
import os
import glob
class SaveableModel(object):
def save(self, path):
tor.save(self, path)
@classmethod
def load(cls, path):
return tor.load(path)
@classmethod
def load_best(cls, path):
assert os.path.isdir(path)
best_models = glob.glob(os.path.join(path, "*best*"))
assert not len(best_models) > 1
return tor.load(best_models[0])  # glob.glob already returns paths that include `path`
class NeuralNet(nn.Module, SaveableModel):
def __init__(self):
super(NeuralNet, self).__init__()
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.softmax = nn.Softmax()
def action(self, x):
pass
class StochasticNeuralNet(NeuralNet):
def __init__(self):
super(StochasticNeuralNet, self).__init__()
def sample_action(self, action_distribution=None):
if not action_distribution:
action_distribution = self.out
action_distribution = action_distribution.cpu().data.numpy()
action = np.random.choice(action_distribution.squeeze(), p=action_distribution.squeeze())
action = np.argmax(action_distribution == action)
return action
class StochasticContinuousNeuralNet(NeuralNet):
def __init__(self):
super(StochasticContinuousNeuralNet, self).__init__()
def sigma(self):
pass
def mu(self):
pass
class SimpleNetwork(NeuralNet):
def __init__(self, architecture, weight_init=gauss_weights_init(0,0.02),activation_functions=None):
super(SimpleNetwork, self).__init__()
if len(architecture) < 2:
raise Exception("Architecture needs at least two numbers to create network")
self.activation_functions = activation_functions
self.layer_list = []
for i in range(len(architecture)-1):
self.layer_list.append(nn.Linear(architecture[i], architecture[i+1]))
setattr(self, "fc" + str(i), self.layer_list[-1])
self.apply(weight_init)
def forward(self, x):
if self.activation_functions:
for i, func in enumerate(self.activation_functions):
x = func(self.layer_list[i](x))
else:
for i, layer in enumerate(self.layer_list):
x = self.relu(layer(x))
i+=1
while i < len(self.layer_list):
x = self.layer_list[i](x)
i+=1
self.out = x
return x
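# Usage sketch (illustrative addition; `tor` is the torch alias pulled in by the
# wildcard import from torch_rl.core, as used elsewhere in this file):
#
#     net = SimpleNetwork([4, 64, 2])   # 4 inputs -> 64 hidden -> 2 outputs
#     out = net(tor.randn(1, 4))        # ReLU is applied after every layer here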
class QNetwork(NeuralNet):
"""
Just adds a call method for simpler state and action passing.
"""
def __init__(self, architecture, weight_init=gauss_weights_init(0,0.02),
activation_functions=None):
super(NeuralNet, self).__init__()
self.activation_functions = activation_functions
self.layer_list = []
for i in range(len(architecture)-1):
self.layer_list.append(nn.Linear(architecture[i], architecture[i+1]))
setattr(self, "fc" + str(i), self.layer_list[-1])
#self.last_linear = nn.Linear(architecture[-1], 1)
self.apply(weight_init)
def forward(self, x):
if self.activation_functions:
for i, func in enumerate(self.activation_functions):
x = func(self.layer_list[i](x))
else:
for i, layer in enumerate(self.layer_list):
x = self.relu(layer(x))
i+=1
while i < len(self.layer_list):
x = self.layer_list[i](x)
i+=1
# x = self.last_linear(x)
return x
def __call__(self, s, a):
x = tor.cat((s,a), 1)
return self.forward(x)
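# Usage sketch (illustrative addition): state and action are concatenated along dim 1
# before the forward pass.
#
#     q = QNetwork([6, 64, 1])                       # 4-dim state + 2-dim action = 6 inputs
#     value = q(tor.randn(8, 4), tor.randn(8, 2))    # batch of 8 (s, a) pairs -> (8, 1)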
class PolicyAHG(StochasticNeuralNet):
def __init__(self, input_size, output_size):
super(PolicyAHG, self).__init__()
self.f1 = nn.Linear(input_size,32)
self.f2 = nn.Linear(32, output_size)
def forward(self, x):
out = self.f1(x)
out = self.tanh(out)
out = self.f2(out)
out = self.softmax(out)
self.out = out
return out
class PolicySPG(StochasticNeuralNet):
def __init__(self, input_size, output_size):
super(PolicySPG, self).__init__()
self.f1 = nn.Linear(input_size,64)
self.f2 = nn.Linear(64, output_size)
def forward(self, x):
out = self.f1(x)
out = self.relu(out)
out = self.f2(out)
out = self.softmax(out)
self.out = out
return out
|
the-stack_106_22655 | """
Environment
(1) set the attribute of Node, App, BasicThroughput,
Currently we fix the No. of App with 9, homogeneous cluster.
The No. of Nodes could be set, No. of Containers in the batch is not required to know
(2) Functions:
1. _state_reset: clear state matrix
2. step: allocate one container to a node, return new state
3. get_tput_total_env: get the throughput of the entire cluster (after each episode)
"""
import numpy as np
from testbedlib.SubScheduler_pppo import NineNodeAPI
from testbedlib.simulator.simulator import Simulator
app_node_set = np.array([
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26],
[2, 3, 5, 6, 7, 11, 12, 18, 20, 22, 23, 24, 25, 26],
[0, 2, 8, 9, 19, 23, 24, 25, 26],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]])
class LraClusterEnv():
def __init__(self, num_nodes):
#: Cluster configuration
self.NUM_NODES = num_nodes # node_id: 0,1,2,...
#: fixed number of apps
self.NUM_APPS = 7
#: initialized state to zero matrix
self._state_reset()
# clustering
self.baisc_oath_name = 'unified_27_'
path_surffix = "./checkpoint/"
self.nine_node_api_0 = NineNodeAPI(path_name=self.baisc_oath_name + '0', surffix='0', path_surffix=path_surffix)
self.nine_node_api_1 = NineNodeAPI(path_name=self.baisc_oath_name + '10', surffix='10', path_surffix=path_surffix)
self.nine_node_api_2 = NineNodeAPI(path_name=self.baisc_oath_name + '20', surffix='20', path_surffix=path_surffix)
self.nine_node_api_3 = NineNodeAPI(path_name=self.baisc_oath_name + '30', surffix='30', path_surffix=path_surffix)
self.nine_node_api_4 = NineNodeAPI(path_name=self.baisc_oath_name + '40', surffix='40', path_surffix=path_surffix)
self.nine_node_api_5 = NineNodeAPI(path_name=self.baisc_oath_name + '50', surffix='50', path_surffix=path_surffix)
self.nine_node_api_6 = NineNodeAPI(path_name=self.baisc_oath_name + '60', surffix='60', path_surffix=path_surffix)
self.nine_node_api_7 = NineNodeAPI(path_name=self.baisc_oath_name + '70', surffix='70', path_surffix=path_surffix)
self.nine_node_api_8 = NineNodeAPI(path_name=self.baisc_oath_name + '80', surffix='80', path_surffix=path_surffix)
self.nine_node_api_9 = NineNodeAPI(path_name=self.baisc_oath_name + '90', surffix='90', path_surffix=path_surffix)
self.nine_node_api_10 = NineNodeAPI(path_name=self.baisc_oath_name + '100', surffix='100', path_surffix=path_surffix)
self.nine_node_api_11 = NineNodeAPI(path_name=self.baisc_oath_name + '100', surffix='100', path_surffix=path_surffix)
# self.nine_node_api_12 = NineNodeAPI(path_name=self.baisc_oath_name + '120', surffix='120', path_surffix=path_surffix)
# self.nine_node_api_13 = NineNodeAPI(path_name=self.baisc_oath_name + '130', surffix='130', path_surffix=path_surffix)
# self.nine_node_api_14 = NineNodeAPI(path_name=self.baisc_oath_name + '140', surffix='140', path_surffix=path_surffix)
# self.nine_node_api_15 = NineNodeAPI(path_name=self.baisc_oath_name + '150', surffix='150', path_surffix=path_surffix)
# self.nine_node_api_16 = NineNodeAPI(path_name=self.baisc_oath_name + '160', surffix='160', path_surffix=path_surffix)
# self.nine_node_api_17 = NineNodeAPI(path_name=self.baisc_oath_name + '170', surffix='170', path_surffix=path_surffix)
# self.nine_node_api_18 = NineNodeAPI(path_name=self.baisc_oath_name + '180', surffix='180', path_surffix=path_surffix)
# self.nine_node_api_19 = NineNodeAPI(path_name=self.baisc_oath_name + '190', surffix='190', path_surffix=path_surffix)
# self.nine_node_api_20 = NineNodeAPI(path_name=self.baisc_oath_name + '200', surffix='200', path_surffix=path_surffix)
self.sim = Simulator()
def _state_reset(self):
self.state = np.zeros([self.NUM_NODES, self.NUM_APPS])
def reset(self):
self._state_reset()
return self._get_state()
def step(self, action, appid):
"""
:param action: node chosen
:param appid: current app_id of the container to be allocated
:return: new state after allocation
"""
curr_app = appid
self.state[action][curr_app] += 1 # locate
state = self._get_state()
return state
def _get_state(self):
return self.state
@property
def _get_throughput(self):
state_all = np.empty([0, self.NUM_APPS])
for nid in range(self.NUM_NODES):
container_list = self.state[nid]
num_container = sum(container_list)
predictor_class = int((num_container-1)/10)
if predictor_class > 11:
predictor_class = 11
assert (predictor_class >= 0) & (predictor_class <= 11)
if predictor_class == 0:
state_this = self.nine_node_api_0.get_total_tput(container_list)
elif predictor_class == 1:
state_this = self.nine_node_api_1.get_total_tput(container_list)
elif predictor_class == 2:
state_this = self.nine_node_api_2.get_total_tput(container_list)
elif predictor_class == 3:
state_this = self.nine_node_api_3.get_total_tput(container_list)
elif predictor_class == 4:
state_this = self.nine_node_api_4.get_total_tput(container_list)
elif predictor_class == 5:
state_this = self.nine_node_api_5.get_total_tput(container_list)
elif predictor_class == 6:
state_this = self.nine_node_api_6.get_total_tput(container_list)
elif predictor_class == 7:
state_this = self.nine_node_api_7.get_total_tput(container_list)
elif predictor_class == 8:
state_this = self.nine_node_api_8.get_total_tput(container_list)
elif predictor_class == 9:
state_this = self.nine_node_api_9.get_total_tput(container_list)
elif predictor_class == 10:
state_this = self.nine_node_api_10.get_total_tput(container_list)
elif predictor_class == 11:
state_this = self.nine_node_api_11.get_total_tput(container_list)
state_all = np.append(state_all, state_this, 0)
if self.getTput:
total_tput = (self.sim.predict(state_all.reshape(-1, self.NUM_APPS)) * state_all).sum()
else:
total_tput = 0
state = state_all
# list_check_per_app = (state > 1).sum() # + max((env.state - 1).max(), 0)
# list_check_sum = sum(state.sum(1) > 8) # + max(max(env.state.sum(1) - params['container_limitation per node']), 0)
# list_check_coex = sum((state[:, 1] > 0) * (state[:, 2] > 0))
# list_check = list_check_sum + list_check_coex + list_check_per_app
list_check = 0
# for node in range(self.NUM_NODES * 27):
# for app in range(self.NUM_APPS):
# if state[node, :].sum() > 8 or state[node, app] > 1 or (app == 1 and state[node, 2] > 0) or (app == 2 and state[node, 1] > 0):
# list_check += state[node, app]
# container limitation & deployment spread
for node in range(self.NUM_NODES * 27):
for app in range(self.NUM_APPS):
if state[node, :].sum() > 8: # or env.state[node, app] > 1:
list_check += state[node, app]
# hardware affinity & increamental deployment
for app in range(7):
node_now = np.where(state[:, app] > 0)[0]
for node_ in node_now:
if node_%27 not in app_node_set[app]:
list_check += state[node_, app]
return total_tput, 0, 0, 0, list_check
def get_tput_total_env(self, getTput=True):
self.getTput = getTput
return self._get_throughput |
the-stack_106_22656 | # coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:[email protected]>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WildCardSearchRequestDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'query': 'str',
'source_lang': 'str',
'target_langs': 'list[str]',
'count': 'int',
'offset': 'int',
'source_langs': 'list[str]'
}
attribute_map = {
'query': 'query',
'source_lang': 'sourceLang',
'target_langs': 'targetLangs',
'count': 'count',
'offset': 'offset',
'source_langs': 'sourceLangs'
}
def __init__(self, query=None, source_lang=None, target_langs=None, count=None, offset=None, source_langs=None): # noqa: E501
"""WildCardSearchRequestDto - a model defined in Swagger""" # noqa: E501
self._query = None
self._source_lang = None
self._target_langs = None
self._count = None
self._offset = None
self._source_langs = None
self.discriminator = None
self.query = query
self.source_lang = source_lang
if target_langs is not None:
self.target_langs = target_langs
if count is not None:
self.count = count
if offset is not None:
self.offset = offset
if source_langs is not None:
self.source_langs = source_langs
@property
def query(self):
"""Gets the query of this WildCardSearchRequestDto. # noqa: E501
:return: The query of this WildCardSearchRequestDto. # noqa: E501
:rtype: str
"""
return self._query
@query.setter
def query(self, query):
"""Sets the query of this WildCardSearchRequestDto.
:param query: The query of this WildCardSearchRequestDto. # noqa: E501
:type: str
"""
if query is None:
raise ValueError("Invalid value for `query`, must not be `None`") # noqa: E501
self._query = query
@property
def source_lang(self):
"""Gets the source_lang of this WildCardSearchRequestDto. # noqa: E501
:return: The source_lang of this WildCardSearchRequestDto. # noqa: E501
:rtype: str
"""
return self._source_lang
@source_lang.setter
def source_lang(self, source_lang):
"""Sets the source_lang of this WildCardSearchRequestDto.
:param source_lang: The source_lang of this WildCardSearchRequestDto. # noqa: E501
:type: str
"""
if source_lang is None:
raise ValueError("Invalid value for `source_lang`, must not be `None`") # noqa: E501
self._source_lang = source_lang
@property
def target_langs(self):
"""Gets the target_langs of this WildCardSearchRequestDto. # noqa: E501
:return: The target_langs of this WildCardSearchRequestDto. # noqa: E501
:rtype: list[str]
"""
return self._target_langs
@target_langs.setter
def target_langs(self, target_langs):
"""Sets the target_langs of this WildCardSearchRequestDto.
:param target_langs: The target_langs of this WildCardSearchRequestDto. # noqa: E501
:type: list[str]
"""
self._target_langs = target_langs
@property
def count(self):
"""Gets the count of this WildCardSearchRequestDto. # noqa: E501
:return: The count of this WildCardSearchRequestDto. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this WildCardSearchRequestDto.
:param count: The count of this WildCardSearchRequestDto. # noqa: E501
:type: int
"""
if count is not None and count > 50: # noqa: E501
raise ValueError("Invalid value for `count`, must be a value less than or equal to `50`") # noqa: E501
if count is not None and count < 1: # noqa: E501
raise ValueError("Invalid value for `count`, must be a value greater than or equal to `1`") # noqa: E501
self._count = count
@property
def offset(self):
"""Gets the offset of this WildCardSearchRequestDto. # noqa: E501
:return: The offset of this WildCardSearchRequestDto. # noqa: E501
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this WildCardSearchRequestDto.
:param offset: The offset of this WildCardSearchRequestDto. # noqa: E501
:type: int
"""
self._offset = offset
@property
def source_langs(self):
"""Gets the source_langs of this WildCardSearchRequestDto. # noqa: E501
:return: The source_langs of this WildCardSearchRequestDto. # noqa: E501
:rtype: list[str]
"""
return self._source_langs
@source_langs.setter
def source_langs(self, source_langs):
"""Sets the source_langs of this WildCardSearchRequestDto.
:param source_langs: The source_langs of this WildCardSearchRequestDto. # noqa: E501
:type: list[str]
"""
self._source_langs = source_langs
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WildCardSearchRequestDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WildCardSearchRequestDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_22657 | import tempfile
import logging
import json
import os
import unittest.mock
from unittest.mock import patch
from smac.utils.io.traj_logging import TrajLogger
from smac.utils.io.traj_logging import TrajEntry
from smac.configspace import ConfigurationSpace,\
Configuration, CategoricalHyperparameter, Constant, UniformFloatHyperparameter, UniformIntegerHyperparameter
from smac.scenario.scenario import Scenario
from smac.stats.stats import Stats
__copyright__ = "Copyright 2021, AutoML.org Freiburg-Hannover"
__license__ = "3-clause BSD"
class TrajLoggerTest(unittest.TestCase):
def mocked_get_used_wallclock_time(self):
self.value += 1
return self.value
def setUp(self):
logging.basicConfig()
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
self.logger.setLevel(logging.DEBUG)
self.value = 0
self.cs = ConfigurationSpace()
self.cs.add_hyperparameters([
UniformFloatHyperparameter('param_a', -0.2, 1.77, 1.1),
UniformIntegerHyperparameter('param_b', -3, 10, 1),
Constant('param_c', 'value'),
CategoricalHyperparameter('ambigous_categorical', choices=['True', True, 5]), # True is ambigous here
])
self.test_config = Configuration(self.cs, {'param_a': 0.5,
'param_b': 1,
'param_c': 'value',
'ambigous_categorical': 5})
def test_init(self):
scen = Scenario(scenario={'run_obj': 'quality', 'cs': self.cs, 'output_dir': ''})
stats = Stats(scen)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'tmp_test_folder')
TrajLogger(output_dir=path, stats=stats)
self.assertTrue(os.path.exists(path))
def test_oserror(self):
scen = Scenario(scenario={'run_obj': 'quality', 'cs': self.cs, 'output_dir': ''})
stats = Stats(scen)
# test OSError
with patch('os.makedirs') as osMock:
osMock.side_effect = OSError()
self.assertRaises(OSError, TrajLogger, output_dir='random_directory', stats=stats)
@patch('smac.stats.stats.Stats')
def test_add_entries(self, mock_stats):
# Mock stats
mock_stats.ta_time_used = .5
mock_stats.get_used_wallclock_time = self.mocked_get_used_wallclock_time
mock_stats.finished_ta_runs = 1
with tempfile.TemporaryDirectory() as tmpdir:
tl = TrajLogger(output_dir=tmpdir, stats=mock_stats)
# Add some entries
tl.add_entry(0.9, 1, self.test_config, 0)
mock_stats.ta_runs = 2
mock_stats.ta_time_used = 0
tl.add_entry(1.3, 1, self.test_config, 10)
mock_stats.ta_time_used = 0
tl.add_entry(0.7, 2, Configuration(self.cs, dict(self.test_config.get_dictionary(), **{'param_a': 0.})), 10)
# Test the list that's added to the trajectory class
self.assertEqual(tl.trajectory[0], TrajEntry(0.9, 1, self.test_config, 1, 0.5, 1, 0))
# Test named-tuple-access:
self.assertEqual(tl.trajectory[0].train_perf, 0.9)
self.assertEqual(tl.trajectory[0].incumbent_id, 1)
self.assertEqual(tl.trajectory[0].ta_runs, 1)
self.assertEqual(tl.trajectory[0].ta_time_used, 0.5)
self.assertEqual(tl.trajectory[0].wallclock_time, 1)
self.assertEqual(tl.trajectory[0].budget, 0)
self.assertEqual(len(tl.trajectory), 3)
# Check if the trajectories are generated
for fn in ['traj_old.csv', 'traj_aclib2.json', 'traj.json']:
self.assertTrue(os.path.exists(os.path.join(tmpdir, fn)))
# Load trajectories
with open(os.path.join(tmpdir, 'traj_old.csv')) as to:
data = to.read().split('\n')
with open(os.path.join(tmpdir, 'traj_aclib2.json')) as js_aclib:
json_dicts_aclib2 = [json.loads(line) for line in js_aclib.read().splitlines()]
with open(os.path.join(tmpdir, 'traj.json')) as js:
json_dicts_alljson = [json.loads(line) for line in js.read().splitlines()]
# Check old format
header = data[0].split(',')
self.assertEqual(header[0], '"CPU Time Used"')
self.assertEqual(header[-1], '"Configuration..."')
data = list(map(lambda x: x.split(', '), data[1:]))
frmt_str = '%1.6f'
self.assertEqual(frmt_str % 0.5, data[0][0])
self.assertEqual(frmt_str % 0.9, data[0][1])
self.assertEqual(frmt_str % 0.5, data[0][4])
self.assertEqual(frmt_str % 0, data[1][0])
self.assertEqual(frmt_str % 1.3, data[1][1])
self.assertEqual(frmt_str % 2, data[1][4])
self.assertEqual(frmt_str % 0, data[2][0])
self.assertEqual(frmt_str % .7, data[2][1])
self.assertEqual(frmt_str % 3, data[2][4])
# Check aclib2-format
self.assertEqual(json_dicts_aclib2[0]['cpu_time'], .5)
self.assertEqual(json_dicts_aclib2[0]['cost'], 0.9)
self.assertEqual(len(json_dicts_aclib2[0]['incumbent']), 4)
self.assertTrue("param_a='0.5'" in json_dicts_aclib2[0]['incumbent'])
self.assertTrue("param_a='0.0'" in json_dicts_aclib2[2]['incumbent'])
# Check alljson-format
self.assertEqual(json_dicts_alljson[0]['cpu_time'], .5)
self.assertEqual(json_dicts_alljson[0]['cost'], 0.9)
self.assertEqual(len(json_dicts_alljson[0]['incumbent']), 4)
self.assertTrue(json_dicts_alljson[0]["incumbent"]["param_a"] == 0.5)
self.assertTrue(json_dicts_alljson[2]["incumbent"]["param_a"] == 0.0)
self.assertEqual(json_dicts_alljson[0]['budget'], 0)
self.assertEqual(json_dicts_alljson[2]['budget'], 10)
@patch('smac.stats.stats.Stats')
def test_add_entries_multi_objectives(self, mock_stats):
# Mock stats
mock_stats.ta_time_used = .5
mock_stats.get_used_wallclock_time = self.mocked_get_used_wallclock_time
mock_stats.finished_ta_runs = 1
num_obj = 2
with tempfile.TemporaryDirectory() as tmpdir:
tl = TrajLogger(output_dir=tmpdir, stats=mock_stats)
# Add some entries
tl.add_entry([0.9, 0.8], 1, self.test_config, 0)
# Test the list that's added to the trajectory class
self.assertEqual(tl.trajectory[0], TrajEntry([0.9, 0.8], 1, self.test_config, 1, 0.5, 1, 0))
# Test named-tuple-access:
self.assertEqual(tl.trajectory[0].train_perf, [0.9, 0.8])
self.assertEqual(len(tl.trajectory), 1)
# Check if the trajectories are generated
for fn in ['traj_old.csv', 'traj_aclib2.json', 'traj.json']:
self.assertTrue(os.path.exists(os.path.join(tmpdir, fn)))
# Load trajectories
with open(os.path.join(tmpdir, 'traj_old.csv')) as to:
data = to.read().split('\n')
with open(os.path.join(tmpdir, 'traj_aclib2.json')) as js_aclib:
json_dicts_aclib2 = [json.loads(line) for line in js_aclib.read().splitlines()]
with open(os.path.join(tmpdir, 'traj.json')) as js:
json_dicts_alljson = [json.loads(line) for line in js.read().splitlines()]
# Check old format
header = data[0].split(',')
self.assertEqual(header[0], '"CPU Time Used"')
self.assertEqual(header[-1], '"Configuration..."')
data = list(map(lambda x: x.split(', '), data[1:]))
data[0][1] = ', '.join(data[0][1: 1 + num_obj])
del data[0][1 + 1: 1 + num_obj]
frmt_str = '%1.6f'
self.assertEqual(frmt_str % 0.5, data[0][0])
self.assertEqual(f'[{0.9}, {0.8}]', data[0][1])
self.assertEqual(frmt_str % 0.5, data[0][4])
# Check aclib2-format
self.assertEqual(json_dicts_aclib2[0]['cpu_time'], .5)
self.assertEqual(json_dicts_aclib2[0]['cost'], [0.9, 0.8])
self.assertEqual(len(json_dicts_aclib2[0]['incumbent']), 4)
self.assertTrue("param_a='0.5'" in json_dicts_aclib2[0]['incumbent'])
# Check alljson-format
self.assertEqual(json_dicts_alljson[0]['cpu_time'], .5)
self.assertEqual(json_dicts_alljson[0]['cost'], [0.9, 0.8])
self.assertEqual(len(json_dicts_alljson[0]['incumbent']), 4)
self.assertTrue(json_dicts_alljson[0]["incumbent"]["param_a"] == 0.5)
self.assertEqual(json_dicts_alljson[0]['budget'], 0)
@patch('smac.stats.stats.Stats')
def test_ambigious_categoricals(self, mock_stats):
mock_stats.ta_time_used = 0.5
mock_stats.get_used_wallclock_time = self.mocked_get_used_wallclock_time
mock_stats.finished_ta_runs = 1
with tempfile.TemporaryDirectory() as tmpdir:
tl = TrajLogger(output_dir=tmpdir, stats=mock_stats)
problem_config = Configuration(self.cs, {'param_a': 0.0, 'param_b': 2, 'param_c': 'value',
'ambigous_categorical': True}) # not recoverable without json
tl.add_entry(0.9, 1, problem_config)
from_aclib2 = tl.read_traj_aclib_format(os.path.join(tmpdir, 'traj_aclib2.json'), self.cs)
from_alljson = tl.read_traj_alljson_format(os.path.join(tmpdir, 'traj.json'), self.cs)
# Wrong! but passes:
self.assertIsInstance(from_aclib2[0]['incumbent']['ambigous_categorical'], str)
# Works good for alljson:
self.assertIsInstance(from_alljson[0]['incumbent']['ambigous_categorical'], bool)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_22659 | from setuptools import setup, find_packages, Command
import os
from os import path
import subprocess
import configparser
# Get config parameters
config = configparser.ConfigParser()
config.read('setup.cfg')
pkg_name = config['metadata']['name']
pypi_server = config['netsquid']['pypi-server']
def load_readme_text():
"""Load in README file as a string."""
try:
dir_path = path.abspath(path.dirname(__file__))
with open(path.join(dir_path, 'README.md'), encoding='utf-8') as f:
return f.read()
except FileNotFoundError:
return ""
def load_requirements():
"""Load in requirements.txt as a list of strings."""
try:
dir_path = path.abspath(path.dirname(__file__))
with open(path.join(dir_path, 'requirements.txt'), encoding='utf-8') as f:
install_requires = [line.strip() for line in f.readlines()]
return install_requires
except FileNotFoundError:
return ""
class DeployCommand(Command):
"""Run command for uploading binary wheel files to NetSquid PyPi index.
"""
description = "Deploy binary wheel files to NetSquid PyPi index."
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print("Uploading binary snippet {} wheels to {} (requires authentication)"
.format(pkg_name, pypi_server))
if 'NETSQUIDCI_USER' not in os.environ:
print("ERROR: environment variable NETSQUIDCI_USER is not defined.")
return
# Check for wheel files
wheel_files = []
for f in os.listdir("dist/"):
if f.endswith(".whl"):
wheel_files.append("dist/{}".format(f))
# Upload wheel files
if len(wheel_files) > 0:
subprocess.check_output(("/usr/bin/scp", " ".join(wheel_files), "{}@{}:/srv/netsquid/pypi/{}/".format(
os.environ['NETSQUIDCI_USER'], pypi_server, pkg_name)), encoding='utf8')
else:
print("ERROR: no wheel files in dist/ to upload.")
setup(
cmdclass={"deploy": DeployCommand},
long_description=load_readme_text(),
long_description_content_type='text/markdown',
python_requires='>=3.5',
packages=find_packages(exclude=('tests', 'docs', 'examples')), # if offering a package
# py_modules=['pkgname.replace('-', '_')'], # if offering a single module file
install_requires=load_requirements(),
test_suite=pkg_name.replace('-', '_'),
zip_safe=False,
include_package_data=True,
platforms='any',
)
|
the-stack_106_22660 | import random
from pprint import pprint
import copy
def gen_fresh_name(base_name, used_names):
if base_name not in used_names:
new_name = base_name
else:
for i in range(2, 10000):
if f"{base_name}_{i}" in used_names:
continue
new_name = f"{base_name}_{i}"
break
used_names.append(new_name)
return new_name
def gen_name_mapping(old_name_sets, new_names):
# old names is a list of set
# new names is a list of strings
mapping = {}
for old_names, new_name in zip(old_name_sets, new_names):
for old_name in old_names:
mapping[old_name] = new_name
return mapping
class TableOp():
def tbl_op_dispatcher(s_exp):
if s_exp[0] == "table_or_subquery":
if s_exp[1][0] == 'table_name':
tbl = TableReference(s_exp)
if 'as' in s_exp:
tbl.alias = s_exp[3][1].replace("\'", "").replace("\"", "")
elif len(s_exp) > 2 and type(s_exp[2]) == list and s_exp[2][0] == 'table_alias':
tbl.alias = s_exp[2][1].replace("\'", "").replace("\"", "")
else:
tbl = SelectStatement(s_exp[1][0])
if 'as' in s_exp:
tbl.alias = s_exp[3][1].replace("\'", "").replace("\"", "")
elif len(s_exp) > 2 and type(s_exp[2]) == list and s_exp[2][0] == 'table_alias':
tbl.alias = s_exp[2][1].replace("\'", "").replace("\"", "")
return tbl
elif s_exp[0] == "join_clause":
return JoinTable.from_s_exp(s_exp)
class RenamedTable(TableOp):
def __init__(self, name, table, cols):
self.name = name
self.table = table
self.cols = cols # these are invented alias names
def infer_out_schema(self, schema):
# return self.table.infer_out_schema(schema)
return [set([f"{self.name}.{c}", c]) for c in self.cols]
def rename(self, schema, used_table_names):
return self.table.rename(schema, used_table_names)
def rename_ops(self, mapping):
pass
def to_rkt(self, schema):
return "(AS " + self.table.to_rkt(schema) + "\n[\"" + self.name + "\" (list " + " ".join(["\"" + col + "\"" for col in self.cols]) + ")])"
class TableReference(TableOp):
def __init__(self, s_exp):
self.name = s_exp[1][1][1]
if len(s_exp) > 2:
self.alias = s_exp[2][1]
else:
self.alias = None
def infer_out_schema(self, schema):
tbl_schema = schema[self.name]
to_return = []
for col in tbl_schema:
full_name = self.alias + "." + col if self.alias else self.name + "." + col
to_return.append(set([full_name, col]))
return to_return
def rename(self, schema, used_table_names):
tbl_schema = schema[self.name]
tbl_name = self.alias if self.alias else self.name
new_table_name = gen_fresh_name(tbl_name, used_table_names)
# collecting old names
old_names = [set([c, f"{tbl_name}.{c}"]) for c in tbl_schema]
new_names = [f"{new_table_name}.{c}" for c in tbl_schema]
mappings = gen_name_mapping(old_names, new_names)
return RenamedTable(new_table_name, self, list(tbl_schema.keys())), mappings, used_table_names
def rename_ops(self, mapping):
pass
def to_rkt(self, schema):
return "(NAMED " + self.name + ")"
class JoinTable(TableOp):
def __init__(self, left_tbl, right_tbl, join_op, constraint = None):
self.left_tbl = left_tbl
self.right_tbl = right_tbl
self.join_op = join_op
self.constraint = constraint
@classmethod
def from_s_exp(cls, s_exp):
left_tbl = TableOp.tbl_op_dispatcher(s_exp[1])
join_op = s_exp[2][1:]
right_tbl = TableOp.tbl_op_dispatcher(s_exp[3])
if "inner" in join_op:
if type(s_exp[4]) == list:
constraint = PredicateOp.pred_op_dispatcher(s_exp[4])
else:
constraint = None
cross_table = cls(left_tbl, right_tbl, 'cross')
to_return = SelectStatement([], wrap = True, wrap_tbl = cross_table, wrap_constraints = constraint)
return to_return
else:
if type(s_exp[4]) == list:
constraint = PredicateOp.pred_op_dispatcher(s_exp[4])
else:
constraint = None
return cls(left_tbl, right_tbl, join_op, constraint)
def infer_out_schema(self, schema):
left_cols = self.left_tbl.infer_out_schema(schema)
right_cols = self.right_tbl.infer_out_schema(schema)
left_cols_set = set()
right_cols_set = set()
for col_set in left_cols:
left_cols_set = left_cols_set.union(col_set)
for col_set in right_cols:
right_cols_set = right_cols_set.union(col_set)
duplicate_col_names = left_cols_set & right_cols_set
if hasattr(self, "alias") and self.alias:
name = self.alias
else:
name = None
used_names = set()
non_referrables = [set() for i in range(len(left_cols + right_cols))]
for index, colset in enumerate(left_cols + right_cols):
for col_name in colset:
if col_name in used_names:
non_referrables[index].add(col_name)
else:
used_names.add(col_name)
to_return = left_cols + right_cols
if not name:
to_return = [{c for c in nameset if c not in non_refs} for nameset, non_refs in zip(to_return, non_referrables)]
else:
to_return = [set([f'{name}.{c}' for c in nameset if c not in non_refs and "." not in c]
+ [c for c in nameset if c not in non_refs and '.' in c]) for nameset, non_refs in zip(to_return, non_referrables)]
# for index, nameset in enumerate(left_cols):
# for col in nameset:
# if col in duplicate_col_names:
# if name:
# to_return[index].union(set([col, name + "." + col]))
# else:
# to_return[index].union(set([col]))
return to_return
def rename(self, schema, used_table_names):
left_cols_old = self.left_tbl.infer_out_schema(schema)
right_cols_old = self.right_tbl.infer_out_schema(schema)
my_cols_old = self.infer_out_schema(schema)
self.left_tbl, mappings_left, used_table_names = self.left_tbl.rename(schema, used_table_names)
self.right_tbl, mappings_right, used_table_names = self.right_tbl.rename(schema, used_table_names)
left_cols_new = self.left_tbl.infer_out_schema(schema)
right_cols_new = self.right_tbl.infer_out_schema(schema)
self.rename_ops({**mappings_left, **mappings_right})
new_table_name = gen_fresh_name(self.alias if (hasattr(self, "alias") and self.alias) else "t", used_table_names)
mappings = {}
# the list of new names
column_list = []
used_col_names = []
for index, col_set in enumerate(my_cols_old):
old_full_name = [col for col in col_set if "." in col]
old_short_name = [col for col in col_set if "." not in col]
base_name = old_full_name[0].split(".")[-1] if len(old_full_name) > 0 else "c"
new_name = gen_fresh_name(base_name, used_col_names)
for old_name in col_set:
mappings[old_name] = new_table_name + "." + new_name
column_list.append(new_name)
# print("Join table mappings:")
# pprint(mappings)
return RenamedTable(new_table_name, self, column_list), mappings, used_table_names
def rename_ops(self, mapping):
if self.constraint:
self.constraint.rename_ops(mapping)
def to_rkt(self, schema):
if "left" in self.join_op:
op = "LEFT-OUTER-JOIN"
else:
op = "JOIN"
return f"({op} {self.left_tbl.to_rkt(schema)} {self.right_tbl.to_rkt(schema)} {self.constraint.to_rkt(schema) if self.constraint else ''})"
class SelectStatement(TableOp):
def __init__(self, entire_s_exp, wrap = False, wrap_tbl = None, wrap_constraints = None):
if wrap:
self.columns = [AllColumn()]
self.col_names = [None]
self.tables = [wrap_tbl]
self.tbl_names = [None]
self.subquery_tree = wrap_tbl
self.where_tree = wrap_constraints
self.group_col = None
self.having_tree = None
self.order_cols = []
self.ordering_dir = []
else:
self.from_s_exp(entire_s_exp)
def from_s_exp(self, entire_s_exp):
s_exp = entire_s_exp[1]
select_index = s_exp.index('select')
from_index = s_exp.index('from')
self.columns = []
self.col_names = []
for term in s_exp[select_index:from_index]:
if type(term) == list and term[0] == 'result_column':
self.columns.append(ColOp.col_op_dispatcher(term[1]))
if "as" in term:
alias = term[term.index("as") + 1][1]
alias = alias.replace("\'", "").replace("\"", "")
self.col_names.append(alias)
elif len(term) > 2 and type(term[2]) == list and term[2][0] == 'column_alias':
alias = term[2][1]
alias = alias.replace("\'", "").replace("\"", "")
self.col_names.append(alias)
else:
self.col_names.append(None)
self.tables = []
self.tbl_names = []
if "where" in s_exp:
where_index = s_exp.index('where')
for term in s_exp[from_index+1:where_index]:
if type(term) == list:
self.tables.append(TableOp.tbl_op_dispatcher(term))
if hasattr(self.tables[-1], "alias"):
self.tbl_names.append(self.tables[-1].alias)
else:
self.tbl_names.append(None)
else:
for term in s_exp[from_index+1:]:
if type(term) == list:
self.tables.append(TableOp.tbl_op_dispatcher(term))
if hasattr(self.tables[-1], "alias"):
self.tbl_names.append(self.tables[-1].alias)
else:
self.tbl_names.append(None)
if len(self.tables) > 1:
self.tables[0].alias = self.tbl_names[0]
self.tables[1].alias = self.tbl_names[1]
self.subquery_tree = JoinTable(self.tables[0], self.tables[1], 'cross')
if len(self.tables) > 2:
for i, tbl in enumerate(self.tables[2:]):
tbl.alias = self.tbl_names[i+2]
self.subquery_tree = JoinTable(left_tbl = self.subquery_tree, right_tbl = tbl, join_op = 'cross')
else:
self.subquery_tree = self.tables[0]
self.subquery_tree.alias = self.tbl_names[0]
if "where" in s_exp:
self.where_tree = PredicateOp.pred_op_dispatcher(s_exp[where_index+1])
else:
self.where_tree = None
if "group" in s_exp:
groupby_index = list(filter(lambda i: s_exp[i] == 'group' and s_exp[i+1] == 'by', range(len(s_exp)-2)))[0]
if "having" in s_exp:
having_index = s_exp.index('having')
self.group_col = [ColOp.col_op_dispatcher(col) for col in s_exp[groupby_index + 2:having_index]]
else:
self.group_col = [ColOp.col_op_dispatcher(col) for col in s_exp[groupby_index + 2:]]
else:
self.group_col = None
if "having" in s_exp:
having_index = s_exp.index('having')
self.having_tree = PredicateOp.pred_op_dispatcher(s_exp[having_index+1])
else:
self.having_tree = None
self.order_cols = []
self.ordering_dir = []
if 'order' in entire_s_exp:
orderby_index = list(filter(lambda i: entire_s_exp[i] == 'order' and entire_s_exp[i+1] == 'by', range(len(entire_s_exp)-2)))[0]
if "limit" in entire_s_exp:
end = entire_s_exp.index('limit')
else:
end = len(entire_s_exp)
for term in entire_s_exp[orderby_index+2:end]:
if type(term) == list and term[0] == "ordering_term":
self.order_cols.append(ColOp.col_op_dispatcher(term[1]))
if len(term) > 2:
self.ordering_dir.append(term[2])
else:
self.ordering_dir.append("asc")
if 'limit' in entire_s_exp:
limit_index = entire_s_exp.index('limit')
self.limit = entire_s_exp[limit_index+1][1][1]
def infer_out_schema(self, schema):
to_return = []
if type(self.columns[0]) == AllColumn:
schema = self.subquery_tree.infer_out_schema(schema)
            # TODO: pass through names; currently every column name in the child
            # schema is kept (an earlier variant dropped qualified "table.col" names here).
            to_return = [{col for col in nameset} for nameset in schema]
else:
for col, col_name in zip(self.columns, self.col_names):
if col_name:
to_return.append(set([col_name]))
else:
if type(col) in [UnaryColumnOp, BinaryColumnOp, ConstantColumn]:
to_return.append(set())
else:
to_return.append(set([col.name]))
if hasattr(self, "alias") and self.alias:
for col in to_return:
if col:
col.add(self.alias + "." + next(iter(col)))
return to_return
def rename(self, schema, used_table_names):
child_old_names = self.subquery_tree.infer_out_schema(schema)
my_old_names = self.infer_out_schema(schema)
self.subquery_tree, child_mappings, used_table_names = self.subquery_tree.rename(schema, used_table_names)
child_new_names = self.subquery_tree.infer_out_schema(schema)
# print("My child mappings are", child_mappings)
self.rename_ops(child_mappings, child_new_names)
new_table_name = gen_fresh_name(self.alias if (hasattr(self, "alias") and self.alias) else "t", used_table_names)
mappings = {}
column_list = []
used_col_names = []
#print("My old names are", old_names)
for index, col_set in enumerate(my_old_names):
old_full_name = [col for col in col_set if "." in col]
old_short_name = [col for col in col_set if "." not in col]
if len(old_full_name) > 0:
base_name = old_full_name[0].split(".")[-1]
elif len(old_short_name) > 0:
base_name = old_short_name[0]
else:
base_name = 'c'
new_name = gen_fresh_name(base_name, used_col_names)
for old_name in col_set:
mappings[old_name] = new_table_name + "." + new_name
column_list.append(new_name)
# print(f"My mapping is [Select {new_table_name}]:")
# pprint(mappings)
# old names overwrites new names
mappings_copy = copy.deepcopy(mappings)
for name in child_mappings:
mappings_copy[name] = child_mappings[name]
self.rename_having(mappings_copy)
return RenamedTable(new_table_name, self, column_list), mappings, used_table_names
def rename_ops(self, mapping, new_names):
replacement_col_names = []
if type(self.columns[0]) == AllColumn:
pass
else:
for col in self.columns:
col.rename_ops(mapping)
if self.where_tree:
self.where_tree.rename_ops(mapping)
if self.group_col:
for col in self.group_col:
col.rename_ops(mapping)
def rename_having(self, mapping):
if self.having_tree:
self.having_tree.rename_ops(mapping)
for col in self.order_cols:
col.rename_ops(mapping)
def to_rkt(self, schema):
where_part = "\nWHERE " + self.where_tree.to_rkt(schema) if self.where_tree else "\nWHERE (TRUE)" # TODO reference full names
group_part = "\nGROUP-BY (list " + " ".join([col.to_rkt(schema) + " " for col in self.group_col])+ ")" if self.group_col else "" # TODO: reference full names
if self.having_tree:
having_part = "\nHAVING " + self.having_tree.to_rkt(schema) # TODO reference full names
else:
if self.group_col:
having_part = "\nHAVING (TRUE)"
else:
having_part = ""
if type(self.columns[0]) == AllColumn:
select_part = "SELECT (VALS " + self.columns[0].to_rkt(schema, self.subquery_tree) + ")"
else:
select_part = "SELECT " + "(VALS " + " ".join([col.to_rkt(schema) for col in self.columns]) + ")"
return "(" + select_part + "\nFROM " + self.subquery_tree.to_rkt(schema) + where_part + " " + group_part + " " + having_part + ")"
class ColOp():
def col_op_dispatcher(s_exp):
if s_exp == "*" or s_exp == "[*]":
return AllColumn()
assert s_exp[0] == "expr"
if s_exp[1][0] == "column_name":
return Column(s_exp[1])
elif s_exp[1][0] == "literal_value":
return ConstantColumn(s_exp[1])
elif s_exp[1][0] == "function_name" and len(s_exp) == 3:
return UnaryColumnOp(s_exp)
elif '.' in s_exp:
return Column(s_exp, table = True)
elif len(s_exp) == 4:
return BinaryColumnOp(s_exp)
class Column(ColOp):
def __init__(self, s_exp, table = False, direct = False, name = None):
if direct:
self.name = name
self.table = None
elif not table:
assert s_exp[0] == "column_name"
self.name = s_exp[1][1] if type(s_exp[1][1]) == str else s_exp[1][1][0][1]
self.table = None
else:
assert s_exp[1][0] == "table_name"
self.name = s_exp[3][1][1]
self.table = s_exp[1][1][1]
def rename_ops(self, mapping):
if self.table:
new_name = mapping[self.table + "." + self.name]
else:
new_name = mapping[self.name]
self.table = new_name.split(".")[0]
self.name = new_name.split(".")[1]
def to_rkt(self, schema):
return "\"" + (self.table + "." + self.name if self.table else self.name) + "\""
class ConstantColumn(ColOp):
def __init__(self, s_exp):
assert s_exp[0] == "literal_value"
self.value = s_exp[1]
def to_rkt(self, schema):
return str(self.value)
def rename_ops(self, mapping):
pass
class UnaryColumnOp(ColOp):
def __init__(self, s_exp):
self.op = s_exp[1][1][1]
self.col_ops = []
for child in s_exp[2]:
self.col_ops.append(ColOp.col_op_dispatcher(child))
def rename_ops(self, mapping):
# If child is an all column, replace it simply with just any 1 column which can be found in mapping's values.
for child in self.col_ops:
if type(child) == AllColumn:
key = next(iter(mapping))
self.col_ops = [Column(None, False, direct = True, name = mapping[key])] # this is a hacky solution to replace count(*) with any column -- will not support NaNs!
break
else:
child.rename_ops(mapping)
def to_rkt(self, schema):
return "(" + self.op.upper() + " " + " ".join(["\"" + (col.table + "." + col.name if col.table else col.name) + "\"" for col in self.col_ops]) + ")"
class BinaryColumnOp(ColOp):
def __init__(self, s_exp):
self.op = s_exp[2]
if self.op == "==":
self.op = "="
self.left_col_op = ColOp.col_op_dispatcher(s_exp[1])
self.right_col_op = ColOp.col_op_dispatcher(s_exp[3])
def rename_ops(self, mapping):
self.left_col_op.rename_ops(mapping)
self.right_col_op.rename_ops(mapping)
def to_rkt(self, schema):
return "(BINOP " + self.left_col_op.to_rkt(schema) + " " + self.op + " " + self.right_col_op.to_rkt(schema) + ")"
class AllColumn(ColOp):
def __init__(self):
pass
def rename_ops(self, mapping):
pass
def to_rkt(self, schema, table):
table_schema = table.infer_out_schema(schema)
full_names = [{colname for colname in colset if "." in colname}.pop() for colset in table_schema]
to_return = ""
for full_name in full_names:
to_return += f"\"{full_name}\" "
return to_return[:-1]
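# Note: AllColumn expands "*" by taking one fully qualified name per column set
# of the table's inferred schema, e.g. a table aliased t with columns a and b
# (hypothetical) expands to: "t.a" "t.b"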
class PredicateOp():
def pred_op_dispatcher(s_exp):
if s_exp[0] == 'join_constraint':
return JoinPredicate(s_exp)
assert s_exp[0] == "expr"
s_exp = find_ultimate_pred(s_exp)
if 'and' in s_exp:
return AndPred(s_exp)
elif 'or' in s_exp:
return OrPred(s_exp)
elif s_exp[1][0] == 'unary_operator' and s_exp[1][1] == 'not':
return NotPred(s_exp)
elif "like" in s_exp:
return LikePredicate(s_exp)
else:
return Predicate(s_exp)
class JoinPredicate(PredicateOp):
def __init__(self, s_exp):
assert s_exp[0] == 'join_constraint'
pred_stmt = s_exp[2]
self.op = pred_stmt[2]
if self.op == '==':
self.op = '='
self.left_pred_op = ColOp.col_op_dispatcher(pred_stmt[1])
self.right_pred_op = ColOp.col_op_dispatcher(pred_stmt[3])
def rename_ops(self, mapping):
self.left_pred_op.rename_ops(mapping)
self.right_pred_op.rename_ops(mapping)
def to_rkt(self, schema):
return "(BINOP " + self.left_pred_op.to_rkt(schema) + " " + self.op + " " + self.right_pred_op.to_rkt(schema) + ")"
def find_ultimate_pred(s_exp):
assert s_exp[0] == "expr"
if type(s_exp[1][0]) == list:
return find_ultimate_pred(s_exp[1][0])
else:
return s_exp
class Predicate(PredicateOp):
def __init__(self, s_exp):
assert s_exp[0] == 'expr'
pred_stmt = s_exp
self.left_pred_op = ColOp.col_op_dispatcher(pred_stmt[1])
if pred_stmt[2] == 'not':
            self.op = " ".join(pred_stmt[2:4])  # e.g. "not in"; joined into one string so to_rkt can embed it
self.right_pred_op = ColOp.col_op_dispatcher(pred_stmt[4])
else:
self.op = pred_stmt[2]
self.right_pred_op = ColOp.col_op_dispatcher(pred_stmt[3])
def rename_ops(self, mapping):
self.left_pred_op.rename_ops(mapping)
self.right_pred_op.rename_ops(mapping)
def to_rkt(self, schema):
return "(BINOP " + self.left_pred_op.to_rkt(schema) + " " + self.op + " " + self.right_pred_op.to_rkt(schema) + ")"
class LikePredicate(Predicate):
def __init__(self, s_exp):
assert s_exp[0] == 'expr'
self.left_pred_op = ColOp.col_op_dispatcher(s_exp[1])
if s_exp[2] == 'not':
self.like = False
self.pattern = s_exp[4][1][1]
else:
self.like = True
self.pattern = s_exp[3][1][1]
def rename_ops(self, mapping):
self.left_pred_op.rename_ops(mapping)
    def to_rkt(self, schema):
        # TODO: LIKE predicates are currently stubbed out to (TRUE); the real
        # LIKEOP translations are kept below (commented) until they are enabled.
        if self.like:
            return "(TRUE)"
            # return "(LIKEOP " + self.left_pred_op.to_rkt(schema) + " \"" + self.pattern + "\")"
        else:
            return "(TRUE)"
            # return "(NOT (LIKEOP " + self.left_pred_op.to_rkt(schema) + " \"" + self.pattern + "\"))"
class AndPred(PredicateOp):
def __init__(self, s_exp):
self.left_pred_op = PredicateOp.pred_op_dispatcher(s_exp[1])
self.right_pred_op = PredicateOp.pred_op_dispatcher(s_exp[3])
def rename_ops(self, mapping):
self.left_pred_op.rename_ops(mapping)
self.right_pred_op.rename_ops(mapping)
def to_rkt(self, schema):
return "(AND " + self.left_pred_op.to_rkt(schema) + " " + self.right_pred_op.to_rkt(schema) + ")"
class OrPred(PredicateOp):
def __init__(self, s_exp):
self.left_pred_op = PredicateOp.pred_op_dispatcher(s_exp[1])
self.right_pred_op = PredicateOp.pred_op_dispatcher(s_exp[3])
def rename_ops(self, mapping):
self.left_pred_op.rename_ops(mapping)
self.right_pred_op.rename_ops(mapping)
def to_rkt(self, schema):
return "(OR " + self.left_pred_op.to_rkt(schema) + " " + self.right_pred_op.to_rkt(schema) + ")"
class NotPred(PredicateOp):
    def __init__(self, s_exp):
        pred_stmt = s_exp[2][1][0]
        self.op = pred_stmt[2]
        self.pred_op = PredicateOp.pred_op_dispatcher(pred_stmt[1])
def rename_ops(self, mapping):
self.pred_op.rename_ops(mapping)
def to_rkt(self, schema):
return "(NOT " + self.pred_op.to_rkt(schema) + ")"
|
the-stack_106_22661 | #!/usr/bin/python
#
# Copyright 2017 "OVS Performance" Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Files name:
# ovs_performance.py
#
# Description:
# Simple script to run the OVS performance tests
#
# Author:
# Eelco Chaudron
#
# Initial Created:
# 17 January 2017
#
# Notes:
# - Install the spur python module
# dnf install python-spur
# - Install the XenaPythonLib from https://github.com/fleitner/XenaPythonLib
# cd XenaPythonLib/
# sudo python setup.py install
# - Install natsort and enum modules
# pip install natsort enum34
# - Install matplotlib
# dnf install python-matplotlib
# - Install latest Scapy
# pip install scapy
# - Install netaddr
# pip install netaddr
#
# Example:
#
#
# TODOs:
# - Add tunnel test cases (Geneve and VXLAN)
# - Add check after test to see all OF flows got packets (i.e. n_packets != 0)
# - Add option to stop trying more packet sizes once maximum performance
# of link is reached (i.e. two consecutive runs @ wire speed)
# - Test to determine maximum throughput without dropping packets
# - Add option to maximize traffic rate (PPS, and/or % based on port speed)
# - Add some VLAN test cases
# - Add a Bi-directional PVP test [phy0-vf0-VM-vf1-phy1]
# - Add option to run traffic part multiple(3) times to calculate deviation,
# and add error bars to the graphs
#
#
# Imports
#
import argparse
import csv
import datetime
import inspect
import os
import logging
import numpy as np
import re
import spur
import sys
import time
#
# Imports from simple shell API
#
from dut_ssh_shell import DutSshShell
#
# Import general traffic_generator library
#
from traffic_generator_base import TrafficFlowType
from traffic_generator import TrafficGenerator, TrafficGeneratorType
#
# Imports from Matplot, by default disable the tk interface
#
import matplotlib
matplotlib.use('Agg')
#
# Imports from natural sort
#
from natsort import natsorted
#
# Imports from distutils
#
from distutils.version import StrictVersion
# In Python 2, raw_input() returns a string, and input() tries
# to run the input as a Python expression.
# Since getting a string was almost always what we wanted,
# Python 3 does that with input()
# The following line checks the Python version being used to
# stick to raw_input() for Python2 and input() for Python3
if sys.version_info[0] == 3:
raw_input = input
#
# Default configuration
#
DEFAULT_TESTER_TYPE = 'xena'
DEFAULT_TESTER_SERVER_ADDRESS = ''
DEFAULT_TESTER_INTERFACE = ''
DEFAULT_SECOND_TESTER_INTERFACE = ''
DEFAULT_DUT_ADDRESS = ''
DEFAULT_DUT_LOGIN_USER = 'root'
DEFAULT_DUT_LOGIN_PASSWORD = 'root'
DEFAULT_DUT_VM_ADDRESS = ''
DEFAULT_DUT_SECOND_VM_ADDRESS = ''
DEFAULT_DUT_VM_NIC_PCI_ADDRESS = ''
DEFAULT_DUT_VM_LOGIN_USER = 'root'
DEFAULT_DUT_VM_LOGIN_PASSWORD = 'root'
DEFAULT_PHYSICAL_INTERFACE = ''
DEFAULT_SECOND_PHYSICAL_INTERFACE = ''
DEFAULT_PACKET_LIST = '64, 128, 256, 512, 768, 1024, 1514'
DEFAULT_VIRTUAL_INTERFACE = ''
DEFAULT_SECOND_VIRTUAL_INTERFACE = ''
DEFAULT_RUN_TIME = 20
DEFAULT_STREAM_LIST = '10, 1000, 10000, 100000, 1000000'
DEFAULT_BRIDGE_NAME = 'ovs_pvp_br0'
DEFAULT_WARM_UP_TIMEOUT = 360
DEFAULT_DST_MAC_ADDRESS = '00:00:02:00:00:00'
DEFAULT_SRC_MAC_ADDRESS = '00:00:01:00:00:00'
#
# Run simple traffic test Virtual to Virtual
#
def test_v2v(nr_of_flows, packet_sizes):
v2v_tx_results = list()
v2v_rx_results = list()
cpu_results = list()
for packet_size in packet_sizes:
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] START".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_of_rules(nr_of_flows,
of_interfaces[config.virtual_interface],
of_interfaces[config.second_virtual_interface])
##################################################
lprint(" * Start packet receiver on second VM...")
start_traffic_rx_on_vm(config.dut_second_vm_address,
config.dut_second_vm_nic_pci)
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".format(config.run_time))
start_traffic_tx_on_vm(config.dut_vm_address,
nr_of_flows, packet_size)
time.sleep(config.run_time)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream on VM1...")
stop_traffic_tx_on_vm(config.dut_vm_address)
##################################################
lprint(" * Stop packet receiver on VM2...")
stop_traffic_rx_on_vm(config.dut_second_vm_address)
##################################################
lprint(" * Gathering statistics...")
of_dump_port_to_logfile(config.bridge_name)
vm_pkts_sec = get_traffic_rx_stats_from_vm(config.dut_second_vm_address)
vm_tx_pkts_sec = get_traffic_tx_stats_from_vm(config.dut_vm_address)
lprint(" - Transmit rate on VM: {:,} pps".format(vm_tx_pkts_sec))
lprint(" ! Result, average: {:,} pps".format(vm_pkts_sec))
cpu_results.append(get_cpu_monitoring_stats())
v2v_tx_results.append(vm_tx_pkts_sec)
v2v_rx_results.append(vm_pkts_sec)
##################################################
lprint(" * Restoring state for next test...")
# dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0} && ovs-appctl dpctl/del-flows"'.\
# format(config.bridge_name),
# die_on_error=True)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
flow_str = get_flow_type_short()
flow_file_str = get_flow_type_name()
create_multiple_graph(packet_sizes, {'Send Rate': v2v_tx_results,
'Receive Rate': v2v_rx_results},
"Packet size", "Packets/second",
"Virtual to Virtual with {} {} flows".format(nr_of_flows, flow_str),
"test_v2v_{}_{}".format(nr_of_flows, flow_file_str), None,
cpu_utilization={'Receive Rate': cpu_results})
create_multiple_graph(packet_sizes, {'Send Rate': v2v_tx_results,
'Receive Rate': v2v_rx_results},
"Packet size", "Packets/second",
"Virtual to Virtual with {} {} flows".format(nr_of_flows, flow_str),
"test_v2v_{}_{}_ref".format(nr_of_flows, flow_file_str),
[phy_speed], cpu_utilization={'Receive Rate': cpu_results})
return v2v_rx_results, cpu_results
#
# Run simple traffic test Physical to VM back to Physical
#
def test_p2v2p(nr_of_flows, packet_sizes):
p2v2p_results = list()
cpu_results = list()
warm_up_done = False
for packet_size in packet_sizes:
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] START".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_bidirectional_of_rules(
nr_of_flows,
of_interfaces[config.physical_interface],
of_interfaces[config.virtual_interface])
##################################################
lprint(" * Initializing packet generation...")
tester.configure_traffic_stream(config.tester_interface,
get_traffic_generator_flow(),
nr_of_flows, packet_size,
traffic_dst_mac=config.dst_mac_address,
traffic_src_mac=config.src_mac_address)
##################################################
if config.warm_up:
lprint(" * Doing flow table warm-up...")
start_vm_time = datetime.datetime.now()
start_traffic_loop_on_vm(config.dut_vm_address,
config.dut_vm_nic_pci)
tester.start_traffic(config.tester_interface)
warm_up_done = warm_up_verify(nr_of_flows * 2,
config.warm_up_timeout)
tester.stop_traffic(config.tester_interface)
if not warm_up_done:
if config.warm_up_no_fail:
stop_traffic_loop_on_vm(config.dut_vm_address)
flow_table_cool_down()
else:
sys.exit(-1)
##################################################
lprint(" * Clear all statistics...")
tester.clear_statistics(config.tester_interface)
pp_tx_start, pp_tx_drop_start, pp_rx_start, pp_rx_drop_start \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])
vp_tx_start, vp_tx_drop_start, vp_rx_start, vp_rx_drop_start \
= get_of_port_packet_stats(of_interfaces[config.virtual_interface])
##################################################
if not config.warm_up or not warm_up_done:
lprint(" * Start packet receiver on VM...")
start_traffic_loop_on_vm(config.dut_vm_address,
config.dut_vm_nic_pci)
warm_up_time = 0
else:
# warm_up_time is the total time it takes from the start of the
# VM at warm-up till we would normally start the loop back VM.
            # This value is used to remove warm-up statistics.
warm_up_time = int(np.ceil((datetime.datetime.now() -
start_vm_time).total_seconds()))
lprint(" * Determine warm op time, {} seconds...".
format(warm_up_time))
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".format(config.run_time))
tester.start_traffic(config.tester_interface)
for i in range(1, config.run_time):
time.sleep(1)
tester.take_rx_statistics_snapshot(config.tester_interface)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream...")
tester.stop_traffic(config.tester_interface)
time.sleep(1)
##################################################
lprint(" * Stop packet receiver on VM...")
stop_traffic_loop_on_vm(config.dut_vm_address)
##################################################
lprint(" * Gathering statistics...")
tester.take_statistics_snapshot(config.tester_interface)
full_tx_stats = tester.get_tx_statistics_snapshots(config.tester_interface)
full_rx_stats = tester.get_rx_statistics_snapshots(config.tester_interface)
slogger.debug(" full_tx_stats={}".format(full_tx_stats))
slogger.debug(" full_rx_stats={}".format(full_rx_stats))
pp_tx_end, pp_tx_drop_end, pp_rx_end, pp_rx_drop_end \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])
vp_tx_end, vp_tx_drop_end, vp_rx_end, vp_rx_drop_end \
= get_of_port_packet_stats(of_interfaces[config.virtual_interface])
pp_rx = pp_rx_end - pp_rx_start
pp_tx = pp_tx_end - pp_tx_start
pp_rx_drop = pp_rx_drop_end - pp_rx_drop_start
pp_tx_drop = pp_tx_drop_end - pp_tx_drop_start
vp_rx = vp_rx_end - vp_rx_start
vp_tx = vp_tx_end - vp_tx_start
vp_rx_drop = vp_rx_drop_end - vp_rx_drop_start
vp_tx_drop = vp_tx_drop_end - vp_tx_drop_start
vm_pkts_sec = get_traffic_rx_stats_from_vm(config.dut_vm_address,
skip_samples=warm_up_time)
packets_tx = full_tx_stats[sorted(full_tx_stats.keys())[-1]]['pt_total']['packets']
packets_rx = full_rx_stats[sorted(full_rx_stats.keys())[-1]]['pr_total']['packets']
lprint(" - Packets send by Tester : {:-20,}".format(packets_tx))
lprint(" - Packets received by physical: {:-20,} [Lost {:,}, Drop {:,}]".
format(pp_rx, packets_tx - pp_rx, pp_rx_drop))
lprint(" - Packets received by virtual : {:-20,} [Lost {:,}, Drop {:,}]".
format(vp_tx, pp_rx - vp_tx, vp_tx_drop))
lprint(" - Packets send by virtual : {:-20,} [Lost {:,}, Drop {:,}]".
format(vp_rx, vp_tx - vp_rx, vp_rx_drop))
lprint(" - Packets send by physical : {:-20,} [Lost {:,}, Drop {:,}]".
format(pp_tx, vp_rx - pp_tx, pp_tx_drop))
lprint(" - Packets received by Tester : {:-20,} [Lost {:,}]".
format(packets_rx, pp_tx - packets_rx))
lprint(" - Receive rate on VM: {:,} pps".format(vm_pkts_sec))
rx_pkts_sec = get_packets_per_second_from_traffic_generator_rx_stats(full_rx_stats)
lprint(" ! Result, average: {:,} pps".format(rx_pkts_sec))
p2v2p_results.append(rx_pkts_sec)
cpu_results.append(get_cpu_monitoring_stats())
##################################################
lprint(" * Restoring state for next test...")
tester.unconfigure_traffic_stream(config.tester_interface)
# dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0} && ovs-appctl dpctl/del-flows"'.\
# format(config.bridge_name),
# die_on_error=True)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
create_single_graph(packet_sizes, p2v2p_results,
"Packet size", "Packets/second",
"Physical to Virtual back to Physical with {} {} flows".
format(nr_of_flows, get_flow_type_short()),
"test_p2v2p_{}_{}".format(nr_of_flows,
get_flow_type_name()),
phy_speed,
cpu_utilization=cpu_results)
return p2v2p_results, cpu_results
#
# Run simple traffic test Physical to VM
#
def test_p2v(nr_of_flows, packet_sizes):
p2v_results = list()
cpu_results = list()
warm_up_done = False
for packet_size in packet_sizes:
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] START".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_of_rules(nr_of_flows,
of_interfaces[config.physical_interface],
of_interfaces[config.virtual_interface])
##################################################
lprint(" * Initializing packet generation...")
tester.configure_traffic_stream(config.tester_interface,
get_traffic_generator_flow(),
nr_of_flows, packet_size,
traffic_dst_mac=config.dst_mac_address,
traffic_src_mac=config.src_mac_address)
##################################################
if config.warm_up:
lprint(" * Doing flow table warm-up...")
tester.start_traffic(config.tester_interface)
warm_up_done = warm_up_verify(nr_of_flows, config.warm_up_timeout)
tester.stop_traffic(config.tester_interface)
if not warm_up_done:
if config.warm_up_no_fail:
flow_table_cool_down()
else:
sys.exit(-1)
##################################################
lprint(" * Clear all statistics...")
tester.clear_statistics(config.tester_interface)
pp_rx_start \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])[2]
vp_tx_start, vp_tx_drop_start \
= get_of_port_packet_stats(of_interfaces[config.virtual_interface])[0:2]
##################################################
lprint(" * Start packet receiver on VM...")
start_traffic_rx_on_vm(config.dut_vm_address,
config.dut_vm_nic_pci)
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".format(config.run_time))
tester.start_traffic(config.tester_interface)
for i in range(1, config.run_time):
time.sleep(1)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream...")
tester.stop_traffic(config.tester_interface)
time.sleep(1)
##################################################
lprint(" * Stop packet receiver on VM...")
stop_traffic_rx_on_vm(config.dut_vm_address)
##################################################
lprint(" * Gathering statistics...")
tester.take_tx_statistics_snapshot(config.tester_interface)
full_tx_stats = tester.get_tx_statistics_snapshots(config.tester_interface)
slogger.debug(" full_tx_stats={}".format(full_tx_stats))
pp_rx_end \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])[2]
vp_tx_end, vp_tx_drop_end \
= get_of_port_packet_stats(of_interfaces[config.virtual_interface])[0:2]
pp_rx = pp_rx_end - pp_rx_start
vp_tx = vp_tx_end - vp_tx_start
vp_tx_drop = vp_tx_drop_end - vp_tx_drop_start
vm_pkts_sec = get_traffic_rx_stats_from_vm(config.dut_vm_address)
packets_tx = full_tx_stats[sorted(full_tx_stats.keys())[-1]]['pt_total']['packets']
lprint(" - Packets send by Tester {:,}".format(packets_tx))
lprint(" - Packets received by physical port {:,} [Lost {:,}]".
format(pp_rx, packets_tx - pp_rx))
lprint(" - Packets received by virtual port {:,} [Lost {:,}]".
format(vp_tx, pp_rx - vp_tx))
lprint(" - Packets dropped by virtual port {:,}".
format(vp_tx_drop))
lprint(" ! Result, average: {:,} pps".format(vm_pkts_sec))
p2v_results.append(vm_pkts_sec)
cpu_results.append(get_cpu_monitoring_stats())
##################################################
lprint(" * Restoring state for next test...")
tester.unconfigure_traffic_stream(config.tester_interface)
# dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0} && ovs-appctl dpctl/del-flows"'.\
# format(config.bridge_name),
# die_on_error=True)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
create_single_graph(packet_sizes, p2v_results,
"Packet size", "Packets/second",
"Physical to Virtual with {} {} flows".
format(nr_of_flows, get_flow_type_short()),
"test_p2v_{}_{}".
format(nr_of_flows, get_flow_type_name()),
phy_speed, cpu_utilization=cpu_results)
return p2v_results, cpu_results
#
# Run simple traffic test Physical to Physical
#
def test_p2p(nr_of_flows, packet_sizes):
p2p_results = list()
cpu_results = list()
warm_up_done = False
for packet_size in packet_sizes:
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] START".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_of_rules(nr_of_flows,
of_interfaces[config.physical_interface],
of_interfaces[config.second_physical_interface])
##################################################
lprint(" * Initializing packet generation...")
tester.configure_traffic_stream(config.tester_interface,
get_traffic_generator_flow(),
nr_of_flows, packet_size,
traffic_dst_mac=config.dst_mac_address,
traffic_src_mac=config.src_mac_address)
##################################################
if config.warm_up:
lprint(" * Doing flow table warm-up...")
tester.start_traffic(config.tester_interface)
warm_up_done = warm_up_verify(nr_of_flows, config.warm_up_timeout)
tester.stop_traffic(config.tester_interface)
if not warm_up_done:
if config.warm_up_no_fail:
flow_table_cool_down()
else:
sys.exit(-1)
##################################################
lprint(" * Clear all statistics...")
tester.clear_statistics(config.tester_interface)
tester.clear_statistics(config.second_tester_interface)
pp_tx_start, pp_tx_drop_start, pp_rx_start, pp_rx_drop_start \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])
rpp_tx_start, rpp_tx_drop_start, rpp_rx_start, rpp_rx_drop_start \
= get_of_port_packet_stats(of_interfaces[config.second_physical_interface])
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".format(config.run_time))
tester.start_traffic(config.tester_interface)
for i in range(1, config.run_time):
time.sleep(1)
tester.take_rx_statistics_snapshot(config.second_tester_interface)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream...")
tester.stop_traffic(config.tester_interface)
time.sleep(1)
##################################################
lprint(" * Gathering statistics...")
tester.take_tx_statistics_snapshot(config.tester_interface)
tester.take_rx_statistics_snapshot(config.second_tester_interface)
full_tx_stats = tester.get_tx_statistics_snapshots(config.tester_interface)
full_rx_stats = tester.get_rx_statistics_snapshots(config.second_tester_interface)
slogger.debug(" full_tx_stats={}".format(full_tx_stats))
slogger.debug(" full_rx_stats={}".format(full_rx_stats))
pp_tx_end, pp_tx_drop_end, pp_rx_end, pp_rx_drop_end \
= get_of_port_packet_stats(of_interfaces[config.physical_interface])
rpp_tx_end, rpp_tx_drop_end, rpp_rx_end, rpp_rx_drop_end \
= get_of_port_packet_stats(of_interfaces[config.second_physical_interface])
pp_rx = pp_rx_end - pp_rx_start
pp_rx_drop = pp_rx_drop_end - pp_rx_drop_start
rpp_tx = rpp_tx_end - rpp_tx_start
rpp_tx_drop = rpp_tx_drop_end - rpp_tx_drop_start
packets_tx = full_tx_stats[sorted(full_tx_stats.keys())[-1]]['pt_total']['packets']
packets_rx = full_rx_stats[sorted(full_rx_stats.keys())[-1]]['pr_total']['packets']
lprint(" - Packets send by Tester : {:-20,}".format(packets_tx))
lprint(" - Packets received by physical : {:-20,} [Lost {:,}, Drop {:,}]".
format(pp_rx, packets_tx - pp_rx, pp_rx_drop))
lprint(" - Packets send by second physical: {:-20,} [Lost {:,}, Drop {:,}]".
format(rpp_tx, pp_rx - rpp_tx, rpp_tx_drop))
lprint(" - Packets received by Tester : {:-20,} [Lost {:,}]".
format(packets_rx, rpp_tx - packets_rx))
rx_pkts_sec = get_packets_per_second_from_traffic_generator_rx_stats(full_rx_stats)
lprint(" ! Result, average: {:,} pps".format(rx_pkts_sec))
p2p_results.append(rx_pkts_sec)
cpu_results.append(get_cpu_monitoring_stats())
##################################################
lprint(" * Restoring state for next test...")
tester.unconfigure_traffic_stream(config.tester_interface)
# dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0} && ovs-appctl dpctl/del-flows"'.\
# format(config.bridge_name),
# die_on_error=True)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
create_single_graph(packet_sizes, p2p_results,
"Packet size", "Packets/second",
"Physical to Physical with {} {} flows".
format(nr_of_flows, get_flow_type_short()),
"test_p2p_{}_{}".
format(nr_of_flows, get_flow_type_name()),
phy_speed, cpu_utilization=cpu_results)
return p2p_results, cpu_results
#
# Run VXLAN test
#
# TODO: This is only tested on OVS-DPDK, need modular support
# so it will work on kernel (hw offload) datapath.
#
# Also needs encap test, and encap-decap test.
#
# Also note that this test will not distribute the
# load among rx queue's as the outer IP+UDP headers
# do not change. Varying the source UDP port of the
# outer header would solve this, but we have no more
# modifiers. We could do a destination IP only OF
# rule and use the source IP counters for src UDP.
#
def test_vxlan(nr_of_flows, packet_sizes):
vxlan_results = list()
cpu_results = list()
tunnel_bridge = (config.bridge_name + "_tterm")[:15]
for packet_size in packet_sizes:
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] START".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
##################################################
lprint(" * Get bridge MAC address...")
tunnel_dst_mac = get_of_bridge_mac_address(tunnel_bridge)
##################################################
lprint(" * Create OVS OpenFlow rules...")
create_ovs_of_rules(nr_of_flows,
of_interfaces['vxlan0'],
of_interfaces[config.virtual_interface])
##################################################
if ovs_data_path == "netdev":
#
# For DPDK data path only
#
lprint(" * Setup neighbor entry...")
dut_shell.dut_exec('sh -c "ovs-appctl tnl/neigh/set {} '
' 3.1.1.2 00:00:00:00:00:01"'.format(tunnel_bridge),
die_on_error=True)
dut_shell.dut_exec('sh -c "ip addr add 3.1.1.1/24 dev {0};'
'ip link set {0} up"'.format(tunnel_bridge),
die_on_error=True)
##################################################
lprint(" * Initializing packet generation...")
tester.configure_traffic_stream(config.tester_interface,
TrafficFlowType.vxlan_l3_ipv4,
nr_of_flows, packet_size,
tunnel_dst_mac=tunnel_dst_mac,
traffic_dst_mac=config.dst_mac_address)
##################################################
lprint(" * Clear all statistics...")
tester.clear_statistics(config.tester_interface)
pp_rx_start \
= get_of_port_packet_stats(of_interfaces[config.physical_interface],
bridge=tunnel_bridge)[2]
vp_tx_start, vp_tx_drop_start \
= get_of_port_packet_stats(of_interfaces[config.virtual_interface])[0:2]
##################################################
lprint(" * Start packet receiver on VM...")
start_traffic_rx_on_vm(config.dut_vm_address,
config.dut_vm_nic_pci)
##################################################
lprint(" * Start CPU monitoring on DUT...")
start_cpu_monitoring()
##################################################
lprint(" * Start packet generation for {0} seconds...".
format(config.run_time))
tester.start_traffic(config.tester_interface)
for i in range(1, config.run_time):
time.sleep(1)
##################################################
lprint(" * Stop CPU monitoring on DUT...")
stop_cpu_monitoring()
##################################################
lprint(" * Stopping packet stream...")
tester.stop_traffic(config.tester_interface)
time.sleep(1)
##################################################
lprint(" * Stop packet receiver on VM...")
stop_traffic_rx_on_vm(config.dut_vm_address)
##################################################
lprint(" * Gathering statistics...")
tester.take_tx_statistics_snapshot(config.tester_interface)
full_tx_stats = tester.get_tx_statistics_snapshots(config.tester_interface)
slogger.debug(" full_tx_stats={}".format(full_tx_stats))
pp_rx_end = get_of_port_packet_stats(of_interfaces[config.physical_interface],
bridge=tunnel_bridge)[2]
vp_tx_end, vp_tx_drop_end \
= get_of_port_packet_stats(of_interfaces[config.virtual_interface])[0:2]
pp_rx = pp_rx_end - pp_rx_start
vp_tx = vp_tx_end - vp_tx_start
vp_tx_drop = vp_tx_drop_end - vp_tx_drop_start
vm_pkts_sec = get_traffic_rx_stats_from_vm(config.dut_vm_address)
packets_tx = full_tx_stats[sorted(full_tx_stats.keys())[-1]]['pt_total']['packets']
lprint(" - Packets send by Tester {:,}".format(packets_tx))
lprint(" - Packets received by physical port {:,} [Lost {:,}]".
format(pp_rx, packets_tx - pp_rx))
lprint(" - Packets received by virtual port {:,} [Lost {:,}]".
format(vp_tx, pp_rx - vp_tx))
lprint(" - Packets dropped by virtual port {:,}".
format(vp_tx_drop))
lprint(" ! Result, average: {:,} pps".format(vm_pkts_sec))
vxlan_results.append(vm_pkts_sec)
cpu_results.append(get_cpu_monitoring_stats())
##################################################
lprint(" * Restoring state for next test...")
tester.unconfigure_traffic_stream(config.tester_interface)
##################################################
lprint("- [TEST: {0}(flows={1}, packet_size={2})] END".
format(inspect.currentframe().f_code.co_name,
nr_of_flows, packet_size))
create_single_graph(packet_sizes, vxlan_results,
"Packet size", "Packets/second",
"VXLAN Tunnel with {} {} flows".
format(nr_of_flows, get_flow_type_short()),
"test_vxlan_{}_{}".
format(nr_of_flows, get_flow_type_name()),
phy_speed, cpu_utilization=cpu_results)
return vxlan_results, cpu_results
#
# Count datapath flows
#
def get_active_datapath_flows():
if ovs_data_path == "netdev":
cmd = 'sh -c "ovs-appctl dpctl/show netdev@ovs-netdev | ' \
'grep flows | awk \'{print $2}\'"'
else:
cmd = 'sh -c "ovs-appctl dpctl/show system@ovs-system | ' \
'grep flows | awk \'{print $2}\'"'
result = dut_shell.dut_exec(cmd, die_on_error=True)
return int(result.stdout_output)
#
# Warm up verification
#
def warm_up_verify(requested_flows, timeout):
run_time = 0
active_flows = 0
while active_flows < requested_flows:
run_time += 1
if timeout != 0 and run_time >= timeout:
lprint("ERROR: Failed to complete warm-up in time ({} seconds)!".
format(timeout))
return False
time.sleep(1)
active_flows = get_active_datapath_flows()
#
# Flows exist, we can continue now
#
return True
#
# Wait for datapath flows to flush
#
def flow_table_cool_down(failure_fatal=True):
run_time = 0
active_flows = 0
if config.warm_up or not config.no_cool_down:
lprint(" * Doing flow table cool-down...")
active_flows = get_active_datapath_flows()
while active_flows > 32:
run_time += 1
if run_time >= 20:
if failure_fatal:
lprint("ERROR: Failed to complete cool-down in time "
"(20 seconds)!")
sys.exit(-1)
else:
lprint("WARNING: Failed to complete cool-down in time "
"(20 seconds)!")
break
active_flows = get_active_datapath_flows()
time.sleep(1)
#
# Flush all OVS flows
#
def flush_ovs_flows():
# data_path = "system@ovs-system"
#
    # For now we only flush the openflow rules for netdev, because as soon as
# we flush the datapath rules no more flows get added to the datapath.
#
# However other vendors are also struggling when flushing the datapath.
#
# if ovs_data_path == "netdev":
# data_path = "netdev@ovs-netdev"
#
# cmd = 'sh -c "ovs-ofctl del-flows {0}; ' \
# 'ovs-appctl dpctl/del-flows {1}"'. \
# format(config.bridge_name, data_path)
cmd = 'sh -c "ovs-ofctl del-flows {0}"'. \
format(config.bridge_name)
dut_shell.dut_exec(cmd, die_on_error=True)
flow_table_cool_down(failure_fatal=False)
time.sleep(2)
#
# Dump openflow port statistics to logfile
#
def of_dump_port_to_logfile(bridge):
return dut_shell.dut_exec("ovs-ofctl dump-ports {}".format(bridge),
die_on_error=True)
#
# Start packet receive application on VM
#
def start_traffic_rx_on_vm(vm, pci):
cpu_mask = ((1 << (config.dut_vm_nic_queues + 1)) - 1)
pmd_cpu_mask = cpu_mask & ~0x1
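    # For example, with dut_vm_nic_queues=2 this gives cpu_mask=0x7 and
    # pmd_cpu_mask=0x6, i.e. core 0 is excluded from the forwarding coremask.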
disable_hw_vlan = " --disable-hw-vlan" if vm_dpdk_version < \
StrictVersion('18.2.0') else ""
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup sh -c " \
r' "(while sleep 1; do echo show port stats 0; done | ' \
r" testpmd -c {5:x} -n 4 --socket-mem 2048,0 -w {3} -- "\
r" --burst 64 -i --rxq={4} --txq={4} --rxd={8} " \
r" --txd={9} --auto-start --forward-mode=rxonly " \
r' --port-topology=chained --coremask={6:x}{7})" ' \
r" &>results.txt &'". \
format(vm, config.dut_vm_user, config.dut_vm_password, pci,
config.dut_vm_nic_queues, cpu_mask, pmd_cpu_mask,
disable_hw_vlan, config.dut_vm_nic_rxd,
config.dut_vm_nic_txd)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
time.sleep(2)
#
# Stop packet receive application on VM
#
def stop_traffic_rx_on_vm(vm, **kwargs):
die = kwargs.pop("die", True)
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'kill -SIGINT `pidof testpmd`'". \
format(vm, config.dut_vm_user, config.dut_vm_password)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=die)
#
# Start packet receive and loop application on VM
#
def start_traffic_loop_on_vm(vm, pci):
cpu_mask = ((1 << (config.dut_vm_nic_queues + 1)) - 1)
pmd_cpu_mask = cpu_mask & ~0x1
mac_swap = " --forward-mode=macswap" if config.mac_swap else ""
disable_hw_vlan = " --disable-hw-vlan" if vm_dpdk_version < \
StrictVersion('18.2.0') else ""
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup sh -c " \
r' "(while sleep 1; do echo show port stats 0; done | ' \
r" testpmd -c {5:x} -n 4 --socket-mem 2048,0 -w {3} -- "\
r" --burst 64 -i --rxq={4} --txq={4} --rxd={9} " \
r" --txd={10} --coremask={6:x} --auto-start " \
r' --port-topology=chained{7}{8})" ' \
r" &>results.txt &'". \
format(vm, config.dut_vm_user, config.dut_vm_password, pci,
config.dut_vm_nic_queues, cpu_mask, pmd_cpu_mask,
mac_swap, disable_hw_vlan, config.dut_vm_nic_rxd,
config.dut_vm_nic_txd)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
time.sleep(2)
#
# Stop packet receive and loop application on VM
#
def stop_traffic_loop_on_vm(vm):
stop_traffic_rx_on_vm(vm)
#
# Get traffic receive stats from application on VM
#
def get_traffic_rx_stats_from_vm(vm, **kwargs):
skip_samples = kwargs.pop("skip_samples", 0)
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
"'cat ~/results.txt | grep -E \"Rx-pps|Tx-pps\"'". \
format(vm, config.dut_vm_user, config.dut_vm_password)
result = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
pkt_rates = [int(re.sub(r'^\s*Rx-pps:\s*', '', s))
for s in re.findall(r'^\s*Rx-pps:\s*\d+$', result.stdout_output,
re.MULTILINE)]
if skip_samples > 0:
pkt_rates = pkt_rates[skip_samples:]
if len(pkt_rates) <= 10:
lprint("ERROR: No engough elements to calculate packet rate!")
sys.exit(-1)
pkt_rates = pkt_rates[5:-5]
return sum(pkt_rates) / len(pkt_rates)
#
# Start packet generation application on VM
#
def start_traffic_tx_on_vm(vm, nr_of_flows, packet_size):
if config.flow_type == 'L2':
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup /bin/trafgen -c 3 -n 4 -- -p 1 --benchmark " \
r"--flows-per-stream 1 --bursts-per-stream 1 --streams {3} " \
r"--src-mac {5} --dst-mac {6} " \
r"--src-ip 1.0.0.0 --dst-ip 2.0.0.0 --packet-size {4} " \
r"--vary-src mac --vary-dst mac -s ~/results.txt" \
r"> /dev/null 2>&1 &'". \
format(vm, config.dut_vm_user,
config.dut_vm_password, nr_of_flows, packet_size,
config.src_mac_address, config.dst_mac_address)
elif config.flow_type == 'L3':
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup /bin/trafgen -c 3 -n 4 -- -p 1 --benchmark " \
r"--flows-per-stream 1 --bursts-per-stream 1 --streams {3} " \
r"--src-mac {5} --dst-mac {6} " \
r"--src-ip 1.0.0.0 --dst-ip 2.0.0.0 --packet-size {4} " \
r"--vary-src ip --vary-dst ip -s ~/results.txt" \
r"> /dev/null 2>&1 &'". \
format(vm, config.dut_vm_user,
config.dut_vm_password, nr_of_flows, packet_size,
config.src_mac_address, config.dst_mac_address)
elif config.flow_type == 'L4-UDP':
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'rm -f ~/results.txt; " \
r" nohup /bin/trafgen -c 3 -n 4 -- -p 1 --benchmark " \
r"--flows-per-stream 1 --bursts-per-stream 1 --streams {3} " \
r"--src-mac {5} --dst-mac {6} " \
r"--src-ip 1.0.0.0 --dst-ip 2.0.0.0 --packet-size {4} " \
r"--src-port 0 --dst-port 0 " \
r"--vary-src port --vary-dst port -s ~/results.txt" \
r"> /dev/null 2>&1 &'". \
format(vm, config.dut_vm_user,
config.dut_vm_password, nr_of_flows, packet_size,
config.src_mac_address, config.dst_mac_address)
else:
raise ValueError("No support for this protocol on!!")
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
#
# Stop packet generation application on VM
#
def stop_traffic_tx_on_vm(vm, **kwargs):
die = kwargs.pop("die", True)
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'kill -SIGINT `pidof trafgen`'". \
format(vm, config.dut_vm_user, config.dut_vm_password)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=die)
#
# Get traffic transmit stats from application on VM
#
def get_traffic_tx_stats_from_vm(vm):
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"'cat ~/results.txt | grep port0.tx_packets'". \
format(vm, config.dut_vm_user, config.dut_vm_password)
result = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
return get_packets_per_second_from_pkt_counters(result.stdout_output, 5)
#
# Get packets per second from traffic generator rx stats
#
def get_packets_per_second_from_traffic_generator_rx_stats(rx_stats):
avg = cnt = 0
for timestamp in natsorted(list(rx_stats.keys()))[2:-2]:
stats = rx_stats[timestamp]
pps = stats['pr_total']['pps']
avg += pps
cnt += 1
return avg / cnt
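#
# Note: both snapshot-averaging helpers (rx and tx) drop the first two and
# last two samples ([2:-2]) so that ramp-up and ramp-down intervals are less
# likely to skew the reported average.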
#
# Get packets per second from traffic generator tx stats
#
def get_packets_per_second_from_traffic_generator_tx_stats(tx_stats):
avg = cnt = 0
for timestamp in natsorted(list(tx_stats.keys()))[2:-2]:
stats = tx_stats[timestamp]
pps = stats['pt_total']['pps']
avg += pps
cnt += 1
return avg / cnt
#
# Get packets per second from a string with packet count values.
# It can strip a number of entries from the start and the end, and then
# return the average value.
#
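# Illustrative example (hypothetical counter dump, strip=1): counters parsed
# as [0, 100, 210, 330, 440]; stripping one entry from each end leaves
# [100, 210, 330], and the reported rate is the average per-sample delta,
# ((210 - 100) + (330 - 210)) / 2 = 115 pkts/sec.
#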
def get_packets_per_second_from_pkt_counters(counters, strip):
slogger.info("get_pacets_per_second_from_counters(\"{}\", {})".
format(counters, strip))
counters_clean = re.sub(r'.+:\s?', '', counters)
    counter_list = list(map(int, counters_clean.split()))
    if strip < 0 or (len(counter_list) - (strip * 2)) < 2:
        lprint("ERROR: Not enough elements to calculate packet rate!")
sys.exit(-1)
if strip > 0:
del counter_list[:strip]
del counter_list[-strip:]
slogger.info("[gppsfc] Work list \"{}\"".format(counter_list))
pkts_sec = 0
for i in range(1, len(counter_list)):
pkts_sec = pkts_sec + (counter_list[i] - counter_list[i - 1])
pkts_sec = pkts_sec / (len(counter_list) - 1)
slogger.info("[gppsfc] pkts/sec = {:,}".format(pkts_sec))
return pkts_sec
#
# Add OVS OpenFlow rules
#
def create_ovs_of_rules(number_of_flows, src_port, dst_port, **kwargs):
if config.flow_type == 'L2':
create_ovs_l2_of_rules(number_of_flows, src_port, dst_port, **kwargs)
elif config.flow_type == 'L3':
create_ovs_l3_of_rules(number_of_flows, src_port, dst_port, **kwargs)
elif config.flow_type == 'L4-UDP':
create_ovs_l4_of_rules(number_of_flows, src_port, dst_port, **kwargs)
else:
raise ValueError("No support for this protocol!!")
#
# Add OVS OpenFlow rules
#
def create_ovs_bidirectional_of_rules(number_of_flows, src_port, dst_port, **kwargs):
if config.flow_type == 'L2':
create_ovs_bidirectional_l2_of_rules(number_of_flows, src_port, dst_port, **kwargs)
elif config.flow_type == 'L3':
create_ovs_bidirectional_l3_of_rules(number_of_flows, src_port, dst_port, **kwargs)
elif config.flow_type == 'L4-UDP':
create_ovs_bidirectional_l4_of_rules(number_of_flows, src_port, dst_port, **kwargs)
else:
raise ValueError("No support for this protocol!!")
#
# Add OVS OpenFlow rule from physical 2 physical, and reverse
#
def create_ovs_bidirectional_of_phy_rules(src_port, dst_port):
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0}"'.
format(config.bridge_name),
die_on_error=True)
lprint(" * Create two OpenFlow physical to physical rules...")
cmd = "ovs-ofctl add-flow {0} in_port={1},action={2} && " \
"ovs-ofctl add-flow {0} in_port={2},action={1}". \
format(config.bridge_name,
src_port, dst_port)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
lprint(" * Verify that of physical port flows exists...")
result \
= dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | grep -v \'NXST_FLOW reply\'"'.
format(config.bridge_name),
die_on_error=True)
if result.output.count('\n') != 2:
lprint("ERROR: Only 2 flows should exsits, but there are {1}!".
format(result.output.count('\n') - 1))
sys.exit(-1)
#
# Add OVS OpenFlow rule from physical 2 physical
#
def create_ovs_of_phy_rule(src_port, dst_port, **kwargs):
clear_rules = kwargs.pop("clear_rules", True)
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
flush_ovs_flows()
lprint(" * Create OpenFlow physical to physical rules...")
cmd = "ovs-ofctl add-flow {0} in_port={1},action={2}". \
format(config.bridge_name, src_port, dst_port)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
lprint(" * Verify that of physical port flows exists...")
result \
= dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | grep -v \'NXST_FLOW reply\'"'.
format(config.bridge_name),
die_on_error=True)
if result.output.count('\n') != 1:
lprint("ERROR: Only 2 flows should exsits, but there are {1}!".
format(result.output.count('\n') - 1))
sys.exit(-1)
#
# Add OVS L2 OpenFlow rules
#
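# With the default destination MAC of 00:00:02:00:00:00 and, e.g., 3 flows the
# generated rules look roughly like (illustrative):
#   in_port=<src>,dl_dst=00:00:02:00:00:00,action=<dst>
#   in_port=<src>,dl_dst=00:00:02:00:00:01,action=<dst>
#   in_port=<src>,dl_dst=00:00:02:00:00:02,action=<dst>
#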
def create_ovs_l2_of_rules(number_of_flows, src_port, dst_port, **kwargs):
total_nr_of_flows = kwargs.pop("total_number_of_flows", number_of_flows)
clear_rules = kwargs.pop("clear_rules", True)
mac_swap = kwargs.pop("mac_swap", False)
base_mac = mac_2_int(config.dst_mac_address if not mac_swap
else config.src_mac_address) & 0xffffff000000
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
flush_ovs_flows()
if config.debug or config.debug_dut_shell:
of_dump_port_to_logfile(config.bridge_name)
lprint(" * Create {} L2 OpenFlow rules...".format(number_of_flows))
cmd = "python -c 'for i in range({4}, {0}): " \
"print \"add in_port={2}," \
"dl_dst={{0:02x}}:{{1:02x}}:{{2:02x}}:{{3:02x}}:{{4:02x}}:{{5:02x}}," \
"action={3}\".format((i >> 40) & 0xff, (i >> 32) & 0xff, (i >> 24) " \
"& 0xff, (i >> 16) & 0xff, (i >> 8) & 0xff, i & 0xff)'" \
" | ovs-ofctl add-flow {1} -". \
format(number_of_flows + base_mac, config.bridge_name,
src_port, dst_port, base_mac)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
if total_nr_of_flows != 0:
lprint(" * Verify requested number of flows exists...")
result \
= dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | grep -v \'NXST_FLOW reply\' | wc -l"'.
format(config.bridge_name),
die_on_error=True)
if int(result.stdout_output) != total_nr_of_flows:
lprint("ERROR: Only {0} flows should exsits, but there are {1}!".
format(number_of_flows, int(result.stdout_output)))
sys.exit(-1)
#
# Add OVS Bidirectional L2 OpenFlow rules
#
def create_ovs_bidirectional_l2_of_rules(number_of_flows, src_port, dst_port, **kwargs):
create_ovs_l2_of_rules(number_of_flows,
src_port,
dst_port)
create_ovs_l2_of_rules(number_of_flows,
dst_port,
src_port,
total_number_of_flows=number_of_flows * 2,
clear_rules=False,
mac_swap=config.mac_swap)
#
# Add OVS L3 OpenFlow rules
#
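# The rules match on incrementing source/destination IPv4 pairs; with the
# default ipv4_start of 0x01000000 and, e.g., 2 flows the generated rules look
# roughly like (illustrative):
#   in_port=<src>,eth_type(0x800),nw_src=1.0.0.0,nw_dst=2.0.0.0,action=<dst>
#   in_port=<src>,eth_type(0x800),nw_src=1.0.0.1,nw_dst=2.0.0.1,action=<dst>
#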
def create_ovs_l3_of_rules(number_of_flows, src_port, dst_port, **kwargs):
total_nr_of_flows = kwargs.pop("total_number_of_flows", number_of_flows)
clear_rules = kwargs.pop("clear_rules", True)
ip_start_offset = kwargs.pop("ipv4_start", 0x01000000)
if number_of_flows > 1000000:
lprint("ERROR: Maximum of 1,000,000 L3 flows are supported!")
sys.exit(-1)
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
flush_ovs_flows()
if config.debug or config.debug_dut_shell:
of_dump_port_to_logfile(config.bridge_name)
lprint(" * Create {} L3 OpenFlow rules...".format(number_of_flows))
cmd = "python -c 'for i in range({4}, {0}): " \
"print \"add in_port={2}," \
"eth_type(0x800),nw_src={{}}.{{}}.{{}}.{{}},nw_dst={{}}.{{}}.{{}}.{{}}," \
"action={3}\".format(" \
"(i >> 24) & 0xff, (i >> 16) & 0xff," \
"(i >> 8) & 0xff, i & 0xff," \
"((i + 0x01000000) >> 24) & 0xff, ((i + 0x01000000) >> 16) & 0xff," \
"((i + 0x01000000) >> 8) & 0xff, (i + 0x01000000) & 0xff)'" \
" | ovs-ofctl add-flow {1} -". \
format(number_of_flows + ip_start_offset, config.bridge_name,
src_port, dst_port, ip_start_offset)
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
if total_nr_of_flows != 0:
lprint(" * Verify requested number of flows exists...")
result \
= dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | grep -v \'NXST_FLOW reply\' | wc -l"'.
format(config.bridge_name),
die_on_error=True)
if int(result.stdout_output) != total_nr_of_flows:
lprint("ERROR: Only {0} flows should exsits, but there are {1}!".
format(number_of_flows, int(result.stdout_output)))
sys.exit(-1)
#
# Add OVS Bidirectional L3 OpenFlow rules
#
def create_ovs_bidirectional_l3_of_rules(number_of_flows, src_port, dst_port, **kwargs):
clear_rules = kwargs.pop("clear_rules", True)
total_nr_of_flows = kwargs.pop("total_number_of_flows", number_of_flows * 2)
ip_start_offset = kwargs.pop("ipv4_start", 0x01000000)
create_ovs_l3_of_rules(number_of_flows,
src_port,
dst_port,
clear_rules=clear_rules,
total_number_of_flows=0,
ipv4_start=ip_start_offset)
create_ovs_l3_of_rules(number_of_flows,
dst_port,
src_port,
clear_rules=False,
total_number_of_flows=total_nr_of_flows,
ipv4_start=ip_start_offset)
#
# Add OVS OpenFlow rules for the /16 flow ranges we create
#
def create_ovs_bidirectional_l3_of_slash_16_rules(number_of_flows,
src_port, dst_port):
create_ovs_l3_of_slash_16_rules(number_of_flows,
src_port,
dst_port)
create_ovs_l3_of_slash_16_rules(number_of_flows,
dst_port,
src_port,
total_number_of_flows=number_of_flows * 2,
clear_rules=False)
def create_ovs_l3_of_slash_16_rules(number_of_flows,
src_port, dst_port,
**kwargs):
total_nr_of_flows = kwargs.pop("total_number_of_flows", number_of_flows)
clear_rules = kwargs.pop("clear_rules", True)
if number_of_flows > 255:
lprint("ERROR: Maximum of 255 /16 flows are supported!")
sys.exit(-1)
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
flush_ovs_flows()
if config.debug or config.debug_dut_shell:
of_dump_port_to_logfile(config.bridge_name)
lprint(" * Create {} L3 /16 OpenFlow rules...".format(number_of_flows))
cmd = "python -c 'for i in range(0, {0}): " \
"print \"add in_port={2}," \
"eth_type(0x800),nw_src=1.{{0}}.0.0/16,nw_dst=2.{{0}}.0.0/16," \
"action={3}\".format(i)'" \
" | ovs-ofctl add-flow {1} -". \
format(number_of_flows, config.bridge_name,
src_port, dst_port)
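    # Illustrative first lines, assuming in_port 10 and output port (action) 20:
    #
    #   add in_port=10,eth_type(0x800),nw_src=1.0.0.0/16,nw_dst=2.0.0.0/16,action=20
    #   add in_port=10,eth_type(0x800),nw_src=1.1.0.0/16,nw_dst=2.1.0.0/16,action=20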
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
if total_nr_of_flows != 0:
lprint(" * Verify requested number of flows exists...")
result \
= dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | grep -v \'NXST_FLOW reply\' | wc -l"'.
format(config.bridge_name),
die_on_error=True)
if int(result.stdout_output) != total_nr_of_flows:
lprint("ERROR: Only {0} flows should exsits, but there are {1}!".
format(number_of_flows, int(result.stdout_output)))
sys.exit(-1)
#
# Add OVS L4 OpenFlow rules
#
def create_ovs_l4_of_rules(number_of_flows, src_port, dst_port, **kwargs):
total_nr_of_flows = kwargs.pop("total_number_of_flows", number_of_flows)
clear_rules = kwargs.pop("clear_rules", True)
if number_of_flows > 1000000:
lprint("ERROR: Maximum of 1,000,000 L4 flows are supported!")
sys.exit(-1)
if clear_rules:
lprint(" * Clear all OpenFlow/Datapath rules on bridge \"{}\"...".
format(config.bridge_name))
dut_shell.dut_exec('sh -c "ovs-ofctl del-flows {0}"'.
format(config.bridge_name),
die_on_error=True)
flush_ovs_flows()
if config.debug or config.debug_dut_shell:
of_dump_port_to_logfile(config.bridge_name)
lprint(" * Create {} L4 OpenFlow rules...".format(number_of_flows))
cmd = "python -c 'for i in range(0, {0}): " \
"print \"add in_port={2}," \
"udp,udp_src={{0}},udp_dst={{0}}," \
"action={3}\".format(i)'" \
" | ovs-ofctl add-flow {1} -". \
format(number_of_flows, config.bridge_name,
src_port, dst_port)
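    # Illustrative first line; the UDP source and destination ports both equal
    # the flow index (in_port 10 and output port 20 are examples):
    #
    #   add in_port=10,udp,udp_src=0,udp_dst=0,action=20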
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
if total_nr_of_flows != 0:
lprint(" * Verify requested number of flows exists...")
result \
= dut_shell.dut_exec('sh -c "ovs-ofctl dump-flows {0} | grep -v \'NXST_FLOW reply\' | wc -l"'.
format(config.bridge_name),
die_on_error=True)
if int(result.stdout_output) != total_nr_of_flows:
lprint("ERROR: Only {0} flows should exsits, but there are {1}!".
format(number_of_flows, int(result.stdout_output)))
sys.exit(-1)
#
# Add OVS Bidirectional L4 OpenFlow rules
#
def create_ovs_bidirectional_l4_of_rules(number_of_flows, src_port, dst_port, **kwargs):
create_ovs_l4_of_rules(number_of_flows,
src_port,
dst_port)
create_ovs_l4_of_rules(number_of_flows,
dst_port,
src_port,
total_number_of_flows=number_of_flows * 2,
clear_rules=False)
#
# Add test bridge setup
#
def create_ovs_bridge():
lprint("- Configuring bridge...")
if "dpdk" in config.physical_interface:
dpdk = True
else:
dpdk = False
#
# Delete bridge if existing
#
dut_shell.dut_exec("ovs-vsctl -- --if-exists del-br {0} "
"-- --if-exists del-br {1}".
format(config.bridge_name,
(config.bridge_name + "_tterm")[:15]),
die_on_error=True)
#
# Create bridge and set data path if needed
#
command = "ovs-vsctl add-br {0} ".format(config.bridge_name)
if dpdk:
command += "-- set Bridge {} datapath_type=netdev ".format(config.bridge_name)
#
# Add basic ports (1x ingress, and 1x egress)
#
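    # Fixed OpenFlow port numbers are requested so the test flows can refer to
    # them directly: 10/11 for the physical ports, 20/21 for the virtual ports.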
command += "-- add-port {0} {1} -- set Interface {1} ofport_request=10 ". \
format(config.bridge_name, config.physical_interface)
if config.virtual_interface:
command += "-- add-port {0} {1} -- set Interface {1} ofport_request=20 ". \
format(config.bridge_name, config.virtual_interface)
if dpdk:
command += "-- set Interface {0} type=dpdk " . \
format(config.physical_interface)
if config.virtual_interface:
command += "-- set Interface {0} type=dpdkvhostuser ". \
format(config.virtual_interface)
if config.pmd_rxq_affinity is not None:
command += "-- set Interface {0} options:n_rxq={1} " \
"other_config:pmd-rxq-affinity={2} " . \
format(config.physical_interface,
config.pmd_rxq_affinity.count(':'), config.pmd_rxq_affinity)
if config.virtual_interface:
command += "-- set Interface {0} options:n_rxq={1} " \
"other_config:pmd-rxq-affinity={2} ". \
format(config.virtual_interface,
config.pmd_rxq_affinity.count(':'),
config.pmd_rxq_affinity)
#
# Add second virtual ports if vv test is enabled
#
if not config.skip_vv_test:
command += "-- add-port {0} {1} -- set Interface {1} ofport_request=21 ". \
format(config.bridge_name,
config.second_virtual_interface)
if dpdk:
command += "-- set Interface {0} type=dpdkvhostuser ". \
format(config.second_virtual_interface)
if config.pmd_rxq_affinity is not None:
command += "-- set Interface {0} options:n_rxq={1} " \
"other_config:pmd-rxq-affinity={2} ". \
format(config.second_virtual_interface,
config.pmd_rxq_affinity.count(':'), config.pmd_rxq_affinity)
#
# Add second physical port if pp test is enabled
#
if config.run_pp_test:
command += "-- add-port {0} {1} -- set Interface {1} ofport_request=11 ". \
format(config.bridge_name,
config.second_physical_interface)
if dpdk:
command += "-- set Interface {0} type=dpdk ". \
format(config.second_physical_interface)
if config.pmd_rxq_affinity is not None:
command += "-- set Interface {0} options:n_rxq={1} " \
"other_config:pmd-rxq-affinity={2} ". \
format(config.second_physical_interface,
config.pmd_rxq_affinity.count(':'),
config.pmd_rxq_affinity)
#
# If we are running DPDK and it's 2.7 or higher we need to specify the PCI
# addresses for the physical ports.
#
if dpdk and StrictVersion(ovs_version) >= StrictVersion('2.7.0'):
if not check_pci_address_string(config.physical_interface_pci) or \
(config.run_pp_test and not
check_pci_address_string(config.second_physical_interface_pci)):
lprint("ERROR: For OVS >=2.7 you must supply a valid PCI address "
"for the physical interfaces!")
sys.exit(-1)
command += "-- set Interface {0} options:dpdk-devargs={1} ". \
format(config.physical_interface,
config.physical_interface_pci)
if config.second_physical_interface:
command += "-- set Interface {0} options:dpdk-devargs={1} " . \
format(config.second_physical_interface,
config.second_physical_interface_pci)
#
# Configure all the above!
#
dut_shell.dut_exec(command, die_on_error=True)
if config.debug or config.debug_dut_shell:
dut_shell.dut_exec("ovs-vsctl show", die_on_error=True)
#
    # If this is DPDK, you might need to start the VM for things to start
    # working. So we pause here, asking for a restart of the VM.
#
if dpdk and config.virtual_interface:
print("!!! Finished configuring the OVS bridge, please restart the Virtual Machine !!!")
raw_input("Press Enter to continue...")
#
# Add VXLAN test bridge setup
#
def create_ovs_vxlan_bridge():
lprint("- Configuring bridge...")
if "dpdk" in config.physical_interface:
dpdk = True
else:
dpdk = False
tunnel_bridge = (config.bridge_name + "_tterm")[:15]
#
# Delete bridge if existing
#
dut_shell.dut_exec("ovs-vsctl -- --if-exists del-br {0} "
"-- --if-exists del-br {1}".
format(config.bridge_name, tunnel_bridge),
die_on_error=True)
#
# Create bridge and set data path if needed
#
command = "ovs-vsctl add-br {0} -- add-br {1} " \
.format(config.bridge_name, tunnel_bridge)
if dpdk:
command += "-- set Bridge {} datapath_type=netdev ".format(config.bridge_name)
command += "-- set Bridge {} datapath_type=netdev ".format(tunnel_bridge)
#
# Add basic ports (1x ingress, and 1x egress)
#
command += "-- add-port {3} {1} -- set Interface {1} ofport_request=10 " \
"-- add-port {0} {2} -- set Interface {2} ofport_request=20 " \
"-- add-port {0} vxlan0 -- set Interface vxlan0 ofport_request=30 " \
"-- set interface vxlan0 type=vxlan options:remote_ip=3.1.1.2 options:key=69 ". \
format(config.bridge_name,
config.physical_interface,
config.virtual_interface,
tunnel_bridge)
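    # Note that the physical port is attached to the tunnel termination bridge,
    # while the virtual port and the vxlan0 port (remote_ip 3.1.1.2, VNI key 69)
    # are attached to the main test bridge.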
if dpdk:
command += "-- set Interface {0} type=dpdk " \
"-- set Interface {1} type=dpdkvhostuser ". \
format(config.physical_interface, config.virtual_interface)
if config.pmd_rxq_affinity is not None:
command += "-- set Interface {0} options:n_rxq={2} " \
"other_config:pmd-rxq-affinity={3} " \
"-- set Interface {1} options:n_rxq={2} " \
"other_config:pmd-rxq-affinity={3} ". \
format(config.physical_interface, config.virtual_interface,
config.pmd_rxq_affinity.count(':'), config.pmd_rxq_affinity)
#
# If we are running DPDK and it's 2.7 or higher we need to specify the PCI
# addresses for the physical ports.
#
if dpdk and StrictVersion(ovs_version) >= StrictVersion('2.7.0'):
if not check_pci_address_string(config.physical_interface_pci) or \
(config.run_pp_test and not
check_pci_address_string(config.second_physical_interface_pci)):
lprint("ERROR: For OVS >=2.7 you must supply a valid PCI address "
"for the physical interfaces!")
sys.exit(-1)
command += "-- set Interface {0} options:dpdk-devargs={1} ". \
format(config.physical_interface,
config.physical_interface_pci)
#
# Configure all the above!
#
dut_shell.dut_exec(command, die_on_error=True)
if config.debug or config.debug_dut_shell:
dut_shell.dut_exec("ovs-vsctl show", die_on_error=True)
#
    # If this is DPDK, you might need to start the VM for things to start
    # working. So we pause here, asking for a restart of the VM.
#
if dpdk:
print("!!! Finished configuring the OVS bridge, please restart the Virtual Machine !!!")
raw_input("Press Enter to continue...")
#
# Get bridge port numbers
#
def get_bridge_port_numbers(tunnel=False):
lprint("- Get OpenFlow and DataPath port numbers...")
of = dict()
dp = dict()
#
# Get mapping from openvswitch
#
command = 'sh -c "ovs-ofctl show {0} && ovs-appctl dpctl/show"'.\
format(config.bridge_name)
if tunnel:
tunnel_bridge = (config.bridge_name + "_tterm")[:15]
command = 'sh -c "ovs-ofctl show {0} && ovs-ofctl show {1} && '\
'ovs-appctl dpctl/show"'.\
format(config.bridge_name, tunnel_bridge)
result = dut_shell.dut_exec(command, die_on_error=True)
#
    # Create the list of interfaces; the second interfaces are optional,
    # so check if they exist before adding them.
#
interfaces = [config.physical_interface]
if config.virtual_interface != '':
interfaces.append(config.virtual_interface)
if config.second_virtual_interface != '':
interfaces.append(config.second_virtual_interface)
if config.second_physical_interface != '':
interfaces.append(config.second_physical_interface)
if tunnel:
interfaces.append('vxlan0')
for interface in interfaces:
m = re.search('\s*([0-9]*)\({0}\): addr:.*'.format(interface),
result.output)
if m:
of[interface] = m.group(1)
else:
lprint("ERROR: Can't figure out OpenFlow interface for {0}".
format(interface))
sys.exit(-1)
if interface == 'vxlan0':
continue
m = re.search('\s*port\s*([0-9]*):\s*{0}\s*.*'.format(interface),
result.output)
if m:
dp[interface] = m.group(1)
else:
lprint("ERROR: Can't figure out OpenFlow datapath interface for {0}"
.format(interface))
sys.exit(-1)
slogger.info("OpenFlow ports; {}".format(of))
slogger.info("DataPath ports; {}".format(dp))
return of, dp
#
# Get OpenFlow port packet stats
#
def get_of_port_packet_stats(of_port, **kwargs):
bridge = kwargs.pop("bridge", config.bridge_name)
port_stats = of_dump_port_to_logfile(bridge)
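    # The regular expressions below parse "ovs-ofctl dump-ports" style output,
    # which typically looks like (values are illustrative):
    #
    #   port  1: rx pkts=1234, bytes=..., drop=0, errs=0, ...
    #            tx pkts=5678, bytes=..., drop=0, errs=0, ...
    #
    # A '?' counter means the value is unavailable and is treated as 0 below.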
m = re.search('\s.*port *{}: rx pkts=.*\n.*tx pkts=([0-9?]*), '.format(of_port),
port_stats.output)
if m:
if '?' in m.group(1):
tx = int(0)
else:
tx = int(m.group(1))
else:
lprint("ERROR: Can't get transmitted packet stats for OpenFlow "
"port {0} on brige \"{1}\"".
format(of_port, config.bridge_name))
sys.exit(-1)
m = re.search('\s.*port *{}: rx pkts=.*\n.*tx pkts=.* drop=([0-9?]*), .*'.format(of_port),
port_stats.output)
if m:
if '?' in m.group(1):
tx_drop = int(0)
else:
tx_drop = int(m.group(1))
else:
lprint("ERROR: Can't get transmitted drop stats for OpenFlow "
"port {0} on brige \"{1}\"".
format(of_port, config.bridge_name))
sys.exit(-1)
m = re.search('\s.*port *{}: rx pkts=([0-9?]*), .*'.format(of_port),
port_stats.output)
if m:
if '?' in m.group(1):
rx = int(0)
else:
rx = int(m.group(1))
else:
lprint("ERROR: Can't get received packet stats for OpenFlow "
"port {0} on brige \"{1}\"".
format(of_port, config.bridge_name))
sys.exit(-1)
m = re.search('\s.*port *{}: rx pkts=.* drop=([0-9?]*), .*'.format(of_port),
port_stats.output)
if m:
if '?' in m.group(1):
rx_drop = int(0)
else:
rx_drop = int(m.group(1))
else:
lprint("ERROR: Can't get received drop stats for OpenFlow port {0} on brige \"{1}\""
.format(of_port, config.bridge_name))
sys.exit(-1)
slogger.debug("OF port {0} stats: tx = {1}, tx_drop = {2}, rx = {3}, tx_drop = {3}".
format(of_port, tx, tx_drop, rx, rx_drop))
return tx, tx_drop, rx, rx_drop
#
# Convert a MAC address string to an integer
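# Example (illustrative): mac_2_int("52:54:00:00:00:01") returns 0x525400000001.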
#
def mac_2_int(mac_str):
return int(mac_str.replace(":", ""), 16)
#
# Check tester interface number string
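# For the Xena tester this is a "module,port" pair (e.g. "1,0"); for T-Rex it
# is a single non-negative port number (e.g. "0").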
#
def tester_interface_valid(interface):
if config.tester_type == 'xena':
xport = interface.split(',')
if len(xport) != 2:
return False
else:
xport = interface
for number in xport:
try:
if int(number) < 0:
return False
except ValueError:
return False
return True
#
# Create a single graph
#
def create_single_graph(x, y, x_label, y_label, title,
file_name, phy_speed, **kwargs):
cpu_util = kwargs.pop("cpu_utilization", None)
show_idle_cpu = kwargs.pop("show_cpu_idle", False)
slogger.info("create_single_graph[{}], x = {} : y = {}".
format(title, x, y))
if cpu_util is None:
fig, pps = plt.subplots()
pps_plot = pps
else:
fig, pps = plt.subplots(2)
pps_plot = pps[0]
fig.set_figwidth(2 * fig.get_figwidth(), forward=True)
fig.set_figheight(2 * fig.get_figheight(), forward=True)
#
# Main graph showing utilization
#
pps_plot.set_title(title)
pps_plot.set_xlabel(x_label)
pps_plot.set_ylabel(y_label)
pps_plot.grid(True)
pps_plot.autoscale(enable=True, axis='both', tight=False)
pps_plot.plot(x, y, 'o-', label='average')
pps_plot.ticklabel_format(axis='y', style='plain')
pps_plot.grid(b=True, which='minor', color='k', linestyle=':', alpha=0.2)
pps_plot.minorticks_on()
#
# Add second scaled graph showing line utilization
#
if phy_speed > 0:
util_y = list()
for i in range(0, len(x)):
util_y.append(eth_utilization(phy_speed,
x[i], y[i]))
util = pps_plot.twinx()
util.plot(x, util_y, '.:', color='r')
util.set_ylim(0, 100)
util.set_ylabel('Link Utilization in % ({} Gbit/s)'.
format(phy_speed / 1000000000), color='r')
util.tick_params('y', colors='r')
#
# Adding CPU utilization if requested
#
if cpu_util is not None:
cpu_plot = pps[1]
x_cpu = np.arange(len(x))
bar_width = 0.20
ovs_y_values = list()
usr_y_values = list()
nice_y_values = list()
sys_y_values = list()
iowait_y_values = list()
irq_y_values = list()
soft_y_values = list()
steal_y_values = list()
guest_y_values = list()
gnice_y_values = list()
idle_y_values = list()
for i in range(0, len(x)):
ovs_y_values.append(cpu_util[i]['ovs_cpu'])
usr_y_values.append(cpu_util[i]['sys_usr'])
nice_y_values.append(cpu_util[i]['sys_nice'])
sys_y_values.append(cpu_util[i]['sys_sys'])
iowait_y_values.append(cpu_util[i]['sys_iowait'])
irq_y_values.append(cpu_util[i]['sys_irq'])
soft_y_values.append(cpu_util[i]['sys_soft'])
steal_y_values.append(cpu_util[i]['sys_steal'])
guest_y_values.append(cpu_util[i]['sys_guest'])
gnice_y_values.append(cpu_util[i]['sys_gnice'])
idle_y_values.append(cpu_util[i]['sys_idle'])
y_cpu_values = [usr_y_values, nice_y_values, sys_y_values,
iowait_y_values, irq_y_values, soft_y_values,
steal_y_values, guest_y_values, gnice_y_values,
idle_y_values]
y_cpu_colors = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5']
y_cpu_labels = ['usr', 'nice', 'sys', 'iowait', 'irq',
'soft', 'steal', 'guest', 'gnice', 'idle']
cpu_plot.bar(x_cpu, ovs_y_values, bar_width, label='OVS',
color='b', edgecolor='b')
bottom = [0] * len(x)
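        # Stack the per-state system CPU bars; the (1, 0)[show_idle_cpu] index
        # below drops the trailing 'idle' bucket from the stack unless
        # show_idle_cpu is set.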
for i in range(0, len(y_cpu_values) - (1, 0)[show_idle_cpu]):
cpu_plot.bar(x_cpu + bar_width, y_cpu_values[i], bar_width,
color=y_cpu_colors[i], edgecolor=y_cpu_colors[i],
bottom=bottom, label=y_cpu_labels[i])
bottom = [a + b for a, b in zip(bottom, y_cpu_values[i])]
if show_idle_cpu:
total_util = bottom[0]
else:
total_util = bottom[0] + y_cpu_values[i+1][0]
cpu_plot.set_title("Open vSwitch, and system CPU usage (max {:.0f}%)".
format(total_util))
cpu_plot.set_xticks(x_cpu + bar_width)
cpu_plot.set_xticklabels(x)
cpu_plot.set_ylabel("CPU utilization")
cpu_plot.set_xlabel(x_label)
cpu_plot.grid(b=True, which='major')
cpu_plot.grid(b=True, which='minor', color='k', linestyle=':', alpha=0.2)
cpu_plot.minorticks_on()
cpu_plot.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#
# Due to bug in matplotlib we need to disable some np errors
#
old_np_seterr = np.seterr(divide='ignore', invalid='ignore')
#
# Final tweaking
#
fig.tight_layout()
if cpu_util is not None:
box = cpu_plot.get_position()
cpu_plot.set_position([box.x0, box.y0, box.width * 0.9, box.height])
#
# Write picture
#
if file_name is not None and file_name != "":
plt.savefig(file_name + '.png')
#
# Show picture if requested, and clear the graph
#
if config.gui:
plt.show()
plt.close()
np.seterr(**old_np_seterr)
#
# Single graph with multiple results
#
def create_multiple_graph(x, y, x_label, y_label,
title, file_name, phy_speed, **kwargs):
fixed_packet_size = kwargs.pop("fixed_packet_size", None)
cpu_util = kwargs.pop("cpu_utilization", None)
show_idle_cpu = kwargs.pop("show_cpu_idle", True)
slogger.info("create_multiple_graph[{}], x = {} : y = {}".
format(title, x, y))
if cpu_util is None:
fig, pps = plt.subplots()
pps_plot = pps
else:
fig = plt.figure()
#
        # This split looked nice, until we used all packet sizes
        # and multiple flows.
#
# pps_plot = plt.subplot2grid((2, 2), (0, 0), colspan=2)
# cpu_plot = plt.subplot2grid((2, 2), (1, 0))
# sys_plot = plt.subplot2grid((2, 2), (1, 1))
# fig.set_figwidth(2 * fig.get_figwidth(), forward = True)
# fig.set_figheight(2 * fig.get_figheight(), forward = True)
pps_plot = plt.subplot2grid((3, 2), (0, 0), colspan=2)
cpu_plot = plt.subplot2grid((3, 2), (1, 0), colspan=2)
sys_plot = plt.subplot2grid((3, 2), (2, 0), colspan=2)
fig.set_figwidth(2 * fig.get_figwidth(), forward=True)
fig.set_figheight(3 * fig.get_figheight(), forward=True)
#
# Main graph showing utilization
#
pps_plot.set_title(title)
pps_plot.set_xlabel(x_label)
pps_plot.set_ylabel(y_label)
pps_plot.grid(True)
pps_plot.autoscale(enable=True, axis='both', tight=False)
pps_plot.ticklabel_format(axis='y', style='plain')
pps_plot.grid(b=True, which='minor', color='k', linestyle=':', alpha=0.2)
pps_plot.minorticks_on()
for y_run in natsorted(list(y.keys())):
pps_plot.plot(x, y[y_run], 'o-', label="{}".format(y_run))
#
# Add maximum PPS for the given physical speed
#
if phy_speed is not None:
for speed in phy_speed:
y_values = list()
for x_val in x:
if fixed_packet_size is None:
y_values.append(eth_max_pps(speed, x_val))
else:
y_values.append(eth_max_pps(speed, fixed_packet_size))
pps_plot.plot(x, y_values, '.:', label="Max PPS {}G".
format(speed / 1000000000))
pps_plot.legend(loc='upper right', shadow=True)
#
# Add CPU util information if given
#
if cpu_util is not None:
#
# OVS CPU utilization
#
x_cpu = np.arange(len(x))
bar_width = 0.11
cpu_plot.set_title("Open vSwitch CPU utilization")
ovs_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
for i in range(0, len(x)):
for key in list(cpu_util.keys()):
ovs_y_values[key].append(cpu_util[key][i]['ovs_cpu'])
if len(cpu_util) % 2 != 0:
align = 'center'
else:
align = 'edge'
for i, key in enumerate(natsorted(list(cpu_util.keys()))):
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
x_pos = (x_cpu - (len(cpu_util) / 2 * bar_width)) + (i * bar_width)
cpu_plot.bar(x_pos, ovs_y_values[key], bar_width, align=align,
color=colors[i % len(colors)], edgecolor="none")
cpu_plot.set_xlim(0 - (len(cpu_util) * bar_width),
len(x_cpu) - 1 + (len(cpu_util) * bar_width))
cpu_plot.set_xticks(x_cpu)
cpu_plot.set_xticklabels(x, ha='center')
cpu_plot.set_ylabel("CPU utilization")
cpu_plot.set_xlabel(x_label)
cpu_plot.grid(b=True, which='major')
cpu_plot.grid(b=True, which='minor', color='k', linestyle=':', alpha=0.2)
cpu_plot.minorticks_on()
#
# System CPU utilization
#
sys_plot.set_title("Total System CPU utilization")
usr_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
nice_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
sys_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
iowait_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
irq_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
soft_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
steal_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
guest_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
gnice_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
idle_y_values = dict(list(zip(list(cpu_util.keys()),
[[] for i in range(len(cpu_util))])))
y_cpu_values = [usr_y_values, nice_y_values, sys_y_values,
iowait_y_values, irq_y_values, soft_y_values,
steal_y_values, guest_y_values, gnice_y_values,
idle_y_values]
y_cpu_labels = ['usr', 'nice', 'sys', 'iowait', 'irq',
'soft', 'steal', 'guest', 'gnice', 'idle']
y_cpu_keys = ['sys_usr', 'sys_nice', 'sys_sys', 'sys_iowait', 'sys_irq',
'sys_soft', 'sys_steal', 'sys_guest', 'sys_gnice', 'sys_idle']
y_cpu_colors = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5']
for i in range(0, len(x)):
for key in list(cpu_util.keys()):
for j, y_cpu_value in enumerate(y_cpu_values):
y_cpu_value[key].append(cpu_util[key][i][y_cpu_keys[j]])
if len(cpu_util) % 2 != 0:
align = 'center'
else:
align = 'edge'
for i, key in enumerate(natsorted(list(cpu_util.keys()))):
x_pos = (x_cpu - (len(cpu_util) / 2 * bar_width)) + (i * bar_width)
bottom = [0] * len(x)
for j in range(0, len(y_cpu_values) - (1, 0)[show_idle_cpu]):
sys_plot.bar(x_pos, y_cpu_values[j][key], bar_width, align=align,
color=y_cpu_colors[j],
label=y_cpu_labels[j] if i == 0 else "",
bottom=bottom)
bottom = [a + b for a, b in zip(bottom, y_cpu_values[j][key])]
sys_plot.set_xlim(0 - (len(cpu_util) * bar_width),
len(x_cpu) - 1 + (len(cpu_util) * bar_width))
sys_plot.set_xticks(x_cpu)
sys_plot.set_xticklabels(x, ha='center')
sys_plot.set_ylabel("CPU utilization")
sys_plot.set_xlabel(x_label)
sys_plot.grid(b=True, which='major')
sys_plot.grid(b=True, which='minor', color='k', linestyle=':', alpha=0.2)
sys_plot.minorticks_on()
handles, labels = sys_plot.get_legend_handles_labels()
sys_plot.legend(list(reversed(handles)),
list(reversed(labels)),
loc='center left', bbox_to_anchor=(1, 0.5))
#
# Due to bug in matplotlib we need to disable some np errors
#
old_np_seterr = np.seterr(divide='ignore', invalid='ignore')
#
# Final tweaking
#
fig.tight_layout()
if cpu_util is not None:
box = sys_plot.get_position()
sys_plot.set_position([box.x0, box.y0, box.width * 0.90, box.height])
#
# Write picture
#
if file_name is not None and file_name != "":
plt.savefig(file_name + '.png')
#
# Show picture if requested, and clear the graph
#
if config.gui:
plt.show()
plt.close()
np.seterr(**old_np_seterr)
#
# Try to get phy speed from physical port
#
def get_physical_port_speed():
speed = 10000000000
result = dut_shell.dut_exec("ethtool {}".format(config.physical_interface))
m = re.search('\s*Speed: ([0-9]*)Mb.*', result.output)
if m:
speed = int(m.group(1)) * 1000000
else:
slogger.info("Can't determine physical interface \"{0}\" its speed!".
format(config.physical_interface))
slogger.info("Set physical interface \"{0}\" speed to {1} bits/second".
format(config.physical_interface, speed))
return speed
#
# Calculate wire utilization based on packet size and packets per second
#
# Packet size = 12 bytes IFG +
# 8 bytes preamble +
# x bytes packet +
# 4 bytes CRC
#
def eth_utilization(line_speed_bps, packet_size, packets_per_second):
packet_size_bits = (12 + 8 + packet_size + 4) * 8
packet_speed_second = packet_size_bits * packets_per_second
util = int(float(packet_speed_second) / line_speed_bps * 100)
if util > 100:
util = 100
return util
#
# Calculate max packets per second based on packet size and wire speed
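# Worked example: a 64 byte packet occupies 12 + 8 + 64 + 4 = 88 bytes
# (704 bits) on the wire, so eth_max_pps(10 * 10**9, 64) is roughly
# 14.2 million packets per second.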
#
def eth_max_pps(line_speed_bps, packet_size):
packet_size_bits = (12 + 8 + packet_size + 4) * 8
return line_speed_bps / packet_size_bits
#
# Print results in CSV
#
def csv_write_test_results(csv_handle, test_name, flow_size_list,
packet_size_list, test_results, cpu_results):
if config.flow_type == 'L2':
flow_type = ", L2 flows"
elif config.flow_type == 'L3':
flow_type = ", L3 flows"
elif config.flow_type == 'L4-UDP':
flow_type = ", L4-udp flows"
else:
raise ValueError("No support for this protocol!!")
csv_handle.writerow([test_name + flow_type])
if len(test_results) > 0:
csv_handle.writerow(['', 'Packet size'])
csv_handle.writerow(['Number of flows'] + packet_size_list)
for flow in flow_size_list:
results = [flow]
for i in range(0, len(packet_size_list)):
results.append(test_results[flow][i])
csv_handle.writerow(results)
results = ["cpu_{}".format(flow)]
for i in range(0, len(packet_size_list)):
results.append(cpu_results[flow][i])
csv_handle.writerow(results)
for i in range(0, 4):
csv_handle.writerow([])
#
# Check a string of list entries, and make sure they are valid numbers
# in ascending order.
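# Example (illustrative): check_list("10,100,1000", 1, 1000000) returns True,
# while check_list("1000,10", 1, 1000000) returns False (not ascending).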
#
def check_list(list_string, min_val, max_val):
last_entry = 0
list = list_string.split(',')
if len(list) == 0:
return False
for entry in list:
try:
value = int(entry)
except ValueError:
return False
if value < min_val or value > max_val or last_entry >= value:
return False
last_entry = value
return True
#
# Check the string to be a valid PCI address in the format "0000:02:00.0".
# In addition we also allow the ",txq_inline=" option needed by some vendors
# as a workaround to get L3 forwarding to work.
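# Example (illustrative): "0000:02:00.0" and "0000:02:00.0,txq_inline=1" are
# accepted, while "02:00.0" is rejected.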
#
def check_pci_address_string(pci_address):
if pci_address is None:
return False
if re.match("^\d{4}:\d{2}:[0-9A-Fa-f]{2}\.\d{1}$", pci_address) is None and \
re.match("^\d{4}:\d{2}:[0-9A-Fa-f]{2}\.\d{1},txq_inline=\d+$", pci_address) is None:
return False
return True
#
# Mimic the normal print command, but also send the output shown on the
# console to the log file. Only do this if the log file option is enabled,
# else we end up with the same text on the console twice.
#
def lprint(msg):
print (msg)
if config.logging is not None:
slogger.info(msg)
#
# Start Perf recording on DUT
#
def start_perf_recording(test_name):
if not config.perf:
return
perf_path = "/root/ovs_test_perf_data/run_{}".format(run_start_time)
perf_file = "{}/{}.perf".format(perf_path, test_name)
cmd = r"mkdir -p {0}; " \
r"nohup perf record -o '{1}' -g -p `pidof ovs-vswitchd` &> /dev/null &". \
format(perf_path, perf_file)
lprint(" * Start perf recording on DUT ({})...".format(perf_file))
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
#
# Stop Perf recording on DUT
#
def stop_perf_recording():
if not config.perf:
return
lprint(" * Stop perf recording on DUT...")
cmd = r"kill -s INT `pidof perf`"
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
#
# Start CPU monitoring on DUT
#
def start_cpu_monitoring():
#
# pidstat -u -t -p `pidof ovs-vswitchd`,`pidof ovsdb-server` 1
# PIDSTAT for all qemu?
# mpstat -P ALL 1
# kill -SIGINT `pidof pidstat`
cmd = r"rm -f /var/tmp/cpu_ovs.txt /var/tmp/cpu_mpstat.txt; " \
r"nohup pidstat -u -t -p `pidof ovs-vswitchd`,`pidof ovsdb-server` 1 > /var/tmp/cpu_ovs.txt 2> /dev/null & " \
r"nohup mpstat -P ALL 1 > /var/tmp/cpu_mpstat.txt 2> /dev/null &"
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
#
# Stop CPU monitoring on DUT
#
def stop_cpu_monitoring(**kwargs):
die = kwargs.pop("die", True)
cmd = r"kill -s INT `pidof pidstat`; " \
r"kill -s INT `pidof mpstat`"
dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=die)
#
# Get CPU monitoring stats
#
def get_cpu_monitoring_stats():
cmd = r"cat /var/tmp/cpu_ovs.txt"
results = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
# Average: 0 - 6982 0.00 0.05 0.00 0.05 - |__ovs-vswitchd
regex = re.compile("^Average:\s+[0-9]+\s+-\s+[0-9]+\s+[0-9\.]+\s+[0-9\.]+\s+[0-9\.]+\s+([0-9\.]+).+", re.MULTILINE)
ovs_cpu_usage = float(0)
for match in regex.finditer(results.stdout_output):
ovs_cpu_usage += float(match.group(1))
cmd = r"cat /var/tmp/cpu_mpstat.txt"
results = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd], die_on_error=True)
cpu_raw_stats = results.stdout_output
cpu_usr = float(0)
cpu_nice = float(0)
cpu_sys = float(0)
cpu_iowait = float(0)
cpu_irq = float(0)
cpu_soft = float(0)
cpu_steal = float(0)
cpu_guest = float(0)
cpu_gnice = float(0)
cpu_idle = float(0)
# %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
regex = re.compile("^Average:\s+[0-9]+\s+([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)$",
re.MULTILINE)
for match in regex.finditer(results.stdout_output):
cpu_usr += float(match.group(1))
cpu_nice += float(match.group(2))
cpu_sys += float(match.group(3))
cpu_iowait += float(match.group(4))
cpu_irq += float(match.group(5))
cpu_soft += float(match.group(6))
cpu_steal += float(match.group(7))
cpu_guest += float(match.group(8))
cpu_gnice += float(match.group(9))
cpu_idle += float(match.group(10))
cpu_total = int(cpu_usr + cpu_nice + cpu_sys + cpu_iowait +
cpu_irq + cpu_soft + cpu_steal + cpu_guest +
cpu_gnice + cpu_idle)
cpu_results = dict([('ovs_cpu', ovs_cpu_usage),
('sys_usr', cpu_usr),
('sys_nice', cpu_nice),
('sys_sys', cpu_sys),
('sys_iowait', cpu_iowait),
('sys_irq', cpu_irq),
('sys_soft', cpu_soft),
('sys_steal', cpu_steal),
('sys_guest', cpu_guest),
('sys_gnice', cpu_gnice),
('sys_idle', cpu_idle),
('sys_total', cpu_total)])
slogger.debug("CPU results: {}".format(cpu_results))
return cpu_results
#
# Get ovs version
#
def get_ovs_version():
result = dut_shell.dut_exec('sh -c "ovs-vswitchd --version"'.
format(config.bridge_name),
die_on_error=True)
m = re.search('.*([0-9]+.[0-9]+.[0-9]+).*',
str(result.output))
if m:
return str(m.group(1))
lprint("ERROR: Can't figure out ovs-vswitchd's version!")
sys.exit(-1)
#
# Get VM DPDK version
#
def get_vm_dpdk_version(vm):
cmd = r"sshpass -p {2} ssh -o UserKnownHostsFile=/dev/null " \
r"-o StrictHostKeyChecking=no -n {1}@{0} " \
r"testpmd -v". \
format(vm, config.dut_vm_user, config.dut_vm_password)
result = dut_shell.dut_exec('', raw_cmd=['sh', '-c', cmd],
die_on_error=False)
m = re.search('DPDK ([0-9]+\.[0-9]+\.[0-9]+)',
result.output)
if m:
return str(m.group(1))
lprint("ERROR: Can't figure out VMs DPDK version!")
sys.exit(-1)
#
# Get ovs data path type
#
def get_ovs_datapath():
result = dut_shell.dut_exec('sh -c "ovs-appctl dpif/show"',
die_on_error=True)
output = result.output.replace("\n", "")
m = re.search('(.+@.*{}):.*'.format(config.bridge_name),
output)
if m:
m = re.search('(.+)@.*'.format(config.bridge_name),
m.group(1))
return m.group(1)
lprint("ERROR: Can't figure out ovs datapath!")
sys.exit(-1)
#
# Get bridge MAC address
#
def get_of_bridge_mac_address(bridge):
command = 'sh -c "ovs-ofctl show {0}"'.format(bridge)
result = dut_shell.dut_exec(command, die_on_error=True)
m = re.search('\s*LOCAL\({0}\): addr:(.*)'.format(bridge),
result.output)
if not m:
lprint("ERROR: Can't figure out MAC address for bridge \"{}\"".
format(bridge))
sys.exit(-1)
slogger.debug("MAC address for bridge \"{}\" is {}".format(bridge,
m.group(1)))
return m.group(1)
#
# Flow type definitions
#
flow_types = ['L2', 'L3', 'L4-UDP']
def get_flow_type_short():
labels = dict(list(zip(flow_types,
['L2', 'L3', 'L4-UDP'])))
return labels[config.flow_type]
def get_flow_type_name():
labels = dict(list(zip(flow_types,
['l2', 'l3', 'l4_udp'])))
return labels[config.flow_type]
def get_traffic_generator_flow():
flow_type = dict(list(zip(flow_types,
[TrafficFlowType.l2_mac,
TrafficFlowType.l3_ipv4,
TrafficFlowType.l4_udp])))
return flow_type[config.flow_type]
#
# Traffic tester type definitions
#
traffic_tester_types = ['xena', 'trex']
def get_traffic_generator_type():
traffic_generator_type = dict(list(zip(traffic_tester_types,
[TrafficGeneratorType.xena,
TrafficGeneratorType.trex])))
return traffic_generator_type[config.tester_type]
#
# main()
#
def main():
#
# Not the best way to share all of this, but will work for this
# small test script
#
global config
global plt
global dut_shell
global slogger
global of_interfaces
global ovs_data_path
global dp_interfaces
global tester
global phy_speed
global ovs_version
global vm_dpdk_version
global run_start_time
run_start_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
#
# Command line argument parsing
#
parser = argparse.ArgumentParser()
parser.add_argument("--bridge-name", metavar="BRIDGE",
help="Bridge name to use for testing", type=str,
default=DEFAULT_BRIDGE_NAME)
parser.add_argument("-d", "--debug",
help="Enable debugging", action="store_true")
parser.add_argument("--debug-dut-shell",
help="Enable DUT shell debugging", action="store_true")
parser.add_argument("--debug-scapy",
help="Enable scapy debugging", action="store_true")
parser.add_argument("--debug-script",
help="Enable script debugging", action="store_true")
parser.add_argument("--debug-tester",
help="Enable tester debugging", action="store_true")
parser.add_argument("--pmd-rxq-affinity", metavar="AFINITY",
help="Set pmd-rxq-affinity when script configures bridges", type=str)
parser.add_argument("--dut-vm-address", metavar="ADDRESS",
help="IP address of VM running on OpenVSwitch DUT", type=str,
default=DEFAULT_DUT_VM_ADDRESS)
parser.add_argument("--dut-vm-nic-pci", metavar="PCI",
help="PCI address of VMs virtual NIC", type=str,
default=DEFAULT_DUT_VM_NIC_PCI_ADDRESS)
parser.add_argument("--dut-vm-user", metavar="USER",
help="User name of VM running on OpenVSwitch DUT", type=str,
default=DEFAULT_DUT_VM_LOGIN_USER)
parser.add_argument("--dut-vm-password", metavar="PASSWORD",
help="User name of VM running on OpenVSwitch DUT", type=str,
default=DEFAULT_DUT_VM_LOGIN_PASSWORD)
parser.add_argument("--dut-vm-nic-queues", metavar="QUEUES",
help="Number of VM nic queues (and cores) to allocate, default 1",
type=int, default=1)
parser.add_argument("--dut-vm-nic-rxd", metavar="DESCRIPTORS",
help="Number of VM nic receive descriptors, default 4096",
type=int, default=4096)
parser.add_argument("--dut-vm-nic-txd", metavar="DESCRIPTORS",
help="Number of VM nic transmit descriptors, default 1024",
type=int, default=1024)
# Removed VV test for now, as it needs non-upstream trafgen tool
#parser.add_argument("--dut-second-vm-address", metavar="ADDRESS",
# help="IP address of second VM running on OpenVSwitch DUT", type=str,
# default=DEFAULT_DUT_SECOND_VM_ADDRESS)
#parser.add_argument("--dut-second-vm-nic-pci", metavar="PCI",
# help="PCI address of VMs virtual NIC", type=str,
# default=DEFAULT_DUT_VM_NIC_PCI_ADDRESS)
parser.add_argument("--flow-type",
help="Flow type used for the tests, default L3",
choices=flow_types, default='L3')
parser.add_argument("-g", "--gui",
help="Show graph GUI", action="store_true")
parser.add_argument("--no-bridge-config",
help="Do not configure OVS", action="store_true")
parser.add_argument("-o", "--ovs-address", metavar="ADDRESS",
help="IP address of OpenVSwitch DUT", type=str,
default=DEFAULT_DUT_ADDRESS)
parser.add_argument("--ovs-user", metavar="USER",
help="User name of OpenVSwitch DUT", type=str,
default=DEFAULT_DUT_LOGIN_USER)
parser.add_argument("--ovs-password", metavar="PASSWORD",
help="User name of OpenVSwitch DUT", type=str,
default=DEFAULT_DUT_LOGIN_PASSWORD)
parser.add_argument("-p", "--physical-interface", metavar="DEVICE",
help="Physical interface", type=str,
default=DEFAULT_PHYSICAL_INTERFACE)
parser.add_argument("--perf",
help="Enable perf profiling", action="store_true")
parser.add_argument("--physical-interface-pci", metavar="PCI",
help="Physical interface's PCI address", type=str)
parser.add_argument("--second-physical-interface", metavar="DEVICE",
help="Second Physical interface", type=str,
default=DEFAULT_SECOND_PHYSICAL_INTERFACE)
parser.add_argument("--second-physical-interface-pci", metavar="PCI",
help="Second Physical interface", type=str)
parser.add_argument("--physical-speed", metavar="GBPS",
help="Physical interface speed in Gbit/s", type=int,
default=0)
parser.add_argument("--packet-list", metavar="LIST",
help="List of packet sizes to test", type=str,
default=DEFAULT_PACKET_LIST)
parser.add_argument("-r", "--run-time", metavar="SECONDS",
help="Traffic run time per test", type=int,
default=DEFAULT_RUN_TIME)
parser.add_argument("--run-pp-test",
help="Run the P to P test", action="store_true")
# Disable VXLAN for now due to it being incomplete
#parser.add_argument("--run-vxlan-test",
# help="Run the VXLAN tunnel test", action="store_true")
parser.add_argument("--skip-pv-test",
help="Do not run the P to V test", action="store_true")
parser.add_argument("--skip-pvp-test",
help="Do not run the P to V to P test", action="store_true")
# Removed VV test for now, as it needs non-upstream trafgen tool
# parser.add_argument("--skip-vv-test",
# help="Do not run the V to V test", action="store_true")
parser.add_argument("--stream-list", metavar="LIST",
help="List of stream sizes to test", type=str,
default=DEFAULT_STREAM_LIST)
parser.add_argument("--warm-up",
help="Do flow warm-up round before tests", action="store_true")
parser.add_argument("--warm-up-timeout", metavar="SECONDS",
help="Warm up timeout", type=int,
default=DEFAULT_WARM_UP_TIMEOUT)
parser.add_argument("--warm-up-no-fail",
help="Continue running the test even if warm up times out", action="store_true")
parser.add_argument("--no-cool-down",
help="Do not wait for datapath flows to be cleared", action="store_true")
parser.add_argument("-v", "--virtual-interface", metavar="DEVICE",
help="Virtual interface", type=str,
default=DEFAULT_VIRTUAL_INTERFACE)
# Removed VV test for now, as it needs non-upstream trafgen tool
#parser.add_argument("-w", "--second-virtual-interface", metavar="DEVICE",
# help="Virtual interface for second VM", type=str,
# default=DEFAULT_SECOND_VIRTUAL_INTERFACE)
parser.add_argument("-x", "--tester-address", metavar="ADDRESS",
help="IP address of network tester", type=str,
default=DEFAULT_TESTER_SERVER_ADDRESS)
parser.add_argument("--tester-type",
help="Traffic tester type to use, default \"xena\"",
choices=traffic_tester_types,
default=DEFAULT_TESTER_TYPE)
parser.add_argument("-i", "--tester-interface", metavar="{MOD,}PORT",
help="Tester interface", type=str,
default=DEFAULT_TESTER_INTERFACE)
parser.add_argument("--second-tester-interface", metavar="{MOD,}PORT",
help="Second tester interface", type=str,
default=DEFAULT_SECOND_TESTER_INTERFACE)
parser.add_argument("-l", "--logging", metavar="FILE",
help="Redirecting log output to file", type=str)
parser.add_argument("--dst-mac-address",
help="Destination Base MAC address",
type=str, default=DEFAULT_DST_MAC_ADDRESS)
parser.add_argument("--src-mac-address",
help="Source Base MAC address",
type=str, default=DEFAULT_SRC_MAC_ADDRESS)
parser.add_argument("--mac-swap",
help="Swap source/destination mac at VM",
action="store_true")
config = parser.parse_args()
#
# Removed VV test for now, as it needs non-upstream trafgen tool
#
config.skip_vv_test = True
config.dut_second_vm_address = DEFAULT_DUT_SECOND_VM_ADDRESS
config.dut_second_vm_nic_pci = DEFAULT_DUT_VM_NIC_PCI_ADDRESS
config.second_virtual_interface = DEFAULT_SECOND_VIRTUAL_INTERFACE
#
# Disable VXLAN for now due to it being incomplete
#
config.run_vxlan_test = False
#
# Setting up the logger
#
logging.basicConfig(format='%(asctime)s[%(levelname)-8.8s][%(name)s]: %(message)s',
datefmt='%H:%M:%S',
level=logging.ERROR,
filename=config.logging)
slogger = logging.getLogger('script')
slogger.setLevel(logging.INFO)
slogger.info("**********************************************************************")
slogger.info("** Starting \"%s\"", os.path.basename(__file__))
slogger.info("**********************************************************************")
#
# Check some input parameters
#
if config.ovs_address == '':
lprint("ERROR: You must supply the OVS host address to use for testing!")
sys.exit(-1)
if (not config.skip_vv_test or not config.skip_pv_test or \
not config.skip_pvp_test ) and config.dut_vm_address == '':
lprint("ERROR: You must supply the DUT VM host address to use for testing!")
sys.exit(-1)
if config.dst_mac_address == '':
lprint("ERROR: You must supply a Destination Base MAC Address")
sys.exit(-1)
if config.src_mac_address == '':
lprint("ERROR: You must supply a Source Base MAC Address")
sys.exit(-1)
if config.flow_type == 'L2':
if (int(config.src_mac_address.replace(":", ""), 16) & 0xffffff) \
!= 0:
lprint("ERROR: For L2 tests the Source Base MAC address must "
"be xx:xx:xx:00:00:00")
sys.exit(-1)
if (int(config.dst_mac_address.replace(":", ""), 16) & 0xffffff) \
!= 0:
lprint("ERROR: For L2 tests the Destination Base MAC address must "
"be xx:xx:xx:00:00:00")
sys.exit(-1)
if (not config.skip_vv_test or not config.skip_pv_test or \
not config.skip_pvp_test ) and \
not check_pci_address_string(config.dut_vm_nic_pci):
lprint("ERROR: You must supply a valid PCI address for the VMs NIC!")
sys.exit(-1)
if not config.skip_vv_test and config.second_virtual_interface == '':
lprint("ERROR: You must supply a second virtual interface to use for testing!")
sys.exit(-1)
if not config.skip_vv_test and config.dut_second_vm_address == '':
lprint("ERROR: You must supply the second DUT VM address!")
sys.exit(-1)
if not config.skip_vv_test and \
not check_pci_address_string(config.dut_second_vm_nic_pci):
lprint("ERROR: You must supply a valid PCI address for the second VMs NIC!")
sys.exit(-1)
if config.dut_second_vm_address != '' and config.dut_vm_nic_pci == '':
lprint("ERROR: You must supply the second DUT VM host's NIC PCI address!")
sys.exit(-1)
if config.physical_interface == '':
lprint("ERROR: You must supply the physical interface to use for testing!")
sys.exit(-1)
if config.run_pp_test and config.second_physical_interface == '':
lprint("ERROR: You must supply the second physical interface to use for testing!")
sys.exit(-1)
if (not config.skip_vv_test or not config.skip_pv_test or \
not config.skip_pvp_test) and config.virtual_interface == '':
lprint("ERROR: You must supply the virtual interface to use for testing!")
sys.exit(-1)
if config.tester_address == '':
lprint("ERROR: You must supply the tester's address to use for testing!")
sys.exit(-1)
if config.tester_interface == '':
lprint("ERROR: You must supply the tester's interface to use for testing!")
sys.exit(-1)
if config.run_pp_test and config.second_tester_interface == '':
lprint("ERROR: You must supply the second tester's interface to use for testing!")
sys.exit(-1)
if not tester_interface_valid(config.tester_interface):
lprint("ERROR: Invalid tester interface configuration!")
sys.exit(-1)
if config.second_tester_interface != '' and \
not tester_interface_valid(config.second_tester_interface):
lprint("ERROR: Invalid second tester interface configuration!")
sys.exit(-1)
if not check_list(config.stream_list, 1, 1000000):
lprint("ERROR: Invalid stream list, \"{}\", supplied!".format(config.stream_list))
sys.exit(-1)
if config.flow_type == 'L4-UDP' and not check_list(config.stream_list, 1, 65535):
lprint("ERROR: Invalid stream list, \"{}\", supplied for L4 flows!".
format(config.stream_list))
sys.exit(-1)
if not check_list(config.packet_list, 64, 9000):
lprint("ERROR: Invalid packet list, \"{}\", supplied!".format(config.packet_list))
sys.exit(-1)
if config.run_time < 20 or config.run_time > 3600:
lprint("ERROR: Run time should be [20..3600] seconds!")
sys.exit(-1)
if config.physical_speed != 0 and \
(config.physical_speed < 0 or config.physical_speed > 1000):
lprint("ERROR: Invalid physical speed supplied [1..1000]!")
sys.exit(-1)
if config.dut_vm_nic_queues < 1 or config.dut_vm_nic_queues > 63:
lprint("ERROR: Invalid VM NIC queue count supplied [1..63]!")
sys.exit(-1)
if config.run_vxlan_test and config.no_bridge_config:
#
        # We can only support tunnels with no bridge config if no other tests
        # are run, as it needs a special config compared to the other tests.
#
if not config.skip_vv_test or not config.skip_pv_test \
or not config.skip_pvp_test or config.run_pp_test:
lprint("ERROR: Tunnel tests can only be run individually "
"with the no-bridge-config option!")
sys.exit(-1)
if config.run_vxlan_test and config.flow_type != 'L3':
lprint("ERROR: Tunnel tests only support the L3 flow type!")
sys.exit(-1)
if config.run_vxlan_test and not check_list(config.packet_list, 96, 9000):
#
# ETH + IPv4 + UDP + VXLAN + ETH + IPv4 + UDP + ETH_CRC
#
lprint("ERROR: Minimal packet size for the VXLAN test should be 96 bytes!")
sys.exit(-1)
if config.warm_up and (not config.skip_vv_test or config.run_vxlan_test):
lprint("WARNING: Warm-up only works for P2P, P2V, and P2V2P tests!")
#
# Dump settings if global debug is enabled
#
if config.debug:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if config.debug_script or config.debug:
slogger.setLevel(logging.DEBUG)
if config.debug_scapy or config.debug:
logging.getLogger("scapy.runtime").setLevel(logging.DEBUG)
slogger.debug("Configured values:")
slogger.debug(" %-23.23s: %s", 'Debug', config.debug)
slogger.debug(" %-23.23s: %s", 'Debug DUT Shell', config.debug_dut_shell)
slogger.debug(" %-23.23s: %s", 'Debug Scapy', config.debug_scapy)
slogger.debug(" %-23.23s: %s", 'Debug Script', config.debug_script)
slogger.debug(" %-23.23s: %s", 'Debug Tester', config.debug_tester)
slogger.debug(" %-23.23s: %s", 'Flow Type', config.flow_type)
slogger.debug(" %-23.23s: %s", 'Perf tracing', config.perf)
slogger.debug(" %-23.23s: %s", 'Tester Type', config.tester_type)
slogger.debug(" %-23.23s: %s", 'Tester Address', config.tester_address)
slogger.debug(" %-23.23s: %s", 'Tester Interface', config.tester_interface)
slogger.debug(" %-23.23s: %s", 'Second Tester Interface', config.second_tester_interface)
slogger.debug(" %-23.23s: %s", 'OVS Bridge Name', config.bridge_name)
slogger.debug(" %-23.23s: %s", 'OVS DUT Address', config.ovs_address)
slogger.debug(" %-23.23s: %s", 'OVS DUT Login', config.ovs_user)
slogger.debug(" %-23.23s: %s", 'OVS DUT VM1 Address', config.dut_vm_address)
slogger.debug(" %-23.23s: %s", 'OVS DUT VM2 Address', config.dut_second_vm_address)
slogger.debug(" %-23.23s: %s", 'OVS DUT VM1 PCI Address', config.dut_vm_nic_pci)
slogger.debug(" %-23.23s: %s", 'OVS DUT VM2 PCI Address', config.dut_second_vm_nic_pci)
slogger.debug(" %-23.23s: %s", 'OVS VM Login', config.dut_vm_user)
slogger.debug(" %-23.23s: %s", 'OVS VM NIC queues', config.dut_vm_nic_queues)
slogger.debug(" %-23.23s: %s", 'OVS VM NIC rxd', config.dut_vm_nic_rxd)
slogger.debug(" %-23.23s: %s", 'OVS VM NIC txd', config.dut_vm_nic_txd)
slogger.debug(" %-23.23s: %s", 'Physical Interface', config.physical_interface)
slogger.debug(" %-23.23s: %u Gbit/s", 'Physical Int. Speed', config.physical_speed)
slogger.debug(" %-23.23s: %s", 'Virtual Interface', config.virtual_interface)
slogger.debug(" %-23.23s: %s", '2nd Virtual Interface', config.second_virtual_interface)
slogger.debug(" %-23.23s: %s", 'MAC swap', config.mac_swap)
slogger.debug(" %-23.23s: %s", 'Source MAC', config.src_mac_address)
slogger.debug(" %-23.23s: %s", 'Destination MAC', config.dst_mac_address)
slogger.debug(" %-23.23s: %u seconds", 'Test run time', config.run_time)
slogger.debug(" %-23.23s: %s", 'Run with stream size\'s', config.stream_list)
slogger.debug(" %-23.23s: %s", 'Run with packet size\'s', config.packet_list)
slogger.debug(" %-23.23s: %s", 'Skip PV test', config.skip_pv_test)
slogger.debug(" %-23.23s: %s", 'Skip PVP test', config.skip_pvp_test)
slogger.debug(" %-23.23s: %s", 'Skip VV test', config.skip_vv_test)
slogger.debug(" %-23.23s: %s", 'Run PP test', config.run_pp_test)
slogger.debug(" %-23.23s: %s", 'Warm-up', config.warm_up)
slogger.debug(" %-23.23s: %s", 'No-cool-down', config.no_cool_down)
#
# If we use the GUI, we need to set the correct back-end
    # However this does not always seem to work with a non-Tk back-end; if you
    # get Tkinter errors, set the following environment variable:
    #    export MPLBACKEND="agg"
#
# if config.gui:
# matplotlib.use('TkAgg')
# else:
# matplotlib.use('Agg')
#
# Commenting out the above, as it no longer works. Use the export as
# explained above as python loads the modules beforehand.
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
#
    # Quickly regenerate a graph from results (DEBUG)
#
# packet_sizes = [64, 128, 256, 512, 1024, 1514]
# p2v_results = [22969229, 25139846, 18116596, 9398727, 4789329, 3259472]
# create_single_graph(packet_sizes, p2v_results,
# "Packet size", "Packets/second",
# "Physical to Virtual with 1000 flows",
# "test_p2v_1000")
# sys.exit(-1)
#
# Connecting to Tester
#
lprint("- Connecting to the tester...")
tester = TrafficGenerator(get_traffic_generator_type(),
hostname=config.tester_address)
if config.debug_tester:
logging.getLogger('xenalib.BaseSocket').setLevel(logging.DEBUG)
logging.getLogger('xenalib.KeepAliveThread').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaManager').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaModifier').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaPort').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaSocket').setLevel(logging.DEBUG)
logging.getLogger('xenalib.XenaStream').setLevel(logging.DEBUG)
if not tester.reserve_port(config.tester_interface):
lprint("ERROR: Failed to add first tester port")
sys.exit(-1)
if config.second_tester_interface != '':
if not tester.reserve_port(config.second_tester_interface):
lprint("ERROR: Failed to add second tester port")
sys.exit(-1)
#
# Connecting to DUT
#
lprint("- Connecting to DUT, \"{}\"...".format(config.ovs_address))
dut_shell = DutSshShell(hostname=config.ovs_address,
username=config.ovs_user,
password=config.ovs_password,
missing_host_key=spur.ssh.MissingHostKey.accept)
if config.debug_dut_shell:
dut_shell.logger.setLevel(logging.DEBUG)
ovs_version = get_ovs_version()
#
# Stop any running test tools on the VMs
#
lprint("- Stop any running test tools...")
stop_cpu_monitoring(die=False)
if config.dut_vm_address != '':
stop_traffic_rx_on_vm(config.dut_vm_address, die=False)
stop_traffic_tx_on_vm(config.dut_vm_address, die=False)
lprint("- Getting VM's DPDK version...")
vm_dpdk_version = get_vm_dpdk_version(config.dut_vm_address)
if config.dut_second_vm_address != '':
stop_traffic_rx_on_vm(config.dut_second_vm_address, die=False)
stop_traffic_tx_on_vm(config.dut_second_vm_address, die=False)
#
# Create OVS bridge, and get OpenFlow port numbers
#
if not config.no_bridge_config:
if not config.skip_pv_test or not config.skip_pvp_test or \
not config.skip_vv_test or config.run_pp_test:
#
# Also skip if all we are running are the tunnel tests
#
create_ovs_bridge()
#
# If we run only tunnel tests we need to skip this
#
if not config.skip_pv_test or not config.skip_pvp_test or \
not config.skip_vv_test or config.run_pp_test:
of_interfaces = dict()
dp_interfaces = dict()
of_interfaces, dp_interfaces = get_bridge_port_numbers()
#
# Getting physical port speed, used for graphs
#
if config.physical_speed != 0:
phy_speed = config.physical_speed * 1000000000
else:
phy_speed = get_physical_port_speed()
#
# Get datapath type
#
ovs_data_path = get_ovs_datapath()
lprint("- Get OVS datapath type, \"{}\"...".format(ovs_data_path))
#
# Open CSV file for writing
#
lprint("- Create \"test_results.csv\" for writing results...")
if config.flow_type == 'L2':
csv_file = "test_results_l2.csv"
elif config.flow_type == 'L3':
csv_file = "test_results_l3.csv"
elif config.flow_type == 'L4-UDP':
csv_file = "test_results_l4_udp.csv"
else:
raise ValueError("No support for this protocol!!")
with open(csv_file, 'w') as csvfile:
csv_handle = csv.writer(csvfile, dialect='excel')
csv_handle.writerow(["Physical port, \"{}\", speed {} Gbit/s".
format(config.physical_interface,
phy_speed / 1000000000)])
csv_handle.writerow([])
csv_handle.writerow([])
#
# Run tests
#
stream_size_list = [int(i) for i in config.stream_list.split(',')]
packet_size_list = [int(i) for i in config.packet_list.split(',')]
flow_str = get_flow_type_short()
flow_file_str = get_flow_type_name()
v2v_results = dict()
v2v_cpu_results = dict()
p2v_results = dict()
p2v_cpu_results = dict()
p2p_results = dict()
p2p_cpu_results = dict()
p2v2p_results = dict()
p2v2p_cpu_results = dict()
if not config.skip_vv_test:
for nr_of_streams in stream_size_list:
v2v_results[nr_of_streams], \
v2v_cpu_results[nr_of_streams] = test_v2v(nr_of_streams, packet_size_list)
create_multiple_graph(packet_size_list, v2v_results,
"Packet size", "Packets/second",
"Virtual to Virtual, {}".
format(get_flow_type_short()),
"test_v2v_all_{}".
format(get_flow_type_name()),
None, cpu_utilization=v2v_cpu_results)
create_multiple_graph(packet_size_list, v2v_results,
"Packet size", "Packets/second",
"Virtual to Virtual, {}".
format(get_flow_type_short()),
"test_v2v_all_{}_ref".
format(get_flow_type_name()),
[phy_speed], cpu_utilization=v2v_cpu_results)
csv_write_test_results(csv_handle, 'Virtual to Virtual test',
stream_size_list, packet_size_list,
v2v_results, v2v_cpu_results)
if not config.skip_pv_test:
for nr_of_streams in stream_size_list:
p2v_results[nr_of_streams], \
p2v_cpu_results[nr_of_streams] = test_p2v(nr_of_streams, packet_size_list)
create_multiple_graph(packet_size_list, p2v_results,
"Packet size", "Packets/second",
"Physical to Virtual, {}".format(flow_str),
"test_p2v_all_{}".format(flow_file_str),
None, cpu_utilization=p2v_cpu_results)
create_multiple_graph(packet_size_list, p2v_results,
"Packet size", "Packets/second",
"Physical to Virtual, {}".format(flow_str),
"test_p2v_all_{}_ref".format(flow_file_str),
[phy_speed], cpu_utilization=p2v_cpu_results)
csv_write_test_results(csv_handle, 'Physical to Virtual test',
stream_size_list, packet_size_list,
p2v_results, p2v_cpu_results)
if not config.skip_pvp_test:
for nr_of_streams in stream_size_list:
p2v2p_results[nr_of_streams], \
p2v2p_cpu_results[nr_of_streams] = test_p2v2p(nr_of_streams,
packet_size_list)
create_multiple_graph(packet_size_list, p2v2p_results,
"Packet size", "Packets/second",
"Physical to Virtual to Physical, {}".format(flow_str),
"test_p2v2p_all_{}".format(flow_file_str),
None, cpu_utilization=p2v2p_cpu_results)
create_multiple_graph(packet_size_list, p2v2p_results,
"Packet size", "Packets/second",
"Physical to Virtual to Physical, {}".format(flow_str),
"test_p2v2p_all_{}_ref".format(flow_file_str),
[phy_speed], cpu_utilization=p2v2p_cpu_results)
csv_write_test_results(csv_handle,
'Physical to Virtual to Physical test',
stream_size_list, packet_size_list,
p2v2p_results, p2v2p_cpu_results)
if config.run_pp_test:
for nr_of_streams in stream_size_list:
p2p_results[nr_of_streams], \
p2p_cpu_results[nr_of_streams] = test_p2p(nr_of_streams, packet_size_list)
create_multiple_graph(packet_size_list, p2p_results,
"Packet size", "Packets/second",
"Physical to Physical, {}".format(flow_str),
"test_p2p_all_{}".format(flow_file_str),
None, cpu_utilization=p2p_cpu_results)
create_multiple_graph(packet_size_list, p2p_results,
"Packet size", "Packets/second",
"Physical to Physical, {}".format(flow_str),
"test_p2p_all_{}_ref".format(flow_file_str),
[phy_speed], cpu_utilization=p2p_cpu_results)
csv_write_test_results(csv_handle, 'Physical to Physical test',
stream_size_list, packet_size_list,
p2p_results, p2p_cpu_results)
if config.run_vxlan_test:
if not config.no_bridge_config:
create_ovs_vxlan_bridge()
        of_interfaces, dp_interfaces = get_bridge_port_numbers(tunnel=True)
vxlan_results = dict()
vxlan_cpu_results = dict()
for nr_of_streams in stream_size_list:
vxlan_results[nr_of_streams], \
vxlan_cpu_results[nr_of_streams] = test_vxlan(nr_of_streams,
packet_size_list)
create_multiple_graph(packet_size_list, vxlan_results,
"Packet size", "Packets/second",
"VXLAN Tunnel, {}".format(flow_str),
"test_vxlan_all_{}".format(flow_file_str),
None, cpu_utilization=vxlan_cpu_results)
create_multiple_graph(packet_size_list, vxlan_results,
"Packet size", "Packets/second",
"VXLAN Tunnel, {}".format(flow_str),
"test_vxlan_all_{}_ref".format(flow_file_str),
[phy_speed], cpu_utilization=vxlan_cpu_results)
csv_write_test_results(csv_handle, 'VXLAN Tunnel',
stream_size_list, packet_size_list,
vxlan_results, vxlan_cpu_results)
#
# Done...
#
lprint("- Done running performance tests!")
# For now we leave the DUT in the last test state in case we would like
# to do some trouble shooting. First step in re-run is to remove bridge,
# and delete all openflow rules.
tester.disconnect()
del tester
#
# Start main() as default entry point...
#
if __name__ == '__main__':
main()
|
the-stack_106_22667 | import flask
from indexd.blueprint import dist_get_record
from indexd.errors import AuthError
from indexd.errors import UserError
from indexd.alias.errors import NoRecordFound as AliasNoRecordFound
from indexd.index.errors import NoRecordFound as IndexNoRecordFound
blueprint = flask.Blueprint('dos', __name__)
blueprint.config = dict()
blueprint.index_driver = None
blueprint.alias_driver = None
blueprint.dist = []
@blueprint.route('/ga4gh/dos/v1/dataobjects/<path:record>', methods=['GET'])
def get_dos_record(record):
'''
Returns a record from the local ids, alias, or global resolvers.
Returns DOS Schema
'''
try:
ret = blueprint.index_driver.get(record)
ret['alias'] = blueprint.index_driver.get_aliases_for_did(record)
except IndexNoRecordFound:
try:
ret = blueprint.index_driver.get_by_alias(record)
ret['alias'] = blueprint.index_driver.get_aliases_for_did(ret['did'])
except IndexNoRecordFound:
try:
ret = blueprint.alias_driver.get(record)
except AliasNoRecordFound:
if not blueprint.dist:
raise
ret = dist_get_record(record)
return flask.jsonify(indexd_to_dos(ret)), 200
@blueprint.route('/ga4gh/dos/v1/dataobjects/list', methods=['POST'])
def list_dos_records():
'''
    Lists data object records from the index, optionally filtered by url or checksum.
Returns DOS Schema
'''
start = flask.request.json.get('page_token')
limit = flask.request.json.get('page_size')
try:
limit = 100 if limit is None else int(limit)
except ValueError:
raise UserError('limit must be an integer')
if limit <= 0 or limit > 1024:
raise UserError('limit must be between 1 and 1024')
url = flask.request.json.get('url')
# Support this in the future when we have
# more fully featured aliases?
#alias = flask.request.json.get('alias')
checksum = flask.request.json.get('checksum')
if checksum:
hashes = {checksum['type']: checksum['checksum']}
else:
hashes = None
records = blueprint.index_driver.ids(
start=start,
limit=limit,
urls=url,
hashes=hashes
)
for record in records:
record['alias'] = blueprint.index_driver.get_aliases_for_did(record['did'])
ret = {"data_objects": [indexd_to_dos(record)['data_object'] for record in records]}
return flask.jsonify(ret), 200
def indexd_to_dos(record):
data_object = {
"id": record['did'],
"name": record['file_name'],
'created': record['created_date'],
'updated': record['updated_date'],
"size": record['size'],
"version": record['rev'],
"description": "",
"mime_type": ""
}
data_object['aliases'] = record['alias']
# parse out checksums
data_object['checksums'] = []
for k in record['hashes']:
data_object['checksums'].append(
{'checksum': record['hashes'][k], 'type': k})
# parse out the urls
data_object['urls'] = []
for url in record['urls']:
url_object = {
'url': url }
if 'metadata' in record and record['metadata']:
url_object['system_metadata'] = record['metadata']
if 'urls_metadata' in record and url in record['urls_metadata'] and record['urls_metadata'][url]:
url_object['user_metadata'] = record['urls_metadata'][url]
data_object['urls'].append(url_object)
result = { "data_object": data_object }
return result
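# Illustrative sketch (not part of the original module): indexd_to_dos applied
# to a minimal, hypothetical indexd record. Field values are made up; only the
# keys accessed above are included.
def _example_indexd_to_dos():
    record = {
        'did': 'abc-123',
        'file_name': 'reads.bam',
        'created_date': '2019-01-01T00:00:00',
        'updated_date': '2019-01-02T00:00:00',
        'size': 1024,
        'rev': 'rev-1',
        'alias': ['my-alias'],
        'hashes': {'md5': 'd41d8cd98f00b204e9800998ecf8427e'},
        'urls': ['s3://bucket/reads.bam'],
    }
    # Returns {"data_object": {...}} with aliases, checksums and urls filled in.
    return indexd_to_dos(record)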
@blueprint.errorhandler(UserError)
def handle_user_error(err):
    ret = {'msg': str(err), 'status_code': 0}
return flask.jsonify(ret), 400
@blueprint.errorhandler(AuthError)
def handle_auth_error(err):
    ret = {'msg': str(err), 'status_code': 0}
return flask.jsonify(ret), 403
@blueprint.errorhandler(AliasNoRecordFound)
def handle_no_alias_record_error(err):
    ret = {'msg': str(err), 'status_code': 0}
return flask.jsonify(ret), 404
@blueprint.errorhandler(IndexNoRecordFound)
def handle_no_index_record_error(err):
    ret = {'msg': str(err), 'status_code': 0}
return flask.jsonify(ret), 404
@blueprint.record
def get_config(setup_state):
index_config = setup_state.app.config['INDEX']
alias_config = setup_state.app.config['ALIAS']
blueprint.index_driver = index_config['driver']
blueprint.alias_driver = alias_config['driver']
if 'DIST' in setup_state.app.config:
blueprint.dist = setup_state.app.config['DIST']
|
the-stack_106_22668 | from __future__ import print_function, division, absolute_import
from fontTools.ttLib import TTFont
from afdko.fontpdf import (doTitle, FontPDFParams)
from afdko.otfpdf import txPDFFont
from afdko.pdfgen import Canvas
from test_utils import get_input_path
TOOL = 'fontpdf'
OTF_FONT = 'OTF.otf'
# -----
# Tests
# -----
def test_doTitle_pageIncludeTitle_1():
with TTFont(get_input_path(OTF_FONT)) as otfont:
params = FontPDFParams()
assert params.pageIncludeTitle == 1
pdfFont = txPDFFont(otfont, params)
rt_canvas = Canvas("pdf_file_path")
assert rt_canvas._code == []
doTitle(rt_canvas, pdfFont, params, 1)
assert len(rt_canvas._code)
assert 'SourceSansPro-Black' in rt_canvas._code[1]
def test_doTitle_pageIncludeTitle_0():
with TTFont(get_input_path(OTF_FONT)) as otfont:
params = FontPDFParams()
params.pageIncludeTitle = 0
pdfFont = txPDFFont(otfont, params)
rt_canvas = Canvas("pdf_file_path")
assert rt_canvas._code == []
doTitle(rt_canvas, pdfFont, params, 1)
assert rt_canvas._code == []
|
the-stack_106_22669 | # Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import json
import requests
from django.conf import settings
from django.core.mail import send_mail
from django.db import models, transaction
from django.db.models.signals import post_save, pre_save
from huxley.core.constants import ContactGender, ContactType, ProgramTypes
from huxley.utils import zoho
class Conference(models.Model):
session = models.PositiveSmallIntegerField(default=0)
start_date = models.DateField()
end_date = models.DateField()
reg_open = models.DateField()
early_reg_close = models.DateField()
reg_close = models.DateField()
min_attendance = models.PositiveSmallIntegerField(default=0)
max_attendance = models.PositiveSmallIntegerField(default=0)
open_reg = models.BooleanField(default=True)
waitlist_reg = models.BooleanField(default=False)
def __unicode__(self):
return 'BMUN %d' % self.session
class Meta:
db_table = u'conference'
get_latest_by = 'start_date'
class Country(models.Model):
name = models.CharField(max_length=128)
special = models.BooleanField(default=False)
def __unicode__(self):
return self.name
class Meta:
db_table = u'country'
class Committee(models.Model):
name = models.CharField(max_length=8)
full_name = models.CharField(max_length=128)
countries = models.ManyToManyField(Country, through='Assignment')
delegation_size = models.PositiveSmallIntegerField(default=2)
special = models.BooleanField(default=False)
def __unicode__(self):
return self.name
class Meta:
db_table = u'committee'
class School(models.Model):
REGISTRATION_FEE = 50.0
DELEGATE_FEE = 50.0
PROGRAM_TYPE_OPTIONS = (
(ProgramTypes.CLUB, 'Club'),
(ProgramTypes.CLASS, 'Class'),
)
CONTACT_TYPE_OPTIONS = (
(ContactType.FACULTY, 'Faculty'),
(ContactType.STUDENT, 'Student'),
)
GENDER_OPTIONS = (
(ContactGender.MALE, 'Male'),
(ContactGender.FEMALE, 'Female'),
(ContactGender.OTHER, 'Other'),
(ContactGender.UNSPECIFIED, 'Unspecified'),
)
registered = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=128)
address = models.CharField(max_length=128)
city = models.CharField(max_length=128)
state = models.CharField(max_length=16)
zip_code = models.CharField(max_length=16)
country = models.CharField(max_length=64)
primary_name = models.CharField(max_length=128)
primary_gender = models.PositiveSmallIntegerField(choices=GENDER_OPTIONS, default=ContactGender.UNSPECIFIED)
primary_email = models.EmailField()
primary_phone = models.CharField(max_length=32)
primary_type = models.PositiveSmallIntegerField(choices=CONTACT_TYPE_OPTIONS, default=ContactType.FACULTY)
secondary_name = models.CharField(max_length=128, blank=True)
secondary_gender = models.PositiveSmallIntegerField(choices=GENDER_OPTIONS, blank=True, default=ContactGender.UNSPECIFIED)
secondary_email = models.EmailField(blank=True)
secondary_phone = models.CharField(max_length=32, blank=True)
secondary_type = models.PositiveSmallIntegerField(choices=CONTACT_TYPE_OPTIONS, blank=True, default=ContactType.FACULTY)
program_type = models.PositiveSmallIntegerField(choices=PROGRAM_TYPE_OPTIONS, default=ProgramTypes.CLUB)
times_attended = models.PositiveSmallIntegerField(default=0)
international = models.BooleanField(default=False)
waitlist = models.BooleanField(default=False)
beginner_delegates = models.PositiveSmallIntegerField()
intermediate_delegates = models.PositiveSmallIntegerField()
advanced_delegates = models.PositiveSmallIntegerField()
spanish_speaking_delegates = models.PositiveSmallIntegerField()
chinese_speaking_delegates = models.PositiveSmallIntegerField()
countrypreferences = models.ManyToManyField(Country, through='CountryPreference')
committeepreferences = models.ManyToManyField(Committee, limit_choices_to={'special': True})
registration_comments = models.TextField(default='', blank=True)
fees_owed = models.DecimalField(max_digits=6, decimal_places=2, default=0)
fees_paid = models.DecimalField(max_digits=6, decimal_places=2, default=0)
assignments_finalized = models.BooleanField(default=False)
def update_country_preferences(self, country_ids):
'''Given a list of country IDs, first dedupe and filter out 0s, then
clear the existing country preferences and construct new ones.'''
seen = set()
processed_country_ids = []
country_preferences = []
for rank, country_id in enumerate(country_ids):
if not country_id or country_id in seen:
continue
seen.add(country_id)
processed_country_ids.append(country_id)
country_preferences.append(
CountryPreference(
school=self,
country_id=country_id,
rank=rank,
)
)
if country_preferences:
with transaction.atomic():
self.countrypreferences.clear()
CountryPreference.objects.bulk_create(country_preferences)
return processed_country_ids
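    # Illustrative note (not from the original source): calling
    # update_country_preferences([3, 0, 3, 5]) skips the falsy id and the
    # duplicate, returns [3, 5], and creates CountryPreference rows with
    # rank 0 and rank 3 (rank keeps the original position in the list).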
@classmethod
def update_fees(cls, **kwargs):
school = kwargs['instance']
delegate_fees = cls.DELEGATE_FEE * sum((
school.beginner_delegates,
school.intermediate_delegates,
school.advanced_delegates,
))
school.fees_owed = cls.REGISTRATION_FEE + delegate_fees
@classmethod
def update_waitlist(cls, **kwargs):
'''If the school is about to be created (i.e. has no ID) and
registration is closed, add it to the waitlist.'''
school = kwargs['instance']
if not school.id and settings.CONFERENCE_WAITLIST_OPEN:
school.waitlist = True
@property
def country_preference_ids(self):
'''Return an ordered list of the school's preferred countries.'''
return [country.id for country in self.countrypreferences.all()]
@country_preference_ids.setter
def country_preference_ids(self, country_ids):
'''Queue a pending update to replace the school's preferred countries
on the next save.'''
self._pending_country_preference_ids = country_ids
def save(self, *args, **kwargs):
'''Save the school normally, then update its country preferences.'''
super(School, self).save(*args, **kwargs)
if getattr(self, '_pending_country_preference_ids', []):
self.update_country_preferences(self._pending_country_preference_ids)
self._pending_country_preference_ids = []
@classmethod
def email_comments(cls, **kwargs):
school = kwargs['instance']
if kwargs['created'] and school.registration_comments:
send_mail('Registration Comments from '+ school.name, school.name +
' made comments about registration: '
+ school.registration_comments, '[email protected]',
['[email protected]'], fail_silently=True)
@classmethod
def email_confirmation(cls, **kwargs):
if kwargs['created']:
school = kwargs['instance']
if school.waitlist:
send_mail('BMUN 64 Waitlist Confirmation',
'You have officially been put on the waitlist for BMUN 64. '
'We will inform you if and when you are taken off the waitlist.\n\n'
'If you have any tech related questions, please email [email protected]. '
'For all other questions, please email [email protected].\n\n'
'Thank you for using Huxley!',
'[email protected]',
[school.primary_email], fail_silently=True)
else:
send_mail('BMUN 64 Registration Confirmation',
'You have officially been registered for BMUN 64. '
'To access your account, please log in at huxley.bmun.org.\n\n'
'The school registration fee is $50. The delegate registration '
'fee is $50 per student. You will be able to view your balance '
'on huxley.bmun.org in November, at which point we will begin '
'accepting payments.\n\n'
'If you have any tech related questions, please email [email protected]. '
'For all other questions, please email [email protected].\n\n'
'Thank you for using Huxley!',
'[email protected]',
[school.primary_email], fail_silently=True)
@classmethod
def create_zoho_contact(cls, **kwargs):
if not settings.ZOHO_CREDENTIALS:
return
school = kwargs['instance']
attrs = zoho.generate_contact_attributes(school)
parameters = {'JSONString': json.dumps(attrs)}
if kwargs['created']:
create_url = 'https://invoice.zoho.com/api/v3/contacts?organization_id=' + settings.ORGANIZATION_ID + '&authtoken=' + settings.AUTHTOKEN
r = requests.post(create_url, params=parameters)
else:
update_url = 'https://invoice.zoho.com/api/v3/contacts/'+ zoho.get_contact(school) +'?organization_id=' + settings.ORGANIZATION_ID + '&authtoken=' + settings.AUTHTOKEN
r = requests.put(update_url, params=parameters)
def __unicode__(self):
return self.name
class Meta:
db_table = u'school'
pre_save.connect(School.update_fees, sender=School)
pre_save.connect(School.update_waitlist, sender=School)
post_save.connect(School.email_comments, sender=School)
post_save.connect(School.email_confirmation, sender=School)
post_save.connect(School.create_zoho_contact, sender=School)
class Assignment(models.Model):
committee = models.ForeignKey(Committee)
country = models.ForeignKey(Country)
school = models.ForeignKey(School, null=True, blank=True, default=None)
@classmethod
def update_assignments(cls, new_assignments):
'''
Atomically update the set of country assignments in a transaction.
For each assignment in the updated list, either update the existing
one (and delete its delegates), or create a new one if it doesn't
exist.
'''
assignments = cls.objects.all().values()
assignment_dict = {(a['committee_id'], a['country_id']): a
for a in assignments}
additions = []
deletions = []
def add(committee, country, school):
additions.append(cls(
committee_id=committee,
country_id=country,
school_id=school,
))
def remove(assignment_data):
deletions.append(assignment_data['id'])
for committee, country, school in new_assignments:
key = (committee, country)
old_assignment = assignment_dict.get(key)
if not old_assignment:
add(committee, country, school)
continue
if old_assignment['school_id'] != school:
# Remove the old assignment instead of just updating it
# so that its delegates are deleted by cascade.
remove(old_assignment)
add(committee, country, school)
del assignment_dict[key]
for old_assignment in assignment_dict.values():
remove(old_assignment)
with transaction.atomic():
Assignment.objects.filter(id__in=deletions).delete()
Assignment.objects.bulk_create(additions)
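    # Illustrative note (not from the original source): given an existing
    # Assignment (committee=1, country=2, school=5), passing [(1, 2, 7)] to
    # update_assignments deletes the old row (cascading its delegates) and
    # bulk-creates a new one for school 7; any (committee, country) pair
    # missing from the new list is deleted as well.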
def __unicode__(self):
return self.committee.name + " : " + self.country.name + " : " + (self.school.name if self.school else "Unassigned")
class Meta:
db_table = u'assignment'
unique_together = ('committee', 'country')
class CountryPreference(models.Model):
school = models.ForeignKey(School)
country = models.ForeignKey(Country, limit_choices_to={'special': False})
rank = models.PositiveSmallIntegerField()
def __unicode__(self):
return '%s : %s (%d)' % (self.school.name, self.country.name, self.rank)
class Meta:
db_table = u'country_preference'
ordering = ['-school','rank']
unique_together = ('country', 'school')
class Delegate(models.Model):
assignment = models.ForeignKey(Assignment, related_name='delegates')
name = models.CharField(max_length=64, blank=True)
email = models.EmailField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
summary = models.TextField(default='', null=True)
def __unicode__(self):
return self.name
@property
def country(self):
return self.assignment.country
@property
def committee(self):
return self.assignment.committee
@property
def school(self):
return self.assignment.school
class Meta:
db_table = u'delegate'
ordering = ['assignment__country']
|
the-stack_106_22671 |
from collections import OrderedDict
from .FFI import FFIMethod
__all__ = [
"PotentialArguments"
]
class PotentialArgumentHolder:
"""
Wrapper class that simply holds onto
"""
def __init__(self, args):
if isinstance(args, OrderedDict):
self.arg_vec = args
else:
self.arg_vec = None
# supported extra types
extra_bools = []
extra_ints = []
extra_floats = []
for a in args:
if a is True or a is False:
extra_bools.append(a)
elif isinstance(a, int):
extra_ints.append(a)
elif isinstance(a, float):
extra_floats.append(a)
self.extra_bools = extra_bools
self.extra_ints = extra_ints
self.extra_floats = extra_floats
@property
def ffi_parameters(self):
if self.arg_vec is None:
raise ValueError("Python thinks we're using an old-style potential")
else:
return self.arg_vec.values()
def __repr__(self):
return "{}(<>)".format(type(self).__name__)
class OldStyleArg:
"""
Shim that supports old-style args
"""
def __init__(self, name=None, dtype=None, extra=None, shape=None, default=None):
self.name=name
self.dtype=self.canonicalize_dtype(dtype)
self.extra=extra
self.shape=shape
if default is not None:
raise ValueError("currrently, no longer supporting default values")
self.default=default
_type_map = {'int':int, 'float':float, 'str':str}
def canonicalize_dtype(self, dtype):
if isinstance(dtype, str):
return self._type_map[dtype]
else:
raise ValueError("don't know how to handle old-style dtype '{}'".format(dtype))
def cast(self, v):
if not isinstance(v, self.dtype):
raise ValueError(
"Argument mismatch: argument '{}' is expected to be of type {} (got {})".format(
self.name,
self.dtype.__name__,
type(v).__name__
))
return v
@property
def arg_name(self):
return self.name
class OldStyleArgList:
"""
Shim that supports old-style arg lists
"""
def __init__(self, args):
self.args = [OldStyleArg(**a) for a in args]
@property
def arg_names(self):
return tuple(a.arg_name for a in self.args)
def collect_args(self, *args, excluded_args=None, **kwargs):
arg_dict = OrderedDict()
req_dict = OrderedDict(
(k.arg_name, k) for k in self.args
)
for k in kwargs:
arg_dict[k] = req_dict[k].cast(kwargs[k])
del req_dict[k]
if excluded_args is not None:
for k in excluded_args:
if k in req_dict:
del req_dict[k]
if len(req_dict) > 0:
for v, k in zip(args, req_dict.copy()):
arg_dict[k] = req_dict[k].cast(v)
del req_dict[k]
if len(req_dict) > 0:
raise ValueError("{}.{}: missing required arguments {}".format(
type(self).__name__,
'collect_args',
tuple(req_dict.values())
))
return tuple(arg_dict.values()) # need this to tell PotentialArgumentHolder that this is old-style
class PotentialArgumentSpec:
"""
Simple wrapper to support both old- and new-style calling semantics
"""
def __init__(self, arg_pattern, name=None):
self._name = name
self.arg_pat = self.canonicalize_pats(arg_pattern)
@property
def name(self):
if self._name is None:
return self.arg_pat.name
else:
return self._name
def canonicalize_pats(self, pats):
if isinstance(pats, FFIMethod):
return pats
arg_list = []
for a in pats:
if isinstance(a, (dict, OrderedDict)):
name = a['name']
dtype = a['dtype']
default = a['default'] if 'default' in a else None
shape = a['shape'] if 'shape' in a else ()
else:
name = a[0]
dtype = a[1]
                default = a[2] if len(a) > 2 else None
                shape = a[3] if len(a) > 3 else ()
arg_list.append({
"name":name,
"dtype":dtype,
"default":default,
"shape":shape
})
return OldStyleArgList(arg_list)
def collect_args(self, *args, **kwargs):
return PotentialArgumentHolder(self.arg_pat.collect_args(*args, excluded_args=["coords", "raw_coords", "atoms"], **kwargs))
def arg_names(self, excluded=None):
"""
Canonicalizes arguments, if passed
:return:
:rtype:
"""
names = self.arg_pat.arg_names
if excluded is None:
return tuple(x for x in names if x not in excluded)
else:
return names
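# Illustrative sketch (not part of the original module): an old-style spec with
# made-up argument names, showing how positional and keyword values are checked
# against their declared types and bucketed by PotentialArgumentHolder.
def _example_collect_args():
    spec = PotentialArgumentSpec(
        [("nwaters", "int"), ("cutoff", "float")],
        name="example_potential",
    )
    holder = spec.collect_args(2, cutoff=9.0)
    # Old-style args end up in the typed buckets rather than in arg_vec.
    return holder.extra_ints, holder.extra_floats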
class AtomsPattern:
"""
Spec to define a pattern for atoms so that calls can be validated before a function
is every called
"""
def __init__(self, atoms):
self.atom_pat = atoms
def validate(self, atoms):
if self.atom_pat is not None:
import re
            if isinstance(self.atom_pat, str):
                self.atom_pat = re.compile(self.atom_pat)
matches = True
bad_ind = -1
try:
                matches = re.match(self.atom_pat, "".join(atoms))
except TypeError:
                for i, a in enumerate(zip(self.atom_pat, atoms)):
a1, a2 = a
if a1 != a2:
matches = False
bad_ind = i
if not matches and bad_ind >= 0:
raise ValueError("Atom mismatch at {}: expected atom list {} but got {}".format(
bad_ind,
                    tuple(self.atom_pat),
tuple(atoms)
))
elif not matches:
raise ValueError("Atom mismatch: expected atom pattern {} but got list {}".format(
                    self.atom_pat.pattern,
tuple(atoms)
))
return atoms
|
the-stack_106_22672 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.modules.events.tracks.controllers import (RHCreateTrack, RHCreateTrackGroup, RHDeleteTrack,
RHDeleteTrackGroup, RHDisplayTracks, RHEditProgram, RHEditTrack,
RHEditTrackGroup, RHManageTracks, RHSortTracks, RHTracksPDF)
from indico.web.flask.util import make_compat_redirect_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('tracks', __name__, template_folder='templates', virtual_template_folder='events/tracks',
url_prefix='/event/<confId>')
_bp.add_url_rule('/manage/tracks/', 'manage', RHManageTracks)
_bp.add_url_rule('/manage/tracks/program', 'edit_program', RHEditProgram, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/tracks/create', 'create_track', RHCreateTrack, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/tracks/sort', 'sort_tracks', RHSortTracks, methods=('POST',))
_bp.add_url_rule('/manage/tracks/<int:track_id>', 'edit_track', RHEditTrack, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/tracks/<int:track_id>', 'delete_track', RHDeleteTrack, methods=('DELETE',))
_bp.add_url_rule('/manage/track-groups/create', 'create_track_group', RHCreateTrackGroup, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/track-groups/<int:track_group_id>', 'edit_track_group', RHEditTrackGroup,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/track-groups/<int:track_group_id>', 'delete_track_group', RHDeleteTrackGroup,
methods=('DELETE',))
_bp.add_url_rule('/program', 'program', RHDisplayTracks)
_bp.add_url_rule('/program.pdf', 'program_pdf', RHTracksPDF)
_compat_bp = IndicoBlueprint('compat_tracks', __name__, url_prefix='/event/<int:confId>')
_compat_bp.add_url_rule('/manage/program/tracks/<int:track_id>/contributions/', 'track_contribs',
make_compat_redirect_func('contributions', 'contribution_list',
view_args_conv={'track_id': None}))
|
the-stack_106_22673 | from __future__ import print_function
import sys, os, time
from h2o.exceptions import H2OTypeError
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.automl import H2OAutoML
"""
This test is used to check arguments passed into H2OAutoML along with different ways of using `.train()`
"""
max_models = 2
def import_dataset(seed=0, larger=False):
df = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/{}".format("prostate_complete.csv.zip" if larger else "prostate.csv")))
target = "CAPSULE"
df[target] = df[target].asfactor()
#Split frames
fr = df.split_frame(ratios=[.8,.1], seed=seed)
#Set up train, validation, and test sets
return dict(train=fr[0], valid=fr[1], test=fr[2], target=target, target_idx=1)
# Below fails bc there are no models in the leaderboard, but AutoML needs to check the models to get the
# model type (binomial, multinomial, or regression)
# print("Check that exclude_algos implementation is complete, and empty leaderboard works")
# aml = H2OAutoML(max_runtime_secs=30, project_name="py_aml0", stopping_rounds=3, stopping_tolerance=0.001, stopping_metric="AUC", max_models=max_models, seed=1234, exclude_algos=["GLM", "DRF", "GBM", "DeepLearning", "StackedEnsemble"])
# aml.train(y="CAPSULE", training_frame=train)
# print("Check leaderboard to ensure that it only has a header")
# print(aml.leaderboard)
# assert aml.leaderboard.nrows == 0, "with all algos excluded, leaderboard is not empty"
def get_partitioned_model_names(leaderboard):
model_names = [leaderboard[i, 0] for i in range(0, (leaderboard.nrows))]
se_model_names = [m for m in model_names if m.startswith('StackedEnsemble')]
non_se_model_names = [m for m in model_names if m not in se_model_names]
return model_names, non_se_model_names, se_model_names
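# Illustrative note (not from the original test): with a leaderboard listing
# ['GBM_1', 'StackedEnsemble_AllModels', 'XGBoost_1'], the helper above returns
# (all three names, ['GBM_1', 'XGBoost_1'], ['StackedEnsemble_AllModels']).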
def test_early_stopping_args():
print("Check arguments to H2OAutoML class")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml0", stopping_rounds=3, stopping_tolerance=0.001, stopping_metric="AUC", max_models=max_models, seed=1234, exclude_algos=["DeepLearning"])
aml.train(y=ds['target'], training_frame=ds['train'])
assert aml.project_name == "py_aml0", "Project name is not set"
assert aml.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert aml.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert aml.stopping_metric == "AUC", "stopping_metric is not set to `AUC`"
assert aml.max_models == 2, "max_models is not set to 2"
assert aml.seed == 1234, "seed is not set to `1234`"
print("Check leaderboard")
print(aml.leaderboard)
def test_no_x_train_set_only():
print("AutoML run with x not provided and train set only")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml1", stopping_rounds=3, stopping_tolerance=0.001, stopping_metric="AUC", max_models=max_models, seed=1234)
aml.train(y=ds['target'], training_frame=ds['train'])
assert aml.project_name == "py_aml1", "Project name is not set"
assert aml.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert aml.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert aml.stopping_metric == "AUC", "stopping_metric is not set to `AUC`"
assert aml.max_models == 2, "max_models is not set to 2"
assert aml.seed == 1234, "seed is not set to `1234`"
print("Check leaderboard")
print(aml.leaderboard)
def test_no_x_train_and_validation_sets():
print("AutoML run with x not provided with train and valid")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml2", stopping_rounds=3, stopping_tolerance=0.001, stopping_metric="AUC", max_models=max_models, seed=1234)
aml.train(y=ds['target'], training_frame=ds['train'], validation_frame=ds['valid'])
assert aml.project_name == "py_aml2", "Project name is not set"
assert aml.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert aml.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert aml.stopping_metric == "AUC", "stopping_metric is not set to `AUC`"
assert aml.max_models == 2, "max_models is not set to 2"
assert aml.seed == 1234, "seed is not set to `1234`"
print("Check leaderboard")
print(aml.leaderboard)
def test_no_x_train_and_test_sets():
print("AutoML run with x not provided with train and test")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml3", stopping_rounds=3, stopping_tolerance=0.001, stopping_metric="AUC", max_models=max_models, seed=1234)
aml.train(y=ds['target'], training_frame=ds['train'], leaderboard_frame=ds['test'])
assert aml.project_name == "py_aml3", "Project name is not set"
assert aml.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert aml.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert aml.stopping_metric == "AUC", "stopping_metric is not set to `AUC`"
assert aml.max_models == 2, "max_models is not set to 2"
assert aml.seed == 1234, "seed is not set to `1234`"
print("Check leaderboard")
print(aml.leaderboard)
def test_no_x_train_and_validation_and_test_sets():
print("AutoML run with x not provided with train, valid, and test")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml4", stopping_rounds=3, stopping_tolerance=0.001, stopping_metric="AUC", max_models=max_models, seed=1234)
aml.train(y=ds['target'], training_frame=ds['train'], validation_frame=ds['valid'], leaderboard_frame=ds['test'])
assert aml.project_name == "py_aml4", "Project name is not set"
assert aml.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert aml.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert aml.stopping_metric == "AUC", "stopping_metric is not set to `AUC`"
assert aml.max_models == 2, "max_models is not set to 2"
assert aml.seed == 1234, "seed is not set to `1234`"
print("Check leaderboard")
print(aml.leaderboard)
def test_no_x_y_as_idx_train_and_validation_and_test_sets():
print("AutoML run with x not provided and y as col idx with train, valid, and test")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml5", stopping_rounds=3, stopping_tolerance=0.001, stopping_metric="AUC", max_models=max_models, seed=1234)
aml.train(y=ds['target_idx'], training_frame=ds['train'], validation_frame=ds['valid'], leaderboard_frame=ds['test'])
assert aml.project_name == "py_aml5", "Project name is not set"
assert aml.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert aml.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert aml.stopping_metric == "AUC", "stopping_metric is not set to `AUC`"
assert aml.max_models == 2, "max_models is not set to 2"
assert aml.seed == 1234, "seed is not set to `1234`"
print("Check leaderboard")
print(aml.leaderboard)
def test_exclude_algos():
print("AutoML doesn't train models for algos listed in exclude_algos")
ds = import_dataset()
aml = H2OAutoML(project_name="py_exclude_algos",
exclude_algos=['DRF', 'GLM'],
max_models=max_models,
seed=1)
aml.train(y=ds['target'], training_frame=ds['train'], validation_frame=ds['valid'])
_, non_se, se = get_partitioned_model_names(aml.leaderboard)
assert not any(['DRF' in name or 'GLM' in name for name in non_se])
assert len(se) == 2
def test_include_algos():
print("AutoML trains only models for algos listed in include_algos")
ds = import_dataset()
aml = H2OAutoML(project_name="py_include_algos",
include_algos=['GBM'],
max_models=max_models,
seed=1)
aml.train(y=ds['target'], training_frame=ds['train'], validation_frame=ds['valid'])
_, non_se, se = get_partitioned_model_names(aml.leaderboard)
assert all(['GBM' in name for name in non_se])
assert len(se) == 0, "No StackedEnsemble should have been trained if not explicitly included to the existing include_algos"
def test_include_exclude_algos():
print("include_algos and exclude_algos parameters are mutually exclusive")
try:
H2OAutoML(project_name="py_include_exclude_algos",
exclude_algos=['DRF', 'XGBoost'],
include_algos=['GBM'],
max_models=max_models,
seed=1)
assert False, "Should have thrown AssertionError"
except AssertionError as e:
assert "Use either include_algos or exclude_algos" in str(e)
def test_predict_on_train_set():
print("Check predict, leader, and leaderboard")
print("AutoML run with x not provided and train set only")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml6", stopping_rounds=3, stopping_tolerance=0.001, stopping_metric="AUC", max_models=max_models, seed=1234)
aml.train(y=ds['target'], training_frame=ds['train'])
print("Check leaderboard")
print(aml.leaderboard)
print("Check predictions")
print(aml.predict(ds['train']))
def test_nfolds_param():
print("Check nfolds is passed through to base models")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_nfolds3", nfolds=3, max_models=3, seed=1)
aml.train(y=ds['target'], training_frame=ds['train'])
_, non_se, _ = get_partitioned_model_names(aml.leaderboard)
amodel = h2o.get_model(non_se[0])
assert amodel.params['nfolds']['actual'] == 3
def test_nfolds_eq_0():
print("Check nfolds = 0 works properly")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_nfolds0", nfolds=0, max_models=3, seed=1)
aml.train(y=ds['target'], training_frame=ds['train'])
_, non_se, _ = get_partitioned_model_names(aml.leaderboard)
amodel = h2o.get_model(non_se[0])
assert amodel.params['nfolds']['actual'] == 0
def test_balance_classes():
print("Check balance_classes & related args work properly")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_balance_classes_etc",
exclude_algos=['XGBoost'], # XGB doesn't support balance_classes
max_models=3,
balance_classes=True,
class_sampling_factors=[0.2, 1.4],
max_after_balance_size=3.0,
seed=1)
aml.train(y=ds['target'], training_frame=ds['train'])
_, non_se, _ = get_partitioned_model_names(aml.leaderboard)
amodel = h2o.get_model(non_se[0])
assert amodel.params['balance_classes']['actual'] == True
assert amodel.params['max_after_balance_size']['actual'] == 3.0
assert amodel.params['class_sampling_factors']['actual'] == [0.2, 1.4]
def test_nfolds_default_and_fold_assignements_skipped_by_default():
print("Check that fold assignments were skipped by default and nfolds > 1")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_keep_cross_validation_fold_assignment_0",
nfolds=3, max_models=3, seed=1)
aml.train(y=ds['target'], training_frame=ds['train'])
_, non_se, _ = get_partitioned_model_names(aml.leaderboard)
amodel = h2o.get_model(non_se[0])
assert amodel.params['keep_cross_validation_fold_assignment']['actual'] == False
assert amodel._model_json["output"]["cross_validation_fold_assignment_frame_id"] == None
def test_keep_cross_validation_fold_assignment_enabled_with_nfolds_neq_0():
print("Check that fold assignments were kept when `keep_cross_validation_fold_assignment` = True and nfolds > 1")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_keep_cross_validation_fold_assignment_1",
nfolds=3, max_models=3, seed=1,
keep_cross_validation_fold_assignment=True)
aml.train(y=ds['target'], training_frame=ds['train'])
_, non_se, _ = get_partitioned_model_names(aml.leaderboard)
amodel = h2o.get_model(non_se[0])
assert amodel.params['keep_cross_validation_fold_assignment']['actual'] == True
assert amodel._model_json["output"]["cross_validation_fold_assignment_frame_id"] != None
def test_keep_cross_validation_fold_assignment_enabled_with_nfolds_eq_0():
print("Check that fold assignments were skipped when `keep_cross_validation_fold_assignment` = True and nfolds = 0")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_keep_cross_validation_fold_assignment_2",
nfolds=0, max_models=3, seed=1,
keep_cross_validation_fold_assignment=True)
aml.train(y=ds['target'], training_frame=ds['train'])
_, non_se, _ = get_partitioned_model_names(aml.leaderboard)
amodel = h2o.get_model(non_se[0])
assert amodel.params['keep_cross_validation_fold_assignment']['actual'] == False
assert amodel._model_json["output"]["cross_validation_fold_assignment_frame_id"] == None
def test_stacked_ensembles_are_trained_after_timeout():
print("Check that Stacked Ensembles are still trained after timeout")
max_runtime_secs = 20
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_SE_after_timeout", seed=1, max_runtime_secs=max_runtime_secs, exclude_algos=['DeepLearning'])
start = time.time()
aml.train(y=ds['target'], training_frame=ds['train'])
end = time.time()
assert end-start - max_runtime_secs > 0
_, _, se = get_partitioned_model_names(aml.leaderboard)
assert len(se) == 2, "StackedEnsemble should still be trained after timeout"
def test_automl_stops_after_max_models():
print("Check that automl gets interrupted after `max_models`")
ds = import_dataset()
max_models = 5
aml = H2OAutoML(project_name="py_aml_max_models", seed=1, max_models=max_models)
aml.train(y=ds['target'], training_frame=ds['train'])
_, non_se, _ = get_partitioned_model_names(aml.leaderboard)
assert len(non_se) == max_models, "obtained {} base models when {} are expected".format(len(non_se), max_models)
def test_stacked_ensembles_are_trained_after_max_models():
print("Check that Stacked Ensembles are still trained after max models have been trained")
max_models = 5
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_SE_after_max_models", seed=1, max_models=max_models)
aml.train(y=ds['target'], training_frame=ds['train'])
_, _, se = get_partitioned_model_names(aml.leaderboard)
assert len(se) == 2, "StackedEnsemble should still be trained after max models have been reached"
def test_stacked_ensembles_are_trained_with_blending_frame_even_if_nfolds_eq_0():
print("Check that we can disable cross-validation when passing a blending frame and that Stacked Ensembles are trained using this frame.")
max_models = 5
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_blending_frame", seed=1, max_models=max_models, nfolds=0)
aml.train(y=ds['target'], training_frame=ds['train'], blending_frame=ds['valid'], leaderboard_frame=ds['test'])
_, _, se = get_partitioned_model_names(aml.leaderboard)
assert len(se) == 2, "In blending mode, StackedEnsemble should still be trained in spite of nfolds=0."
for m in se:
model = h2o.get_model(m)
assert model.params['blending_frame']['actual']['name'] == ds['valid'].frame_id
assert model._model_json['output']['stacking_strategy'] == 'blending'
def test_frames_cannot_be_passed_as_key():
print("Check that all AutoML frames can be passed as keys.")
ds = import_dataset()
aml = H2OAutoML(project_name="py_aml_frames_as_keys", seed=1, max_models=3, nfolds=0)
kw_args = [
dict(training_frame=ds['train'].frame_id),
dict(training_frame=ds['train'], validation_frame=ds['valid'].frame_id),
dict(training_frame=ds['train'], blending_frame=ds['valid'].frame_id),
dict(training_frame=ds['train'], leaderboard_frame=ds['test'].frame_id),
]
for kwargs in kw_args:
try:
aml.train(y=ds['target'], **kwargs)
assert False, "should have thrown due to wrong frame key"
except H2OTypeError as e:
attr = next(k for k, v in kwargs.items() if v is not ds['train'])
assert "'{}' must be a valid H2OFrame".format(attr) in str(e)
# TO DO PUBDEV-5676
# Add a test that checks fold_column like in runit
pyunit_utils.run_tests([
test_early_stopping_args,
test_no_x_train_set_only,
test_no_x_train_and_validation_sets,
test_no_x_train_and_test_sets,
test_no_x_train_and_validation_and_test_sets,
test_no_x_y_as_idx_train_and_validation_and_test_sets,
test_exclude_algos,
test_include_algos,
test_include_exclude_algos,
test_predict_on_train_set,
test_nfolds_param,
test_nfolds_eq_0,
test_balance_classes,
test_nfolds_default_and_fold_assignements_skipped_by_default,
test_keep_cross_validation_fold_assignment_enabled_with_nfolds_neq_0,
test_keep_cross_validation_fold_assignment_enabled_with_nfolds_eq_0,
test_stacked_ensembles_are_trained_after_timeout,
test_automl_stops_after_max_models,
test_stacked_ensembles_are_trained_after_max_models,
test_stacked_ensembles_are_trained_with_blending_frame_even_if_nfolds_eq_0,
test_frames_cannot_be_passed_as_key,
])
|
the-stack_106_22674 | # apis_v1/documentation_source/position_public_support_count_for_ballot_item_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def position_public_support_count_for_ballot_item_doc_template_values(url_root):
"""
Show documentation about positionPublicSupportCountForBallotItem
"""
required_query_parameter_list = [
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
{
'name': 'kind_of_ballot_item',
'value': 'string', # boolean, integer, long, string
'description': 'The kind of ballot item we want the support count for. '
'(kind_of_ballot_item is either "CANDIDATE", "POLITICIAN" or "MEASURE")',
},
{
'name': 'ballot_item_id',
'value': 'integer', # boolean, integer, long, string
'description': 'The unique internal identifier of the ballot item we want the support count for. '
'(either ballot_item_id OR ballot_item_we_vote_id required -- not both. '
'If it exists, ballot_item_id is used instead of ballot_item_we_vote_id)',
},
{
'name': 'ballot_item_we_vote_id',
'value': 'string', # boolean, integer, long, string
'description': 'The unique identifier for this ballot_item across all networks '
'(either ballot_item_id OR ballot_item_we_vote_id required -- not both. '
'NOTE: In the future we might support other identifiers used in the industry.',
},
]
optional_query_parameter_list = [
# {
# 'name': '',
# 'value': '', # boolean, integer, long, string
# 'description': '',
# },
]
potential_status_codes_list = [
{
'code': 'UNABLE_TO_RETRIEVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING',
'description': 'Cannot proceed. Neither candidate_id nor measure_id were included.',
},
{
'code': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS',
'description': 'The number of supports for this ballot item was retrieved.',
},
{
'code': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_NOT_FOLLOWED',
'description': 'The number of organizations that support this ballot item that voter is NOT following.',
},
]
try_now_link_variables_dict = {
'kind_of_ballot_item': 'CANDIDATE',
'ballot_item_id': '5655',
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "count": integer,\n' \
'}'
template_values = {
'api_name': 'positionPublicSupportCountForBallotItem',
'api_slug': 'positionPublicSupportCountForBallotItem',
'api_introduction':
"A single number showing the total supporters for this Ballot Item (Candidate or Measure) from "
"any organizations and public figures. (Not just the ones a voter follows.)",
'try_now_link': 'apis_v1:positionPublicSupportCountForBallotItemView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
|
the-stack_106_22676 | import os, argparse
import pandas as pd
import ami_md.ami_json as aj
dtypes = {
'digitizationProcess.analogDigitalConverter.serialNumber': object,
'digitizationProcess.captureSoftware.version': object,
'digitizationProcess.playbackDevice.serialNumber': object,
'digitizationProcess.timeBaseCorrector.serialNumber': object
}
def _make_parser():
parser = argparse.ArgumentParser()
parser.description = "convert a PAMIdb merge export to JSON records"
parser.add_argument("-i", "--input",
help = "path to a PAMIdb merge export",
required = True)
parser.add_argument("-o", "--output",
help = "directory to save all json files",
required = True)
parser.add_argument("-s", "--schema",
help = "current schema version, preferred format x.y.z",
default = "2.0.0")
return parser
def main():
parser = _make_parser()
args = parser.parse_args()
md = pd.read_csv(args.input, dtype = dtypes)
md = md.dropna(axis = 1, how = "all")
md = md.drop(['asset.fileExt'], axis = 1)
json_directory = os.path.abspath(args.output)
for (index, row) in md.iterrows():
json_tree = aj.ami_json(flat_dict = row.to_dict(),
schema_version = args.schema)
json_tree.write_json(json_directory, indent = 4)
if __name__ == "__main__":
main()
|
the-stack_106_22677 | from typing import List
from typing import Union
import pandas
from sqlalchemy.orm import Session
from db.crud import recipeBewertung as crud_recipeBewertung
from schemes.exceptions import DatabaseException
from schemes.exceptions import RecipeNotFound
from schemes.scheme_filter import FilterRecipe
from schemes.scheme_recipe import Recipe
from schemes.scheme_recipe import RecipeBase
from schemes.scheme_recipe import RecipeBewertungCreate
from schemes.scheme_recipe import RecipeBewertungReturn
from schemes.scheme_user import UserBase
from tools.recipe_db import recipe_db
from tools.recipe_db import RecipeDB
def search_recipe(db_session: Session, user: UserBase, recipe_filter: FilterRecipe) -> Recipe:
"""Search for a recipe with the given filter
Args:
db_session (sqlalchemy.orm.Session): Session to the DB -> See `db: Session = Depends(get_db)`
recipe_filter (schemes.scheme_filter.FilterRecipe): Filter the Recipes
Returns:
        schemes.scheme_recipe.Recipe: The one chosen Recipe
"""
pd_random_recipe: pandas.DataFrame = __apply_filter(recipe_db.pd_frame, recipe_filter).sample()
random_recipe = Recipe(
id=pd_random_recipe["_id.$oid"].array[0],
name=pd_random_recipe["name"].array[0],
ingredients=pd_random_recipe["ingredients"].array[0],
url=pd_random_recipe["url"].array[0],
image=pd_random_recipe["image"].array[0],
cookTime=pd_random_recipe["cookTime"].array[0],
prepTime=pd_random_recipe["prepTime"].array[0],
)
if not crud_recipeBewertung.get_bewertung_from_user_to_recipe(db=db_session, user=user, recipe=random_recipe):
add_assessment(
db_session=db_session,
assessment=RecipeBewertungCreate(name=random_recipe.name, person=user, recipe=random_recipe),
)
return random_recipe
def __apply_filter(recipes: pandas.DataFrame, recipe_filter: FilterRecipe) -> pandas.DataFrame:
cooktime_bool = RecipeDB.filter_cooktime(user_pd_frame=recipes, total_time=recipe_filter.total_time)
keyword_bool = RecipeDB.filter_keyword(user_pd_frame=recipes, keyword=recipe_filter.keyword)
filter_bool = cooktime_bool & keyword_bool
    if not filter_bool.any():
raise RecipeNotFound("No Recipe Found with these Filters")
return recipes[filter_bool]
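# Illustrative note (not from the original module): __apply_filter keeps only
# the rows where both the cooktime and the keyword mask are True; if no row
# survives, search_recipe never reaches .sample() because RecipeNotFound is
# raised here first.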
def get_assessments_from_user(db_session: Session, user: UserBase) -> Union[List[RecipeBewertungReturn], None]:
"""Get Bewertungen from a User to all recipes
Args:
db_session (sqlalchemy.orm.Session): Session to the DB -> See `db: Session = Depends(get_db)`
        user (schemes.scheme_user.UserBase): The user whose assessments are returned
    Returns:
        Union[List[schemes.scheme_recipe.RecipeBewertungReturn], None]: Return a list of all recipe assessments or None
"""
db_recipes = crud_recipeBewertung.get_all_user_bewertungen(db_session, user)
scheme_recipes = [
RecipeBewertungReturn(
name=db_recipe.rezept_name,
email=db_recipe.person_email,
id=db_recipe.rezept_id,
comment=db_recipe.kommentar,
rating=db_recipe.rating,
timestamp=db_recipe.zeitstempel,
)
for db_recipe in db_recipes
]
return scheme_recipes
def add_assessment(db_session: Session, assessment: RecipeBewertungCreate) -> RecipeBewertungReturn:
"""Add the given assessment to the Database.
Args:
db_session (sqlalchemy.orm.Session): Session to the DB -> See `db: Session = Depends(get_db)`
        assessment (schemes.scheme_recipe.RecipeBewertungCreate): The assessment needs to be unique
    Raises:
        schemes.exceptions.DatabaseException: if the User or Recipe does not exist or the assessment is duplicated
    Returns:
        [schemes.scheme_recipe.RecipeBewertungReturn]: The created assessment
"""
try:
created_assessment = crud_recipeBewertung.create_bewertung(db_session, assessment)
return RecipeBewertungReturn(
name=created_assessment.rezept_name,
email=created_assessment.person_email,
id=created_assessment.rezept_id,
comment=created_assessment.kommentar,
rating=created_assessment.rating,
timestamp=created_assessment.zeitstempel,
)
except DatabaseException as error:
raise error
def update_assessment(
db_session: Session, old_assessment: RecipeBewertungCreate, new_assessment: RecipeBewertungCreate
) -> RecipeBewertungReturn:
"""Update the comment and rating of a existing assessment
Args:
db_session (sqlalchemy.orm.Session): Session to the DB -> See `db: Session = Depends(get_db)`
old_assessment (schemes.scheme_recipe.RestBewertungCreate): The current assessment
new_assessment (schemes.scheme_recipe.RecipeBewertungCreate): The new assessment with the updated values
Raises:
schemes.exceptions.DatabaseException: if the User or Recipe does not exist
Returns:
schemes.scheme_recipe.RecipeBewertungReturn: Recipe with the new values
"""
try:
updated_assessment = crud_recipeBewertung.update_assessment(db_session, old_assessment, new_assessment)
except DatabaseException as error:
raise error
return RecipeBewertungReturn(
name=updated_assessment.rezept_name,
email=updated_assessment.person_email,
id=updated_assessment.rezept_id,
comment=updated_assessment.kommentar,
rating=updated_assessment.rating,
timestamp=updated_assessment.zeitstempel,
)
def delete_assessment(db_session: Session, user: UserBase, recipe: RecipeBase) -> int:
"""Delete one assessment that are mapped between the user and recipe
Args:
db_session (sqlalchemy.orm.Session): Session to the DB -> See `db: Session = Depends(get_db)`
user (schemes.scheme_user.UserBase): The owner of the assessment
recipe (schemes.scheme_recipe.RecipeBase): The mapped recipe
Raises:
schemes.exceptions.DatabaseException: if the User or recipe does not exist
Returns:
int: The number of affected Rows of the delete
"""
rows = crud_recipeBewertung.delete_bewertung(db_session, user, recipe)
if rows == 0:
raise DatabaseException("Can not delete assessment. Does the user and recipe excist?")
return rows
|
the-stack_106_22678 | from flask import Flask, request
app = Flask(__name__)
PORT = 7000
@app.route('/', methods=["GET"])
def home():
remote_user = request.headers.get('REMOTE_USER')
return "Hello {}, this is service2.".format(remote_user)
if __name__ == "__main__":
app.run(port=PORT, debug=True)
|
the-stack_106_22680 | import tensorflow as tf
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
import cv2
from core.yolov4 import YOLOv4, YOLOv3, YOLOv3_tiny, decode
import core.utils as utils
import os
from core.config import cfg
flags.DEFINE_string('weights', './checkpoints/yolov3-416', 'path to weights file')
flags.DEFINE_string('output', './checkpoints/yolov3-416-int8.tflite', 'path to output')
flags.DEFINE_integer('input_size', 416, 'input size of the model')
flags.DEFINE_string('quantize_mode', 'float32', 'quantize mode (int8, float16, float32)')
flags.DEFINE_string('dataset', "/Volumes/Elements/data/coco_dataset/coco/5k.txt", 'path to dataset')
def representative_data_gen():
fimage = open(FLAGS.dataset).read().split()
for input_value in range(10):
if os.path.exists(fimage[input_value]):
original_image=cv2.imread(fimage[input_value])
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
image_data = utils.image_preprocess(np.copy(original_image), [FLAGS.input_size, FLAGS.input_size])
img_in = image_data[np.newaxis, ...].astype(np.float32)
print("calibration image {}".format(fimage[input_value]))
yield [img_in]
else:
continue
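# Note (sketch, not from the original script): for int8 conversion the TFLite
# converter calls the generator above repeatedly to calibrate activation
# ranges, so every yielded [img_in] must match the model input shape
# (1, input_size, input_size, 3) and use the same preprocessing as inference.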
def save_tflite():
converter = tf.lite.TFLiteConverter.from_saved_model(FLAGS.weights)
if FLAGS.quantize_mode == 'float16':
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.compat.v1.lite.constants.FLOAT16]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
converter.allow_custom_ops = True
elif FLAGS.quantize_mode == 'int8':
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
converter.allow_custom_ops = True
converter.representative_dataset = representative_data_gen
tflite_model = converter.convert()
open(FLAGS.output, 'wb').write(tflite_model)
logging.info("model saved to: {}".format(FLAGS.output))
def demo():
interpreter = tf.lite.Interpreter(model_path=FLAGS.output)
interpreter.allocate_tensors()
logging.info('tflite model loaded')
input_details = interpreter.get_input_details()
print(input_details)
output_details = interpreter.get_output_details()
print(output_details)
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
print(output_data)
def main(_argv):
save_tflite()
demo()
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
the-stack_106_22683 | RESPONSES = {
'auth_response': {
'status': 200,
'type': 'success',
'code': 'R-0000',
        'detail': 'Authentication is successful'
},
'json_test': {
'status': 200,
'type': 'success',
'code': 'R-0001',
'detail': 'Json test is correct'
},
}
ERRORS = {
'authentification_error': {
'status': 401,
'type': 'error',
'code': 'E-0000',
'detail': 'Key and Resource are not valid',
},
'json_format_error': {
'status': 400,
'type': 'error',
'code': 'E-0001',
'detail': 'Json request is not valid',
},
'openapi3_error': {
'status': 400,
'type': 'error',
'code': 'E-0002',
'detail': 'Json payload is not valid',
},
'session_error': {
'status': 403,
'type': 'error',
'code': 'E-0003',
'detail': 'Session is not valid',
},
'signature_error': {
'status': 401,
'type': 'error',
'code': 'E-0004',
'detail': 'Signature is not valid',
},
'signature_header_not_found': {
'status': 401,
'type': 'error',
'code': 'E-0005',
'detail': 'Content-Signature header error',
},
}
CATALOG = {
**RESPONSES,
**ERRORS
}
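# Illustrative sketch (not part of the original module): responses and errors
# share one lookup table, so a handler can resolve any code by its key.
def _example_lookup(key='auth_response'):
    entry = CATALOG[key]
    return entry['status'], entry['code'], entry['detail']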
|
the-stack_106_22684 | """
Tests that apply specifically to the Python parser. Unless specifically
stated as a Python-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the C parser can accept further
arguments when parsing.
"""
import csv
from io import BytesIO, StringIO
import pytest
from pandas.errors import ParserError
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
def test_default_separator(python_parser_only):
# see gh-17333
#
# csv.Sniffer in Python treats "o" as separator.
data = "aob\n1o2\n3o4"
parser = python_parser_only
expected = DataFrame({"a": [1, 3], "b": [2, 4]})
result = parser.read_csv(StringIO(data), sep=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("skipfooter", ["foo", 1.5, True])
def test_invalid_skipfooter_non_int(python_parser_only, skipfooter):
# see gh-15925 (comment)
data = "a\n1\n2"
parser = python_parser_only
msg = "skipfooter must be an integer"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=skipfooter)
def test_invalid_skipfooter_negative(python_parser_only):
# see gh-15925 (comment)
data = "a\n1\n2"
parser = python_parser_only
msg = "skipfooter cannot be negative"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=-1)
@pytest.mark.parametrize("kwargs", [{"sep": None}, {"delimiter": "|"}])
def test_sniff_delimiter(python_parser_only, kwargs):
data = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
parser = python_parser_only
result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["A", "B", "C"],
index=Index(["foo", "bar", "baz"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_sniff_delimiter_comment(python_parser_only):
data = """# comment line
index|A|B|C
# comment line
foo|1|2|3 # ignore | this
bar|4|5|6
baz|7|8|9
"""
parser = python_parser_only
result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment="#")
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["A", "B", "C"],
index=Index(["foo", "bar", "baz"], name="index"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_sniff_delimiter_encoding(python_parser_only, encoding):
parser = python_parser_only
data = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
if encoding is not None:
from io import TextIOWrapper
data = data.encode(encoding)
data = BytesIO(data)
data = TextIOWrapper(data, encoding=encoding)
else:
data = StringIO(data)
result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["A", "B", "C"],
index=Index(["foo", "bar", "baz"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_single_line(python_parser_only):
# see gh-6607: sniff separator
parser = python_parser_only
result = parser.read_csv(StringIO("1,2"), names=["a", "b"], header=None, sep=None)
expected = DataFrame({"a": [1], "b": [2]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{"skipfooter": 2}, {"nrows": 3}])
def test_skipfooter(python_parser_only, kwargs):
# see gh-6607
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
parser = python_parser_only
result = parser.read_csv(StringIO(data), **kwargs)
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")]
)
def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
# see gh-6607
parser = python_parser_only
with open(csv1, "rb") as f:
data = f.read()
data = data.replace(b",", b"::")
expected = parser.read_csv(csv1)
module = pytest.importorskip(compression)
klass = getattr(module, klass)
with tm.ensure_clean() as path:
tmp = klass(path, mode="wb")
tmp.write(data)
tmp.close()
result = parser.read_csv(path, sep="::", compression=compression)
tm.assert_frame_equal(result, expected)
def test_read_csv_buglet_4x_multi_index(python_parser_only):
# see gh-6607
data = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
parser = python_parser_only
expected = DataFrame(
[
[-0.5109, -2.3358, -0.4645, 0.05076, 0.3640],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
[-0.6662, -0.5243, -0.3580, 0.89145, 2.5838],
],
columns=["A", "B", "C", "D", "E"],
index=MultiIndex.from_tuples(
[("a", "b", 10.0032, 5), ("a", "q", 20, 4), ("x", "q", 30, 3)],
names=["one", "two", "three", "four"],
),
)
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_read_csv_buglet_4x_multi_index2(python_parser_only):
# see gh-6893
data = " A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9"
parser = python_parser_only
expected = DataFrame.from_records(
[(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list("abcABC"),
index=list("abc"),
)
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("add_footer", [True, False])
def test_skipfooter_with_decimal(python_parser_only, add_footer):
# see gh-6971
data = "1#2\n3#4"
parser = python_parser_only
expected = DataFrame({"a": [1.2, 3.4]})
if add_footer:
# The stray footer line should not mess with the
# casting of the first two lines if we skip it.
kwargs = {"skipfooter": 1}
data += "\nFooter"
else:
kwargs = {}
result = parser.read_csv(StringIO(data), names=["a"], decimal="#", **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sep", ["::", "#####", "!!!", "123", "#1!c5", "%!c!d", "@@#4:2", "_!pd#_"]
)
@pytest.mark.parametrize(
"encoding", ["utf-16", "utf-16-be", "utf-16-le", "utf-32", "cp037"]
)
def test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding):
# see gh-3404
expected = DataFrame({"a": [1], "b": [2]})
parser = python_parser_only
data = "1" + sep + "2"
encoded_data = data.encode(encoding)
result = parser.read_csv(
BytesIO(encoded_data), sep=sep, names=["a", "b"], encoding=encoding
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])
def test_multi_char_sep_quotes(python_parser_only, quoting):
# see gh-13374
kwargs = {"sep": ",,"}
parser = python_parser_only
data = 'a,,b\n1,,a\n2,,"2,,b"'
if quoting == csv.QUOTE_NONE:
msg = "Expected 2 fields in line 3, saw 3"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), quoting=quoting, **kwargs)
else:
msg = "ignored when a multi-char delimiter is used"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), quoting=quoting, **kwargs)
def test_none_delimiter(python_parser_only, capsys):
# see gh-13374 and gh-17465
parser = python_parser_only
data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9"
expected = DataFrame({"a": [0, 7], "b": [1, 8], "c": [2, 9]})
# We expect the third line in the data to be
# skipped because it is malformed, but we do
# not expect any errors to occur.
result = parser.read_csv(
StringIO(data), header=0, sep=None, warn_bad_lines=True, error_bad_lines=False
)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert "Skipping line 3" in captured.err
@pytest.mark.parametrize("data", ['a\n1\n"b"a', 'a,b,c\ncat,foo,bar\ndog,foo,"baz'])
@pytest.mark.parametrize("skipfooter", [0, 1])
def test_skipfooter_bad_row(python_parser_only, data, skipfooter):
# see gh-13879 and gh-15910
parser = python_parser_only
if skipfooter:
msg = "parsing errors in the skipped footer rows"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), skipfooter=skipfooter)
else:
msg = "unexpected end of data|expected after"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), skipfooter=skipfooter)
def test_malformed_skipfooter(python_parser_only):
parser = python_parser_only
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#", skipfooter=1)
@pytest.mark.parametrize("thousands", [None, "."])
@pytest.mark.parametrize(
"value, result_value",
[
("1,2", 1.2),
("1,2e-1", 0.12),
("1,2E-1", 0.12),
("1,2e-10", 0.0000000012),
("1,2e1", 12.0),
("1,2E1", 12.0),
("-1,2e-1", -0.12),
("0,2", 0.2),
(",2", 0.2),
],
)
def test_decimal_and_exponential(python_parser_only, thousands, value, result_value):
# GH#31920
data = StringIO(
f"""a b
1,1 {value}
"""
)
result = python_parser_only.read_csv(
data, "\t", decimal=",", engine="python", thousands=thousands
)
expected = DataFrame({"a": [1.1], "b": [result_value]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("thousands", [None, "."])
@pytest.mark.parametrize(
"value",
["e11,2", "1e11,2", "1,2,2", "1,2.1", "1,2e-10e1", "--1,2", "1a.2,1", "1..2,3"],
)
def test_decimal_and_exponential_erroneous(python_parser_only, thousands, value):
# GH#31920
data = StringIO(
f"""a b
1,1 {value}
"""
)
result = python_parser_only.read_csv(data, "\t", decimal=",", thousands=thousands)
expected = DataFrame({"a": [1.1], "b": [value]})
tm.assert_frame_equal(result, expected)
|
the-stack_106_22685 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import six
import unittest
from itertools import dropwhile
from mock import patch, call
from airflow import configuration
from airflow.models.connection import Connection
from airflow.utils import db
from airflow.contrib.hooks.spark_sql_hook import SparkSqlHook
def get_after(sentinel, iterable):
"""Get the value after `sentinel` in an `iterable`"""
truncated = dropwhile(lambda el: el != sentinel, iterable)
next(truncated)
return next(truncated)
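# For example (illustrative): get_after('-f', ['spark-sql', '-f', '/path/to/file.sql'])
# returns '/path/to/file.sql'.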
class TestSparkSqlHook(unittest.TestCase):
_config = {
'conn_id': 'spark_default',
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'name': 'spark-job',
'num_executors': 10,
'verbose': True,
'sql': ' /path/to/sql/file.sql ',
'conf': 'key=value,PROP=VALUE'
}
def setUp(self):
configuration.load_test_config()
db.merge_conn(
Connection(
conn_id='spark_default', conn_type='spark',
host='yarn://yarn-master')
)
def test_build_command(self):
hook = SparkSqlHook(**self._config)
# The subprocess requires an array but we build the cmd by joining on a space
cmd = ' '.join(hook._prepare_command(""))
# Check all the parameters
assert "--executor-cores {}".format(self._config['executor_cores']) in cmd
assert "--executor-memory {}".format(self._config['executor_memory']) in cmd
assert "--keytab {}".format(self._config['keytab']) in cmd
assert "--name {}".format(self._config['name']) in cmd
assert "--num-executors {}".format(self._config['num_executors']) in cmd
sql_path = get_after('-f', hook._prepare_command(""))
assert self._config['sql'].strip() == sql_path
# Check if all config settings are there
for kv in self._config['conf'].split(","):
k, v = kv.split('=')
assert "--conf {0}={1}".format(k, v) in cmd
if self._config['verbose']:
assert "--verbose" in cmd
@patch('airflow.contrib.hooks.spark_sql_hook.subprocess.Popen')
def test_spark_process_runcmd(self, mock_popen):
# Given
mock_popen.return_value.stdout = six.StringIO('Spark-sql communicates using stdout')
mock_popen.return_value.stderr = six.StringIO('stderr')
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSqlHook(
conn_id='spark_default',
sql='SELECT 1'
)
with patch.object(hook.log, 'debug') as mock_debug:
with patch.object(hook.log, 'info') as mock_info:
hook.run_query()
mock_debug.assert_called_with(
'Spark-Sql cmd: %s',
['spark-sql', '-e', 'SELECT 1', '--master', 'yarn', '--name', 'default-name', '--verbose',
'--queue', 'default']
)
mock_info.assert_called_with(
'Spark-sql communicates using stdout'
)
# Then
self.assertEqual(
mock_popen.mock_calls[0],
call(['spark-sql', '-e', 'SELECT 1', '--master', 'yarn', '--name', 'default-name', '--verbose',
'--queue', 'default'], stderr=-2, stdout=-1)
)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22686 | #! /usr/bin/env python2.7
import pika
import sys
import json
message = ' '.join(sys.argv[1:]) or "Hello World!"
def broadcast(message):
connection = pika.BlockingConnection(pika.ConnectionParameters(
'localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body=message)
print(" [x] Sent '",message,"'")
    connection.close()
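# The calls below broadcast example payloads in several forms: a plain Python repr()
# string, a json.dumps() encoding of the same data, and a hand-written JSON string.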
data = [ { 'a':'A', 'b':(2, 4), 'c':3.0 } ]
broadcast(repr(data))
data_string = json.dumps(data)
broadcast(data_string)
json_string = '''{ "firstName": "John","age": 25,"address": {"streetAddress": "21 2nd Street","city": "New York","state": "NY","postalCode": "10021"},"phoneNumber": [{"type": "home","number": "212 555-1234"},{"type": "fax","number": "646 555-4567"}]}'''
broadcast(json_string)
|
the-stack_106_22691 | # Copyright 2021, Robotec.ai sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Launchfile for benchmarking rosbag2.
This launchfile can only be launched with 'ros2 launch' command.
Two launch arguments are required:
* benchmark - path to benchmark description in yaml format ('benchmark:=<PATH>'),
* producers - path to producers description in yaml format ('producers:=<PATH>').
The goal of this launchfile is to launch, in sequence, all processes and/or nodes with the right
parameters required for the selected benchmark. The cross section of parameters is generated from
the parameters in the 'benchmark' yaml description file.
Depending on the 'no_transport' parameter in the benchmark description, a single run in the launch
sequence looks as follows:
NO TRANSPORT:
Only the 'writer_benchmark' node is used as the 'producer node' (PN). It directly writes messages to
a storage and then fills up a result file. No additional processes are required.
PN starts -> PN exits
TRANSPORT:
For an end-to-end benchmark, a `ros2 bag record` (ROSBAG) process and a 'result writer' (RW) are also
included in a single launch sequence run. In this case the 'benchmark_publishers' node acts as the
producer node. The result writer node writes the final result file.
ROSBAG starts -> PN starts -> PN exits -> ROSBAG exits -> RW starts
After the whole sequence is finished, both the producers and benchmark description files are copied
to the benchmark folder.
"""
import datetime
import os
import pathlib
import shutil
import signal
import sys
from ament_index_python import get_package_share_directory
import launch
import launch_ros
import yaml
_bench_cfg_path = None
_producers_cfg_path = None
_producer_idx = 0
_producer_nodes = []
_rosbag_processes = []
_rosbag_pid = None
_result_writers = []
def _parse_arguments(args=sys.argv[4:]):
"""Parse benchmark and producers config file paths."""
bench_cfg_path = None
producers_cfg_path = None
err_str = 'Missing or invalid arguments detected. ' \
'Launchfile requires "benchmark:=" and "producers:=" arguments ' \
        'with corresponding config files.'
if len(args) != 2:
raise RuntimeError(err_str)
else:
for arg in args:
if 'benchmark:=' in arg:
bench_cfg_path = pathlib.Path(arg.replace('benchmark:=', ''))
if not bench_cfg_path.is_file():
raise RuntimeError(
'Batch config file {} does not exist.'.format(bench_cfg_path)
)
elif 'producers:=' in arg:
producers_cfg_path = pathlib.Path(arg.replace('producers:=', ''))
if not producers_cfg_path.is_file():
raise RuntimeError(
'Producers config file {} does not exist.'.format(producers_cfg_path)
)
else:
raise RuntimeError(err_str)
return bench_cfg_path, producers_cfg_path
def _copy_config_files():
"""Copy benchmark and producers config files to benchmark folder."""
global _bench_cfg_path, _producers_cfg_path
# Copy yaml configs for current benchmark after benchmark is finished
benchmark_path = pathlib.Path(_producer_nodes[0]['parameters']['db_folder'])
shutil.copy(str(_bench_cfg_path), str(benchmark_path.with_name('benchmark.yaml')))
shutil.copy(str(_producers_cfg_path), str(benchmark_path.with_name('producers.yaml')))
def _launch_sequence(transport):
"""
Continue with launch sequence (launch entry action of next run).
Launches next producer node or rosbag2 record process, based on transport (end to end)
or transportless type of benchmark.
:param" transport If True launch a 'ros2 bag record' process, else a producer node.
"""
global _producer_idx, _producer_nodes, _rosbag_processes
if _producer_idx == len(_producer_nodes):
_copy_config_files()
return launch.actions.LogInfo(msg='Benchmark finished!')
action = None
if transport:
action = _rosbag_processes[_producer_idx]
else:
action = _producer_nodes[_producer_idx]['node']
return action
def _rosbag_proc_started(event, context):
"""Register current rosbag2 PID so we can terminate it when producer exits."""
global _rosbag_pid
_rosbag_pid = event.pid
def _rosbag_ready_check(event):
"""
Consider rosbag2 ready when 'Listening for topics...' string is printed.
Launches producer node if ready.
"""
target_str = 'Listening for topics...'
if target_str in event.text.decode():
return _launch_sequence(transport=False)
def _rosbag_proc_exited(event, context):
"""
Start next rosbag2 record process after current one exits.
Launches result writer on exit.
"""
global _producer_idx, _result_writers, _rosbag_pid
# ROS2 bag returns 2 if terminated with SIGINT, which we expect here
if event.returncode != 2:
_rosbag_pid = None
return [
launch.actions.LogInfo(msg='Rosbag2 record error. Shutting down benchmark.'),
launch.actions.EmitEvent(
event=launch.events.Shutdown(
reason='Rosbag2 record error'
)
)
]
return [
_result_writers[_producer_idx-1]
]
def _producer_node_started(event, context):
"""Log current benchmark progress on producer start."""
global _producer_idx
return launch.actions.LogInfo(
msg='-----------{}/{}-----------'.format(_producer_idx + 1, len(_producer_nodes))
)
def _producer_node_exited(event, context):
"""
Launch new producer when current has finished.
If transport is on, then stops rosbag2 recorder process.
Handles clearing of bags.
"""
global _producer_idx, _producer_nodes, _rosbag_pid
node_params = _producer_nodes[_producer_idx]['parameters']
transport = node_params['transport']
# Handle clearing bag files
if not node_params['preserve_bags']:
db_files = pathlib.Path.cwd().joinpath(node_params['db_folder']).glob('*.db3')
for f in db_files:
f.unlink()
# If we have non empty rosbag PID, then we need to kill it (end-to-end transport case)
if _rosbag_pid is not None and transport:
os.kill(_rosbag_pid, signal.SIGINT)
_rosbag_pid = None
# Shutdown benchmark with error if producer node crashes
if event.returncode != 0:
return [
launch.actions.LogInfo(msg='Writer error. Shutting down benchmark.'),
launch.actions.EmitEvent(
event=launch.events.Shutdown(
reason='Writer error'
)
)
]
# Bump up producer index, so the launch sequence can continue
_producer_idx += 1
return [
launch.actions.LogInfo(
msg='---------------------------'
),
_launch_sequence(transport=transport)
]
def generate_launch_description():
"""Generate launch description for ros2 launch system."""
global _producer_nodes, _bench_cfg_path, _producers_cfg_path
_bench_cfg_path, _producers_cfg_path = _parse_arguments()
# Parse yaml config for benchmark
bench_cfg = None
with open(_bench_cfg_path, 'r') as config_file:
bench_cfg_yaml = yaml.load(config_file, Loader=yaml.FullLoader)
bench_cfg = (bench_cfg_yaml['rosbag2_performance_benchmarking']
['benchmark_node']
['ros__parameters'])
# Benchmark options
benchmark_params = bench_cfg['benchmark']
repeat_each = benchmark_params.get('repeat_each')
db_root_folder = benchmark_params.get('db_root_folder')
summary_result_file = benchmark_params.get('summary_result_file')
transport = not benchmark_params.get('no_transport')
preserve_bags = benchmark_params.get('preserve_bags')
# Producers options
producers_params = bench_cfg['benchmark']['parameters']
max_cache_size_params = producers_params.get('max_cache_size')
max_bag_size_params = producers_params.get('max_bag_size')
compression_params = producers_params.get('compression')
compression_queue_size_params = producers_params.get('compression_queue_size')
compression_threads_params = producers_params.get('compression_threads')
storage_config_file_params = producers_params.get('storage_config_file')
# Parameters cross section for whole benchmark
# Parameters cross section is a list of all possible parameters variants
params_cross_section = []
# Generate unique benchmark directory name
benchmark_cfg_name = pathlib.Path(_bench_cfg_path).name.replace('.yaml', '')
producer_cfg_name = pathlib.Path(_producers_cfg_path).name.replace('.yaml', '')
timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
transport_postfix = 'transport' if transport else 'no_transport'
benchmark_dir_name = benchmark_cfg_name + \
'_' + producer_cfg_name + \
'_' + transport_postfix + \
'_' + timestamp
# Helper function for generating cross section list
def __generate_cross_section_parameter(i,
cache,
compression,
compression_queue_size,
compression_threads,
storage_config,
max_bag_size):
# Storage conf parameter for each producer
st_conf_filename = storage_config.replace('.yaml', '')
storage_conf_path = ''
if storage_config != '':
storage_conf_path = pathlib.Path(
get_package_share_directory(
'rosbag2_performance_benchmarking'
)
).joinpath('config', 'storage', storage_config)
if not storage_conf_path.exists():
raise RuntimeError(
'Config {} does not exist.'.format(storage_config))
st_conf_filename = pathlib.Path(storage_config).with_suffix('')
# Generates unique title for producer
node_title = 'run_' + \
'{i}_{cache}_{comp}_{comp_q}_{comp_t}_{st_conf}_{bag_size}'.format(
i=i,
cache=cache,
comp=compression if compression else 'default_compression',
comp_q=compression_queue_size,
comp_t=compression_threads,
st_conf=st_conf_filename if st_conf_filename else 'default_config',
bag_size=max_bag_size
)
# Result file path for producer
result_file = pathlib.Path(db_root_folder).joinpath(
benchmark_dir_name,
summary_result_file
)
# Database folder path for producer
db_folder = pathlib.Path(db_root_folder).joinpath(
benchmark_dir_name,
node_title
)
# Filling up parameters cross section list for benchmark
params_cross_section.append(
{
'node_title': node_title,
'db_folder': str(db_folder),
'cache': cache,
'preserve_bags': preserve_bags,
'transport': transport,
'result_file': str(result_file),
'compression_format': compression,
'compression_queue_size': compression_queue_size,
'compression_threads': compression_threads,
'storage_config_file': str(storage_conf_path),
'config_file': str(_producers_cfg_path),
'max_bag_size': max_bag_size
}
)
# For the sake of python indentation, multiple for loops in alternative way with helper func
[
__generate_cross_section_parameter(
i,
cache,
compression,
compression_queue_size,
compression_threads,
storage_config,
max_bag_size)
for i in range(0, repeat_each)
for cache in max_cache_size_params
for compression in compression_params
for compression_queue_size in compression_queue_size_params
for compression_threads in compression_threads_params
for storage_config in storage_config_file_params
for max_bag_size in max_bag_size_params
]
ld = launch.LaunchDescription()
ld.add_action(
launch.actions.LogInfo(msg='Launching benchmark!'),
)
# Create all required nodes and processes for benchmark
for producer_param in params_cross_section:
parameters = [
producer_param['config_file'],
{'max_cache_size': producer_param['cache']},
{'max_bag_size': producer_param['max_bag_size']},
{'db_folder': producer_param['db_folder']},
{'results_file': producer_param['result_file']},
{'compression_queue_size': producer_param['compression_queue_size']},
{'compression_threads': producer_param['compression_threads']}
]
if producer_param['storage_config_file'] != '':
parameters.append({'storage_config_file': producer_param['storage_config_file']})
if producer_param['compression_format'] != '':
parameters.append({'compression_format': producer_param['compression_format']})
if not transport:
# Writer benchmark node writes messages directly to a storage, uses no publishers
producer_node = launch_ros.actions.Node(
package='rosbag2_performance_benchmarking',
executable='writer_benchmark',
name='rosbag2_performance_benchmarking_node',
parameters=parameters
)
else:
# Benchmark publishers node uses standard publishers for publishing messages
producer_node = launch_ros.actions.Node(
package='rosbag2_performance_benchmarking',
executable='benchmark_publishers',
name='rosbag2_performance_benchmarking_node',
parameters=parameters
)
# ROS2 bag process for recording messages
rosbag_args = []
if producer_param['storage_config_file']:
rosbag_args += [
'--storage-config-file',
str(producer_param['storage_config_file'])
]
if producer_param['cache']:
rosbag_args += [
'--max-cache-size',
str(producer_param['cache'])
]
if producer_param['compression_format']:
rosbag_args += [
'--compression-mode',
'message'
]
rosbag_args += [
'--compression-format',
str(producer_param['compression_format'])
]
if producer_param['compression_queue_size']:
rosbag_args += [
'--compression-queue-size',
str(producer_param['compression_queue_size'])
]
if producer_param['compression_threads']:
rosbag_args += [
'--compression-threads',
str(producer_param['compression_threads'])
]
if producer_param['max_bag_size']:
rosbag_args += [
'-b',
str(producer_param['max_bag_size'])
]
rosbag_args += ['-o', str(producer_param['db_folder'])]
rosbag_process = launch.actions.ExecuteProcess(
sigkill_timeout=launch.substitutions.LaunchConfiguration(
'sigkill_timeout', default=60),
sigterm_timeout=launch.substitutions.LaunchConfiguration(
'sigterm_timeout', default=60),
cmd=['ros2', 'bag', 'record', '-a'] + rosbag_args
)
# Result writer node walks through output metadata files and generates
# output results file
result_writer = launch_ros.actions.Node(
package='rosbag2_performance_benchmarking',
executable='results_writer',
name='rosbag2_performance_benchmarking_node',
parameters=parameters
)
# Fill up list with rosbag record process and result writers actions
_rosbag_processes.append(rosbag_process)
_result_writers.append(result_writer)
# Fill up dict with producer nodes and their corresponding parameters
_producer_nodes.append({'node': producer_node, 'parameters': producer_param})
# Connect start and exit events for a proper sequence
if not transport:
for producer_node in _producer_nodes:
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessExit(
target_action=producer_node['node'],
on_exit=_producer_node_exited
)
)
)
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessStart(
target_action=producer_node['node'],
on_start=_producer_node_started
)
)
)
else:
for producer_node, rosbag_proc in zip(_producer_nodes, _rosbag_processes):
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessExit(
target_action=producer_node['node'],
on_exit=_producer_node_exited
)
)
)
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessStart(
target_action=producer_node['node'],
on_start=_producer_node_started
)
)
            )
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessStart(
target_action=rosbag_proc,
on_start=_rosbag_proc_started
)
)
)
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessIO(
target_action=rosbag_proc,
on_stdout=_rosbag_ready_check,
on_stderr=_rosbag_ready_check
)
)
)
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessExit(
target_action=rosbag_proc,
on_exit=_rosbag_proc_exited
)
)
)
# Launch nodes one after another. Next node is launched after previous is finished.
ld.add_action(_launch_sequence(transport=transport))
return ld
if __name__ == '__main__':
raise RuntimeError('Benchmark launchfile does not support standalone execution.')
|
the-stack_106_22692 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to run neighbourhood processing."""
from improver import cli
from improver.constants import DEFAULT_PERCENTILES
@cli.clizefy
@cli.with_output
def process(cube: cli.inputcube,
mask: cli.inputcube = None,
*,
neighbourhood_output,
neighbourhood_shape,
radii: cli.comma_separated_list,
lead_times: cli.comma_separated_list = None,
degrees_as_complex=False,
weighted_mode=False,
area_sum=False,
remask=False,
percentiles: cli.comma_separated_list = DEFAULT_PERCENTILES,
halo_radius: float = None):
"""Runs neighbourhood processing.
Apply the requested neighbourhood method via the
NeighbourhoodProcessing plugin to a Cube.
Args:
cube (iris.cube.Cube):
The Cube to be processed.
mask (iris.cube.Cube):
A cube to mask the input cube. The data should contain 1 for
usable points and 0 for discarded points.
Only supported with square neighbourhoods. (Optional)
neighbourhood_output (str):
The form of the results generated using neighbourhood processing.
If "probabilities" is selected, the mean probability with a
neighbourhood is calculated. If "percentiles" is selected, then
the percentiles are calculated with a neighbourhood. Calculating
percentiles from a neighbourhood is only supported for a circular
neighbourhood.
Options: "probabilities", "percentiles".
neighbourhood_shape (str):
Name of the neighbourhood method to use. Only a "circular"
neighbourhood shape is applicable for calculating "percentiles"
output.
Options: "circular", "square".
radii (list of float):
The radius or a list of radii in metres of the neighbourhood to
apply.
If it is a list, it must be the same length as lead_times, which
defines at which lead time to use which nbhood radius. The radius
will be interpolated for intermediate lead times.
lead_times (list of int):
The lead times in hours that correspond to the radii to be used.
If lead_times are set, radii must be a list the same length as
lead_times.
degrees_as_complex (bool):
Include this option to process angles as complex numbers.
Not compatible with circular kernel or percentiles.
weighted_mode (bool):
Include this option to set the weighting to decrease with radius.
Otherwise a constant weighting is assumed.
weighted_mode is only applicable for calculating "probability"
neighbourhood output using the circular kernel.
area_sum (bool):
Return sum rather than fraction over the neighbourhood area.
remask (bool):
Include this option to apply the original un-neighbourhood
processed mask to the neighbourhood processed cube.
Otherwise the original un-neighbourhood processed mask
is not applied. Therefore, the neighbourhood processing may result
in values being present in area that were originally masked.
percentiles (float):
Calculates value at the specified percentiles from the
neighbourhood surrounding each grid point. This argument has no
effect if the output is probabilities.
halo_radius (float):
Set this radius in metres to define the excess halo to clip. Used
where a larger grid was defined than the standard grid and we want
to clip the grid back to the standard grid. Otherwise no clipping
is applied.
Returns:
iris.cube.Cube:
A processed Cube.
Raises:
RuntimeError:
If weighted_mode is used with the wrong neighbourhood_output.
RuntimeError:
If degree_as_complex is used with
neighbourhood_output='percentiles'.
RuntimeError:
If degree_as_complex is used with neighbourhood_shape='circular'.
"""
from improver.nbhood import radius_by_lead_time
from improver.nbhood.nbhood import (
GeneratePercentilesFromANeighbourhood, NeighbourhoodProcessing)
from improver.utilities.pad_spatial import remove_cube_halo
from improver.wind_calculations.wind_direction import WindDirection
sum_or_fraction = 'sum' if area_sum else 'fraction'
if neighbourhood_output == "percentiles":
if weighted_mode:
            raise RuntimeError('weighted_mode cannot be used with '
'neighbourhood_output="percentiles"')
if degrees_as_complex:
raise RuntimeError('Cannot generate percentiles from complex '
'numbers')
if neighbourhood_shape == "circular":
if degrees_as_complex:
raise RuntimeError(
'Cannot process complex numbers with circular neighbourhoods')
if degrees_as_complex:
# convert cube data into complex numbers
cube.data = WindDirection.deg_to_complex(cube.data)
radius_or_radii, lead_times = radius_by_lead_time(radii, lead_times)
if neighbourhood_output == "probabilities":
result = NeighbourhoodProcessing(
neighbourhood_shape, radius_or_radii,
lead_times=lead_times,
weighted_mode=weighted_mode,
sum_or_fraction=sum_or_fraction,
re_mask=remask)(cube, mask_cube=mask)
elif neighbourhood_output == "percentiles":
result = GeneratePercentilesFromANeighbourhood(
neighbourhood_shape, radius_or_radii,
lead_times=lead_times,
percentiles=percentiles)(cube)
if degrees_as_complex:
# convert neighbourhooded cube back to degrees
result.data = WindDirection.complex_to_deg(result.data)
if halo_radius is not None:
result = remove_cube_halo(result, halo_radius)
return result
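# Illustrative direct call of the processing function (hedged sketch; in practice this module
# is normally invoked through the improver CLI, and the cube would be loaded from disk):
#
#   import iris
#   cube = iris.load_cube("input.nc")  # hypothetical input file
#   result = process(cube, neighbourhood_output="probabilities",
#                    neighbourhood_shape="square", radii=["10000"])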
|
the-stack_106_22694 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 2, 23)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
  if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
    with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
  Args:
year: A year (e.g. 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
|
the-stack_106_22695 | import sys
from pysam import VariantFile
"""
mkdir prplot ; cd prplot
bedtools intersect -wao -a $data/pingpong_ext/OUT/HGspecificvars_1.bed -b $data/pingpong_ext/OUT/pp/over/sspecific.short.hg-haps.sam.bed > rec1.short.bed
bedtools intersect -wao -a $data/pingpong_ext/OUT/HGspecificvars_2.bed -b $data/pingpong_ext/OUT/pp/over/sspecific.short.hg-haps.sam.bed > rec2.short.bed
bedtools intersect -wao -a $data/pingpong_ext/OUT/HGspecificvars_1.bed -b $data/pingpong_ext/OUT/pp/over/sspecific.long.hg-haps.sam.bed > rec1.long.bed
bedtools intersect -wao -a $data/pingpong_ext/OUT/HGspecificvars_2.bed -b $data/pingpong_ext/OUT/pp/over/sspecific.long.hg-haps.sam.bed > rec2.long.bed
cat rec1.short.bed rec1.long.bed > rec1.bed
cat rec2.short.bed rec2.long.bed > rec2.bed
bedtools intersect -wao -b $data/pingpong_ext/OUT/HGspecificvars_1.bed -a $data/pingpong_ext/OUT/pp/over/sspecific.short.hg-haps.sam.bed > pre1.short.bed
bedtools intersect -wao -b $data/pingpong_ext/OUT/HGspecificvars_2.bed -a $data/pingpong_ext/OUT/pp/over/sspecific.short.hg-haps.sam.bed > pre2.short.bed
bedtools intersect -wao -b $data/pingpong_ext/OUT/HGspecificvars_1.bed -a $data/pingpong_ext/OUT/pp/over/sspecific.long.hg-haps.sam.bed > pre1.long.bed
bedtools intersect -wao -b $data/pingpong_ext/OUT/HGspecificvars_2.bed -a $data/pingpong_ext/OUT/pp/over/sspecific.long.hg-haps.sam.bed > pre2.long.bed
cat pre1.short.bed pre1.long.bed > pre1.bed
cat pre2.short.bed pre2.long.bed > pre2.bed
"""
def parse_recbed(bed_path):
alleles = {}
for line in open(bed_path):
line = line.strip('\n').split('\t')
ref, vidx, ridx, covering = line[0], line[3], line[8], int(line[-1])
aidx = vidx + ('2' if ref.endswith("_2") else '1')
if aidx not in alleles:
alleles[aidx] = set()
if covering > 0:
alleles[aidx].add(ridx)
return alleles
def recall(alleles, cutoff):
total = 0
hits = 0
for ridxs in alleles.values():
total += 1
if any([int(ridx.split('#')[1]) >= cutoff for ridx in ridxs]):
hits += 1
return hits, total
# precision setup
def count_sample(fq_path):
# assuming fastq
n = 4
reads_by_coverage = {}
for line in open(fq_path):
if n % 4 == 0:
line = line[1:-1] # removes @ and \n
cov = int(line.split('#')[1])
reads_by_coverage[cov] = reads_by_coverage[cov] + 1 if cov in reads_by_coverage else 1
n = 0
n += 1
return reads_by_coverage
def parse_vcf(vcf_path, sample1="HG00733", sample2="NA19240"):
variants = {}
vcf = VariantFile(vcf_path)
for record in vcf.fetch():
idx = record.id
gt1, gt2 = record.samples[sample1]["GT"]
gt1_2, gt2_2 = record.samples[sample2]["GT"]
variants[idx] = [(gt1, gt2), (gt1_2, gt2_2)]
return variants
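# parse_vcf returns {variant_id: [(gt1, gt2) for HG00733, (gt1, gt2) for NA19240]}; these
# genotype pairs are later compared in check_haplo_uniqueness to decide whether a covered
# allele is specific to an HG00733 haplotype.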
def parse_prebed(bed_path, firsthap):
alignments = {}
for line in open(bed_path):
line = line.strip('\n').split('\t')
ref, start, end, ridx, vidx, covering = line[0], line[1], line[2], line[3], line[9], int(line[-1])
if ref.endswith("_2") and firsthap:
continue
posidx = ridx + ":" + start + "-" + end
if posidx not in alignments:
alignments[posidx] = set()
if covering > 0:
if vidx == ".":
print(line)
exit(1)
alignments[posidx].add(vidx)
return alignments
def check_haplo_uniqueness(variants, firsthap=True):
HGhaplo = [v[0][not firsthap] for v in variants]
NAhaplo1 = [v[1][0] for v in variants]
NAhaplo2 = [v[1][1] for v in variants]
return HGhaplo != NAhaplo1 and HGhaplo != NAhaplo2
def precision(alignments1, alignments2, variants, cutoff):
covering_results = {}
for posidx, vidxs in alignments1.items():
ridx = posidx.split(':')[0]
if int(ridx.split('#')[1]) < cutoff:
continue
if ridx not in covering_results:
covering_results[ridx] = False
if len(vidxs) > 0:
covered_variants = [variants[vidx] for vidx in vidxs]
unique = check_haplo_uniqueness(covered_variants, True)
covering_results[ridx] = covering_results[ridx] or unique
for posidx, vidxs in alignments2.items():
ridx = posidx.split(':')[0]
if int(ridx.split('#')[1]) < cutoff:
continue
if ridx not in covering_results:
covering_results[ridx] = False
if len(vidxs) > 0:
covered_variants = [variants[vidx] for vidx in vidxs]
unique = check_haplo_uniqueness(covered_variants, False)
covering_results[ridx] = covering_results[ridx] or unique
covering = 0
for ridx, flag in covering_results.items():
if flag:
covering += 1
return covering
def main():
fq_path = sys.argv[1]
vcf_path = sys.argv[2]
rec1_waobed = sys.argv[3] # bedtools intersect -wao {variants} {alignments}
rec2_waobed = sys.argv[4]
pre1_waobed = sys.argv[5] # bedtools intersect -wao {alignments} {variants}
pre2_waobed = sys.argv[6]
# # RECALL
# alleles1 = parse_recbed(rec1_waobed)
# print("Parsed rec1", file=sys.stderr)
# alleles2 = parse_recbed(rec2_waobed)
# print("Parsed rec2", file=sys.stderr)
# alleles = {**alleles1, **alleles2}
# print("Combined", file=sys.stderr)
# for c in range(5, 11):
# print(f"Computing recall with c={c}", file=sys.stderr)
# hits, total = recall(alleles, c)
# R = round(hits/total*100 if total > 0 else 0, 3)
# print("R", c, hits, total, R, sep=',')
# PRECISION
print("Parsing sample", file=sys.stderr)
reads_by_coverage = count_sample(fq_path)
print("Parsing VCF", file=sys.stderr)
variants = parse_vcf(vcf_path)
print("Parsing pre1", file=sys.stderr)
alignments1 = parse_prebed(pre1_waobed, True)
print("Parsing pre2", file=sys.stderr)
alignments2 = parse_prebed(pre2_waobed, False)
for c in range(5, 11):
print(f"Computing recall with c={c}", file=sys.stderr)
covering = precision(alignments1, alignments2, variants, c)
total = 0 # TODO improve this
for cutoff,n in reads_by_coverage.items():
if cutoff >= c:
total += n
P = round(covering/total*100 if total != 0 else 0, 3)
print("P", c, covering, total, P, sep=',')
if __name__ == "__main__":
main()
|
the-stack_106_22696 | from train_model import joblib
from process_picture import obtain_one_picture
def image_identification(image, model_type):
"""
:param: image
:param: model_type
:return:
"""
x_data = obtain_one_picture(image)
# deal with picture
clr = joblib.load("model/" + model_type +".pkl")
return clr.predict([x_data])[0]
if __name__ == '__main__':
# the picture come from img test
result = image_identification("img/1234.jpg", "lr")
m_dict = joblib.load("img/m_dict.pkl")
print("The picture class is: {}, and number path is {}".format(result, m_dict[result]))
|
the-stack_106_22697 |
# Copyright 2017 Bernhard Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Credits: http://stackoverflow.com/a/11927374
import os
import socket
import logging
from .utils import Singleton
from six import with_metaclass
class LogLevel(with_metaclass(Singleton)):
def __init__(self):
self.logLevel = "INFO"
def setLogLevel(self, logLevel):
self.logLevel = logLevel
def getLogLevel(self):
return self.logLevel
class TruncatingFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None, size=400, noNL=True):
super(TruncatingFormatter, self).__init__(fmt, datefmt)
self.size = size
self.noNL = noNL
def truncate(self, text, size=80, noNL=True):
if len(text) < self.size:
ret = text
else:
ret = text[:self.size-6] + " [...(%d)]" % (len(text) - self.size)
if self.noNL:
return ret.replace("\n", "\\n")
else:
return ret
def format(self, record):
record.message = self.truncate(record.getMessage())
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
return self._fmt % record.__dict__
class Logger(object):
def __init__(self, name, size=400, noNL=True):
logLevel = LogLevel().getLogLevel()
logger = logging.getLogger(name)
logger.setLevel(logLevel)
if not logger.handlers:
log_dir = os.environ["ZEPPELIN_LOG_DIR"]
prefix = "zeppelin-interpreter-pyspark-comm-layer"
file_name = os.path.join(log_dir, '%s-%s-%s.log' % (prefix, os.environ["USER"], socket.gethostname()))
handler = logging.FileHandler(file_name)
formatter = TruncatingFormatter('%(asctime)s %(levelname)s:%(name)s %(message)s', size=size, noNL=noNL)
handler.setFormatter(formatter)
handler.setLevel(logLevel)
logger.addHandler(handler)
self._logger = logger
def get(self):
return self._logger
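# Usage sketch (hedged; assumes ZEPPELIN_LOG_DIR and USER are set in the environment):
#   LogLevel().setLogLevel("DEBUG")
#   log = Logger(__name__, size=200).get()
#   log.debug("very long payload ...")  # messages longer than `size` are truncated in the log file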
|
the-stack_106_22698 | """Script to convert an old-structure influxdb to a new one."""
import argparse
import sys
from typing import List
# Based on code at
# http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
def print_progress(iteration: int, total: int, prefix: str = '',
suffix: str = '', decimals: int = 2,
bar_length: int = 68) -> None:
"""Print progress bar.
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : number of decimals in percent complete (Int)
        bar_length - Optional : character length of bar (Int)
"""
filled_length = int(round(bar_length * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
line = '#' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, line,
percents, '%', suffix))
sys.stdout.flush()
if iteration == total:
print("\n")
def run(script_args: List) -> int:
"""Run the actual script."""
from influxdb import InfluxDBClient
parser = argparse.ArgumentParser(
description="Migrate legacy influxDB.")
parser.add_argument(
'-d', '--dbname',
metavar='dbname',
required=True,
help="InfluxDB database name")
parser.add_argument(
'-H', '--host',
metavar='host',
default='127.0.0.1',
help="InfluxDB host address")
parser.add_argument(
'-P', '--port',
metavar='port',
default=8086,
help="InfluxDB host port")
parser.add_argument(
'-u', '--username',
metavar='username',
default='root',
help="InfluxDB username")
parser.add_argument(
'-p', '--password',
metavar='password',
default='root',
help="InfluxDB password")
parser.add_argument(
'-s', '--step',
metavar='step',
default=1000,
help="How many points to migrate at the same time")
parser.add_argument(
'-o', '--override-measurement',
metavar='override_measurement',
default="",
help="Store all your points in the same measurement")
parser.add_argument(
'-D', '--delete',
action='store_true',
default=False,
help="Delete old database")
parser.add_argument(
'--script',
choices=['influxdb_migrator'])
args = parser.parse_args()
# Get client for old DB
client = InfluxDBClient(args.host, args.port,
args.username, args.password)
client.switch_database(args.dbname)
# Get DB list
db_list = [db['name'] for db in client.get_list_database()]
# Get measurements of the old DB
res = client.query('SHOW MEASUREMENTS')
measurements = [measurement['name'] for measurement in res.get_points()]
nb_measurements = len(measurements)
# Move data
# Get old DB name
old_dbname = "{}__old".format(args.dbname)
# Create old DB if needed
if old_dbname not in db_list:
client.create_database(old_dbname)
# Copy data to the old DB
print("Cloning from {} to {}".format(args.dbname, old_dbname))
for index, measurement in enumerate(measurements):
client.query('''SELECT * INTO {}..:MEASUREMENT FROM '''
'"{}" GROUP BY *'.format(old_dbname, measurement))
# Print progress
print_progress(index + 1, nb_measurements)
# Delete the database
client.drop_database(args.dbname)
# Create new DB if needed
client.create_database(args.dbname)
client.switch_database(old_dbname)
# Get client for new DB
new_client = InfluxDBClient(args.host, args.port, args.username,
args.password, args.dbname)
# Counter of points without time
point_wt_time = 0
print("Migrating from {} to {}".format(old_dbname, args.dbname))
# Walk into measurement
for index, measurement in enumerate(measurements):
# Get tag list
res = client.query('''SHOW TAG KEYS FROM "{}"'''.format(measurement))
tags = [v['tagKey'] for v in res.get_points()]
# Get field list
res = client.query('''SHOW FIELD KEYS FROM "{}"'''.format(measurement))
fields = [v['fieldKey'] for v in res.get_points()]
# Get points, convert and send points to the new DB
offset = 0
while True:
nb_points = 0
# Prepare new points
new_points = []
# Get points
res = client.query('SELECT * FROM "{}" LIMIT {} OFFSET '
'{}'.format(measurement, args.step, offset))
for point in res.get_points():
new_point = {"tags": {},
"fields": {},
"time": None}
if args.override_measurement:
new_point["measurement"] = args.override_measurement
else:
new_point["measurement"] = measurement
# Check time
if point["time"] is None:
# Point without time
point_wt_time += 1
print("Can not convert point without time")
continue
# Convert all fields
for field in fields:
try:
new_point["fields"][field] = float(point[field])
except (ValueError, TypeError):
if field == "value":
new_key = "state"
else:
new_key = "{}_str".format(field)
new_point["fields"][new_key] = str(point[field])
# Add tags
for tag in tags:
new_point["tags"][tag] = point[tag]
# Set time
new_point["time"] = point["time"]
# Add new point to the new list
new_points.append(new_point)
# Count nb points
nb_points += 1
# Send to the new db
try:
new_client.write_points(new_points)
except Exception as exp:
raise exp
# If there is no points
if nb_points == 0:
# print("Measurement {} migrated".format(measurement))
break
else:
# Increment offset
offset += args.step
# Print progress
print_progress(index + 1, nb_measurements)
# Delete database if needed
if args.delete:
print("Dropping {}".format(old_dbname))
client.drop_database(old_dbname)
|
the-stack_106_22704 | import torch.nn as nn
from .blocks import LayerDepwiseDecode, LayerDepwiseEncode
class MobileHairNet(nn.Module):
def __init__(self, encode_block=LayerDepwiseEncode, decode_block=LayerDepwiseDecode, mobilenet_block=None, *args, **kwargs):
super(MobileHairNet, self).__init__()
self.encode_block = encode_block
self.decode_block = decode_block
self.make_layers()
def make_layers(self):
self.encode_layer1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=2, padding=1),
self.encode_block(32, 64, reserve=True)
)
self.encode_layer2 = nn.Sequential(
self.encode_block(64, 128),
self.encode_block(128, 128),
)
self.encode_layer3 = nn.Sequential(
self.encode_block(128, 256),
self.encode_block(256,256)
)
self.encode_layer4 = nn.Sequential(
self.encode_block(256, 512),
self.encode_block(512, 512),
self.encode_block(512, 512),
self.encode_block(512, 512),
self.encode_block(512, 512),
self.encode_block(512, 512),
)
self.encode_layer5 = nn.Sequential(
self.encode_block(512, 1024),
self.encode_block(1024, 1024)
)
self.decode_layer1 = nn.Upsample(scale_factor=2)
self.decode_layer2 = nn.Sequential(
nn.Conv2d(in_channels=1024, out_channels=64, kernel_size=1),
self.decode_block(in_channel=64, out_channel=64, kernel_size=3),
nn.Upsample(scale_factor=2)
)
self.decode_layer3 = nn.Sequential(
self.decode_block(in_channel=64, out_channel=64, kernel_size=3),
nn.Upsample(scale_factor=2)
)
self.decode_layer4 = nn.Sequential(
self.decode_block(in_channel=64, out_channel=64, kernel_size=3),
nn.Upsample(scale_factor=2)
)
self.decode_layer5 = nn.Sequential(
self.decode_block(in_channel=64, out_channel=64, kernel_size=3),
nn.Upsample(scale_factor=2),
self.decode_block(in_channel=64, out_channel=64, kernel_size=3),
nn.Conv2d(in_channels=64, out_channels=2, kernel_size=3, padding=1)
)
self.encode_to_decoder4 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1)
self.encode_to_decoder3 = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=1)
self.encode_to_decoder2 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=1)
self.encode_to_decoder1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1)
self.soft_max = nn.Softmax(dim=1)
self._init_weight()
def forward(self, x):
        # connect encode 4 -> decode 1, encode 3 -> decode 2, encode 2 -> decode 3, encode 1 -> decode 4
encode_layer1 = self.encode_layer1(x)
encode_layer2 = self.encode_layer2(encode_layer1)
encode_layer3 = self.encode_layer3(encode_layer2)
encode_layer4 = self.encode_layer4(encode_layer3)
encode_layer5 = self.encode_layer5(encode_layer4)
encode_layer4 = self.encode_to_decoder4(encode_layer4)
encode_layer3 = self.encode_to_decoder3(encode_layer3)
encode_layer2 = self.encode_to_decoder2(encode_layer2)
encode_layer1 = self.encode_to_decoder1(encode_layer1)
decode_layer1 = self.decode_layer1(encode_layer5) + encode_layer4
decode_layer2 = self.decode_layer2(decode_layer1) + encode_layer3
decode_layer3 = self.decode_layer3(decode_layer2) + encode_layer2
decode_layer4 = self.decode_layer4(decode_layer3) + encode_layer1
out = self.decode_layer5(decode_layer4)
return out
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias) |
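# Minimal usage sketch (hedged: assumes the encoder blocks in .blocks downsample by a total
# factor of 32, as in MobileNet, so the input spatial size should be divisible by 32):
#   import torch
#   net = MobileHairNet()
#   out = net(torch.randn(1, 3, 224, 224))  # expected shape: [1, 2, 224, 224]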
the-stack_106_22705 | # coding: utf-8
# WaveMaker Code
# by kense
# ############
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(seed=None) # random seed
def jonswap(f,Hs,Tp): # JONSWAP function
fp=1/float(Tp)
TH = Tp/float(np.sqrt(Hs))
if TH<=3.6:
gamma=5
elif (3.6<TH) and (TH<=5):
gamma=np.exp(5.75-1.15*TH)
else:
gamma=1
if f<=fp:
sigma = 0.07
else:
sigma = 0.09
s=0.3125*Hs**2*Tp*(f/float(fp))**(-5)*np.exp(-1.25*(f/float(fp))**(-4))*(1-0.287*np.log(gamma))*gamma**(np.exp(-0.5*((f/float(fp)-1)/float(sigma))**2))
return s
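# Example (illustrative): spectral density at the peak frequency for Hs = 2 m, Tp = 8 s.
#   s_peak = jonswap(1.0 / 8.0, 2.0, 8.0)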
def x0(TDur,dt,Hs,Tp,f0,df,fHighCut,y,ny): # etaBC generation
f=np.arange(f0,fHighCut+f0,df)
omega=2*np.pi*f
JS = np.zeros((1,f.size))
    for ij in range(0,f.size):  # evaluate the spectrum at every frequency bin (including the last)
fL=f[ij]
JS1 = jonswap(fL,Hs,Tp)
JS[0,ij] = JS1
#tSpan=0:dt:TDur;
tSpan=np.arange(0,TDur+dt,dt)
etaBC=np.zeros((tSpan.size,1))
A=np.zeros((1,omega.size))
B=np.zeros((1,omega.size))
AB=np.random.standard_normal((1,2*omega.size)) #random Fourie coefficients
for j in range(0,omega.size):
A[0,j]=np.sqrt(JS[0,j]*df)*AB[0,j]
B[0,j]=np.sqrt(JS[0,j]*df)*AB[0,omega.size+j]
for i in range(0,tSpan.size):
etaBC[i,0]=np.sum(A*np.cos(2*np.pi*f*tSpan[i])+B*np.sin(2*np.pi*f*tSpan[i])); # Initial Wave Surface
# Print Check
#print omega.size
#print etaBC
#plt.plot(tSpan,etaBC,'b-*')
#plt.xlabel('Time[s]')
#plt.ylabel('Eta[m]')
#plt.grid()
#plt.show()
n = tSpan.size
# Lets print output the Wave Maker
file = open("waveMakerSignalrandom.inp","w")
file.write("# This is a Wave Generation surface response at x=0\n")
file.write("%.7f %.0f %.0f\n" % (dt, n, ny))
file.write("%.7f\n" % y)
for i in range(0,tSpan.size):
file.write("%.7f %.7f\n" % (tSpan[i],etaBC[i,0]))
file.close()
## Which Fourie coefficients have been used
file = open("randomAB.txt","w")
file.write("# These are random Aj and Bj\n")
for i in range(0,omega.size):
file.write("%.7s %.7s\n" % (AB[0,i],AB[0,omega.size+i]))
file.close()
|
the-stack_106_22706 | import sys
import getopt
import re
def main(argv):
inname=''
outname=''
try:
opts,args=getopt.getopt(argv,"hi:o:",["infile=","outfile=",])
except getopt.GetoptError:
print('pbsv_tra_fliter.py -i <inputfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('pbsv_tra_fliter.py -i <inputfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--infile"):
inname = arg
elif opt in ("-o", "--outfile"):
outname = arg
return inname,outname
if __name__ == "__main__":
inputname,outputname = main(sys.argv[1:])
inputfile = open(inputname)
outputfile = open(outputname,'w')
    while True:
lines = inputfile.readlines(10000)
if not lines:
break
for line1 in lines:
line1 = line1.rstrip()
cut1 = line1.strip().split('\t')
if re.search('^#',line1):
outputfile.write(str(line1))
outputfile.write('\n')
else:
cut2 = cut1[9]
FORMAT = cut2.strip().split(':')
GT = FORMAT[0]
cut3 = cut1[7]
INFO = cut3.strip().split(';')
sv_type = INFO[0]
if GT == '0/1' or GT == '1/1':
if cut1[6] == 'PASS':
if sv_type == 'SVTYPE=BND':
outputfile.write(str(line1))
outputfile.write('\n')
    outputfile.close()
    inputfile.close()
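# Illustrative invocation (file names are placeholders, not from the original repository):
#   python pbsv_tra_fliter.py -i pbsv_calls.vcf -o pbsv_bnd_pass.vcf
# Only PASS records with SVTYPE=BND and genotype 0/1 or 1/1 are written out, plus header lines.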
|
the-stack_106_22707 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn
from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
dice_loss, sigmoid_focal_loss)
from .transformer import build_transformer
class DETR(nn.Module):
""" This is the DETR module that performs object detection """
def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, ie detection slot. This is the maximal number of objects
DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
self.aux_loss = aux_loss
def forward(self, samples: NestedTensor):
""" The forward expects a NestedTensor, which consists of:
- samples.tensor: batched images, of shape [batch_size x 3 x H x W]
- samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, height, width). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.backbone(samples)
src, mask = features[-1].decompose()
assert mask is not None
hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [{'pred_logits': a, 'pred_boxes': b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
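    # Illustrative shapes (not in the original file; batch size is an assumption):
    # with num_queries=100 and 91 COCO classes, a batch of 2 images yields
    # out['pred_logits'] of shape [2, 100, 92] and out['pred_boxes'] of shape
    # [2, 100, 4], with boxes in normalized center-size format.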
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert 'pred_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes),
box_ops.box_cxcywh_to_xyxy(target_boxes)))
losses['loss_giou'] = loss_giou.sum() / num_boxes
return losses
def loss_masks(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(src_masks)
target_masks = target_masks[tgt_idx]
# upsample predictions to the target size
src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
mode="bilinear", align_corners=False)
src_masks = src_masks[:, 0].flatten(1)
target_masks = target_masks.flatten(1)
target_masks = target_masks.view(src_masks.shape)
losses = {
"loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
"loss_dice": dice_loss(src_masks, target_masks, num_boxes),
}
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
'labels': self.loss_labels,
'cardinality': self.loss_cardinality,
'boxes': self.loss_boxes,
'masks': self.loss_masks
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
class PostProcess(nn.Module):
""" This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
""" Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding
"""
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = F.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
# convert to [x0, y0, x1, y1] format
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
return results
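    # Illustrative use (not in the original file; the image size is an assumption):
    #   target_sizes = torch.tensor([[480, 640]])       # original (height, width) per image
    #   results = PostProcess()(outputs, target_sizes)  # boxes rescaled to absolute xyxy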
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
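# Illustrative instantiation (mirrors the bbox head built in DETR.__init__ above; the
# hidden size of 256 is an assumption about transformer.d_model):
#   bbox_head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)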
def build(args):
# the `num_classes` naming here is somewhat misleading.
# it indeed corresponds to `max_obj_id + 1`, where max_obj_id
# is the maximum id for a class in your dataset. For example,
# COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.
# As another example, for a dataset that has a single class with id 1,
# you should pass `num_classes` to be 2 (max_obj_id + 1).
# For more details on this, check the following discussion
# https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223
num_classes = 21 if args.dataset_file != 'coco' else 91
if args.dataset_file == "coco_panoptic":
# for panoptic, we just add a num_classes that is large enough to hold
# max_obj_id + 1, but the exact value doesn't really matter
num_classes = 250
device = torch.device(args.device)
backbone = build_backbone(args)
transformer = build_transformer(args)
model = DETR(
backbone,
transformer,
num_classes=num_classes,
num_queries=args.num_queries,
aux_loss=args.aux_loss,
)
if args.masks:
model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
matcher = build_matcher(args)
weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
weight_dict['loss_giou'] = args.giou_loss_coef
if args.masks:
weight_dict["loss_mask"] = args.mask_loss_coef
weight_dict["loss_dice"] = args.dice_loss_coef
# TODO this is a hack
if args.aux_loss:
aux_weight_dict = {}
for i in range(args.dec_layers - 1):
aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ['labels', 'boxes', 'cardinality']
if args.masks:
losses += ["masks"]
criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict,
eos_coef=args.eos_coef, losses=losses)
criterion.to(device)
postprocessors = {'bbox': PostProcess()}
if args.masks:
postprocessors['segm'] = PostProcessSegm()
if args.dataset_file == "coco_panoptic":
is_thing_map = {i: i <= 90 for i in range(201)}
postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)
return model, criterion, postprocessors
|
the-stack_106_22708 | # Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
from unittest import mock
from django.test import override_settings
from promgen import models, rest, tests
from promgen.notification.email import NotificationEmail
from promgen.notification.linenotify import NotificationLineNotify
from promgen.notification.user import NotificationUser
class UserSplayTest(tests.PromgenTest):
fixtures = ["testcases.yaml"]
@override_settings(PROMGEN=tests.SETTINGS)
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
@mock.patch("promgen.notification.email.send_mail")
@mock.patch("promgen.util.post")
    def test_user_splay(self, mock_post, mock_email):
one = models.Service.objects.get(pk=1)
NotificationUser.create(obj=one, value=one.owner.username)
NotificationLineNotify.create(obj=one.owner, value="#foo")
NotificationEmail.create(obj=one.owner, value="[email protected]")
response = self.fireAlert()
self.assertRoute(response, rest.AlertReceiver, 202)
self.assertCount(models.Alert, 1, "Alert Queued")
self.assertCount(models.AlertError, 0, "No failed alerts")
# Since we test the specifics elsewhere, just want to check
# the count of calls here
self.assertEqual(mock_post.call_count, 1, "Called LINE Notify")
self.assertEqual(mock_email.call_count, 1, "Called email")
@override_settings(PROMGEN=tests.SETTINGS)
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
@mock.patch("promgen.notification.email.send_mail")
def test_failed_user(self, mock_email):
# We have one valid sender and one invalid one
# The invalid one should be skipped while still letting
# the valid one pass
one = models.Service.objects.get(pk=1)
NotificationEmail.create(obj=one, value="[email protected]")
NotificationUser.create(obj=one, value="does not exist")
response = self.fireAlert()
self.assertRoute(response, rest.AlertReceiver, 202)
self.assertCount(models.Alert, 1, "Alert Queued")
self.assertCount(models.AlertError, 0, "No failed alerts")
self.assertEqual(mock_email.call_count, 1, "Still called email")
|
the-stack_106_22711 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module of the current Interactive Beam environment.
For internal use only; no backwards-compatibility guarantees.
Provides interfaces to interact with existing Interactive Beam environment.
External Interactive Beam users please use interactive_beam module in
application code or notebook.
"""
# pytype: skip-file
from __future__ import absolute_import
import atexit
import importlib
import logging
import sys
import apache_beam as beam
from apache_beam.runners import runner
from apache_beam.utils.interactive_utils import is_in_ipython
from apache_beam.utils.interactive_utils import is_in_notebook
# Interactive Beam user flow is data-centric rather than pipeline-centric, so
# there is only one global interactive environment instance that manages
# implementation that enables interactivity.
_interactive_beam_env = None
_LOGGER = logging.getLogger(__name__)
def current_env(cache_manager=None):
"""Gets current Interactive Beam environment."""
global _interactive_beam_env
if not _interactive_beam_env:
_interactive_beam_env = InteractiveEnvironment(cache_manager)
return _interactive_beam_env
def new_env(cache_manager=None):
"""Creates a new Interactive Beam environment to replace current one."""
global _interactive_beam_env
if _interactive_beam_env:
_interactive_beam_env.cleanup()
_interactive_beam_env = None
return current_env(cache_manager)
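# Illustrative usage from other interactive modules (both helpers are defined above):
#   env = current_env()   # lazily creates the singleton environment on first access
#   env = new_env()       # cleans up the old environment and builds a fresh one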
class InteractiveEnvironment(object):
"""An interactive environment with cache and pipeline variable metadata.
Interactive Beam will use the watched variable information to determine if a
PCollection is assigned to a variable in user pipeline definition. When
executing the pipeline, interactivity is applied with implicit cache
mechanism for those PCollections if the pipeline is interactive. Users can
also visualize and introspect those PCollections in user code since they have
handles to the variables.
"""
def __init__(self, cache_manager=None):
self._cache_manager = cache_manager
# Register a cleanup routine when kernel is restarted or terminated.
if cache_manager:
atexit.register(self.cleanup)
# Holds class instances, module object, string of module names.
self._watching_set = set()
# Holds variables list of (Dict[str, object]).
self._watching_dict_list = []
# Holds results of main jobs as Dict[Pipeline, PipelineResult].
# Each key is a pipeline instance defined by the end user. The
# InteractiveRunner is responsible for populating this dictionary
# implicitly.
self._main_pipeline_results = {}
# Holds results of background caching jobs as
# Dict[Pipeline, PipelineResult]. Each key is a pipeline instance defined by
# the end user. The InteractiveRunner is responsible for populating this
# dictionary implicitly when a background caching jobs is started.
self._background_caching_pipeline_results = {}
self._cached_source_signature = {}
self._tracked_user_pipelines = set()
# Tracks the computation completeness of PCollections. PCollections tracked
# here don't need to be re-computed when data introspection is needed.
self._computed_pcolls = set()
# Always watch __main__ module.
self.watch('__main__')
# Do a warning level logging if current python version is below 3.6.
if sys.version_info < (3, 6):
self._is_py_version_ready = False
      _LOGGER.warning('Interactive Beam requires Python 3.6+.')
else:
self._is_py_version_ready = True
# Check if [interactive] dependencies are installed.
try:
import IPython # pylint: disable=unused-import
import jsons # pylint: disable=unused-import
import timeloop # pylint: disable=unused-import
from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator # pylint: disable=unused-import
self._is_interactive_ready = True
except ImportError:
self._is_interactive_ready = False
_LOGGER.warning('Dependencies required for Interactive Beam PCollection '
'visualization are not available, please use: `pip '
'install apache-beam[interactive]` to install necessary '
'dependencies to enable all data visualization features.')
self._is_in_ipython = is_in_ipython()
self._is_in_notebook = is_in_notebook()
if not self._is_in_ipython:
_LOGGER.warning('You cannot use Interactive Beam features when you are '
'not in an interactive environment such as a Jupyter '
'notebook or ipython terminal.')
if self._is_in_ipython and not self._is_in_notebook:
_LOGGER.warning('You have limited Interactive Beam features since your '
                      'ipython kernel is not connected to any notebook frontend.')
@property
def is_py_version_ready(self):
"""If Python version is above the minimum requirement."""
return self._is_py_version_ready
@property
def is_interactive_ready(self):
"""If the [interactive] dependencies are installed."""
return self._is_interactive_ready
@property
def is_in_ipython(self):
"""If the runtime is within an IPython kernel."""
return self._is_in_ipython
@property
def is_in_notebook(self):
"""If the kernel is connected to a notebook frontend.
If not, it could be that the user is using kernel in a terminal or a unit
test.
"""
return self._is_in_notebook
def cleanup(self):
# Utilizes cache manager to clean up cache from everywhere.
if self.cache_manager():
self.cache_manager().cleanup()
def watch(self, watchable):
"""Watches a watchable.
A watchable can be a dictionary of variable metadata such as locals(), a str
name of a module, a module object or an instance of a class. The variable
can come from any scope even local. Duplicated variable naming doesn't
matter since they are different instances. Duplicated variables are also
allowed when watching.
"""
if isinstance(watchable, dict):
self._watching_dict_list.append(watchable.items())
else:
self._watching_set.add(watchable)
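  # Illustrative call (the argument is an example, not from the original file):
  #   current_env().watch(locals())  # register every variable in the caller's local scope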
def watching(self):
"""Analyzes and returns a list of pair lists referring to variable names and
values from watched scopes.
Each entry in the list represents the variable defined within a watched
watchable. Currently, each entry holds a list of pairs. The format might
change in the future to hold more metadata. Duplicated pairs are allowed.
    And multiple pairs can have the same variable name as the "first" while
having different variable values as the "second" since variables in
different scopes can have the same name.
"""
watching = list(self._watching_dict_list)
for watchable in self._watching_set:
if isinstance(watchable, str):
module = importlib.import_module(watchable)
watching.append(vars(module).items())
else:
watching.append(vars(watchable).items())
return watching
def set_cache_manager(self, cache_manager):
"""Sets the cache manager held by current Interactive Environment."""
if self._cache_manager is cache_manager:
# NOOP if setting to the same cache_manager.
return
if self._cache_manager:
# Invoke cleanup routine when a new cache_manager is forcefully set and
# current cache_manager is not None.
self.cleanup()
atexit.unregister(self.cleanup)
self._cache_manager = cache_manager
if self._cache_manager:
# Re-register cleanup routine for the new cache_manager if it's not None.
atexit.register(self.cleanup)
def cache_manager(self):
"""Gets the cache manager held by current Interactive Environment."""
return self._cache_manager
def set_pipeline_result(self, pipeline, result, is_main_job):
"""Sets the pipeline run result. Adds one if absent. Otherwise, replace.
When is_main_job is True, set the result for the main job; otherwise, set
the result for the background caching job.
"""
assert issubclass(type(pipeline), beam.Pipeline), (
'pipeline must be an instance of apache_beam.Pipeline or its subclass')
assert issubclass(type(result), runner.PipelineResult), (
'result must be an instance of '
'apache_beam.runners.runner.PipelineResult or its subclass')
if is_main_job:
self._main_pipeline_results[pipeline] = result
else:
self._background_caching_pipeline_results[pipeline] = result
def evict_pipeline_result(self, pipeline, is_main_job=True):
"""Evicts the tracking of given pipeline run. Noop if absent."""
if is_main_job:
return self._main_pipeline_results.pop(pipeline, None)
return self._background_caching_pipeline_results.pop(pipeline, None)
def pipeline_result(self, pipeline, is_main_job=True):
"""Gets the pipeline run result. None if absent."""
if is_main_job:
return self._main_pipeline_results.get(pipeline, None)
return self._background_caching_pipeline_results.get(pipeline, None)
def is_terminated(self, pipeline, is_main_job=True):
"""Queries if the most recent job (by executing the given pipeline) state
is in a terminal state. True if absent."""
result = self.pipeline_result(pipeline, is_main_job=is_main_job)
if result:
return runner.PipelineState.is_terminal(result.state)
return True
def set_cached_source_signature(self, pipeline, signature):
self._cached_source_signature[pipeline] = signature
def get_cached_source_signature(self, pipeline):
return self._cached_source_signature.get(pipeline, set())
def track_user_pipelines(self):
"""Record references to all user-defined pipeline instances watched in
current environment.
Current static global singleton interactive environment holds references to
a set of pipeline instances defined by the user in the watched scope.
Interactive Beam features could use the references to determine if a given
pipeline is defined by user or implicitly created by Beam SDK or runners,
then handle them differently.
This is invoked every time a PTransform is to be applied if the current
code execution is under ipython due to the possibility that any user-defined
pipeline can be re-evaluated through notebook cell re-execution at any time.
Each time this is invoked, the tracked user pipelines are refreshed to
remove any pipeline instances that are no longer in watched scope. For
example, after a notebook cell re-execution re-evaluating a pipeline
creation, the last pipeline reference created by last evaluation will not be
in watched scope anymore.
"""
self._tracked_user_pipelines = set()
for watching in self.watching():
for _, val in watching:
if isinstance(val, beam.pipeline.Pipeline):
self._tracked_user_pipelines.add(val)
@property
def tracked_user_pipelines(self):
return self._tracked_user_pipelines
def mark_pcollection_computed(self, pcolls):
"""Marks computation completeness for the given pcolls.
Interactive Beam can use this information to determine if a computation is
needed to introspect the data of any given PCollection.
"""
self._computed_pcolls.update(pcoll for pcoll in pcolls)
def evict_computed_pcollections(self):
"""Evicts all computed PCollections.
Interactive Beam will treat none of the PCollections in any given pipeline
as completely computed.
"""
self._computed_pcolls = set()
@property
def computed_pcollections(self):
return self._computed_pcolls
|
the-stack_106_22712 | from urllib.parse import urlparse
from discord.ext import commands
from .utils import utils
import collections
import traceback
import functools
import discord
import inspect
import logging
import asyncio
import datetime
log = logging.getLogger(__name__)
"""
Credit to Danny (Rapptz) for his example of custom commands.
It was based on his and improved from there with a lot of changes to my liking.
I believe this cog is full of hacks.
"""
class CreateCustom:
def __init__(self,**kwargs):
self.name = kwargs.get('name')
self.content = kwargs.get('content')
self.brief= kwargs.get('brief')
self.guild_id= kwargs.get('guild_id')
async def run_command(cmd,o,ctx,*args:str):
"""
Custom Command
"""
"""
Args:
cmd: CreateCustom Obj
o: Nothing
ctx: ctx
*args: any remain message from user
"""
args = list(args)
#ignore obj
    if bool(urlparse(cmd.content).netloc):
temp = cmd.content.find(".", int(len(cmd.content)/2))
temp = cmd.content[temp:]
picture = False
for x in ["png","gif","jpg","bmp","jpeg"]:
if x in temp.lower():
picture = True
break
if picture:
embed = discord.Embed()
embed.set_image(url = cmd.content)
return await ctx.send(embed = embed)
msg = ctx.message
name = ""
mention = ""
    # A bad way to fix it, I know, sorry.
cmd.content = cmd.content.replace("\\t","\t").replace("\\n","\n")
if msg.mentions: #putting mention in
ment = msg.mentions
for i in range(len(ment)):
x = ment.pop(0)
blank = " "
if len(ment) >1:
blank = ","
name += x.name + blank
mention += x.mention + blank
if args:
log.debug("Cleaning out mentions")
try:
for l in range(len(args)):
args.pop(args.index(x.mention)) #when there is dupe mention
except Exception as e:
log.debug(e)
pass
content = cmd.content.format(cmduser = msg.author.name,cmdmention = msg.author.mention,
user = name, mention = mention,msg = " ".join(args))
    await ctx.send(content[:2000]) #sorry folks, you won't make it past 2k!
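# Illustrative stored command content (an assumption, not taken from any real guild data):
# a command saved with content "Hi {mention}! {cmduser} says: {msg}" would be expanded by
# the format() call above using the invoking user, any mentioned members, and the
# remaining message words.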
class CustomCmd(commands.Command):
def __init__(self,func,**kwargs):
self._entries = {}
self.module = None
super().__init__(func,**kwargs)
self.name = kwargs.get("name",self.name)
self.brief = kwargs.get("brief",self.brief)
self.params = collections.OrderedDict()
self.params["cog"] = self.cog # These are for help command to ignore errors by user.
self.params["ctx"] = "nothing"# These are for help command to ignore errors by user.
async def callback(self):
pass #ignore any problem and JUST CARRY ON.
async def invoke(self, ctx):
server = ctx.message.guild
if server is not None:
log.debug("Invoke command: {} , guild ID {}".format(ctx.command.name,server.id))
entry = self._entries.get(server.id)
if entry is None:
return
# update the callback called
self.callback = functools.partial(run_command, entry)
self.params = inspect.signature(self.callback).parameters
await super().invoke(ctx)
async def can_run(self,ctx):
server = ctx.message.guild
if server is not None:
log.debug("checking conditions, {} , {}".format(ctx.command.name,server.id))
get_entry = self._entries.get(server.id)
if get_entry: #to make brief for that server, totally hacky way?
try:
ctx.bot.get_command(get_entry.name).brief = get_entry.brief or ""
except: #if user didn't enter brief in.
pass
return bool(get_entry)
class Custom_Commands(commands.Cog, name = "Custom Commands"):
"""
An unique custom commands for your server!
"""
def __init__(self,bot):
self.bot = bot
self.redis = bot.db.redis
self.starter = True
self.bg = utils.Background("customcmd",60,50,self.timer,log)
self.bot.background.update({"customcmd":self.bg})
self.bg.start()
def cog_unload(self):
self.bg.stop()
def cog_check(self,ctx):
return utils.is_enable(ctx,"custom commands")
async def timer(self):
try:
for guild in list(self.bot.guilds):
log.debug(guild)
if await self.redis.hget("{}:Config:Cogs".format(guild.id),"custom commands") == "on":
list_name = await self.redis.smembers("{}:Customcmd:update_delete".format(guild.id))
log.debug(list_name)
if list_name:
for name in list_name: #if it edit or delete, either way remove them, we will do fresh update
cmd = self.bot.get_command(name)
print(cmd)
if cmd:
#Set None.. for some reason doesn't exist?
cmd._entries.pop(guild.id,None)
await self.redis.delete("{}:Customcmd:update_delete".format(guild.id))
if await self.redis.get("{}:Customcmd:update".format(guild.id)) or list_name or self.starter is True: #Which mean there is update
log.debug("adding commands")
cmd_content = await self.redis.hgetall("{}:Customcmd:content".format(guild.id))
cmd_brief = await self.redis.hgetall("{}:Customcmd:brief".format(guild.id))
log.debug("commands contents: {}".format(cmd_content))
for name,content in cmd_content.items():
log.debug("name {} : content: {}".format(name,content))
brief = cmd_brief[name]
entry = CreateCustom(name=name.lower(), content=content, brief = brief,guild_id=guild.id)
self.create_command(entry)
await self.redis.delete("{}:Customcmd:update".format(guild.id))
self.starter = False
except asyncio.CancelledError:
return utils.prRed("Asyncio Cancelled Error")
except Exception as e:
utils.prRed(e)
utils.prRed(traceback.format_exc())
def create_command(self,cmd):
cmd_exit = self.bot.get_command(cmd.name)
log.debug(cmd_exit)
if cmd_exit is None: #checking if we have exist command
command = self.bot.command(name = cmd.name, brief = cmd.brief,cls = CustomCmd)(run_command) #Decorator
command.cog = self #adding cog to command so it can format in help.
command._entries[cmd.guild_id] = cmd
elif isinstance(cmd_exit,CustomCmd):
log.debug("command already exist")
cmd_exit._entries[cmd.guild_id] = cmd
def setup(bot):
bot.add_cog(Custom_Commands(bot))
|
the-stack_106_22713 | from datetime import datetime
class PCB(object):
def __init__(self, pid=0, name="", priority=0, arrival=0, burst=0, simArrival=0, simBurst=0):
""" Variables were initially made to handle real-time CPU processing;
However, all "sim" variables were later added to account for simulating CPU times."""
self.pid = pid
self.name = name
self.priority = priority
self.after = None
self.before = None
self.arrival = arrival
self.child = None
self.state = None
self.inQueue = None
self.startTime = datetime.now()
self.endTime = 0
self.runTime = 0
self.waitTime = 0
self.age = 0
self.burst = burst
self.lastBurst = 0
self.avgBurst = 0
self.numBursts = 0
self.predictedBurst = 0
self.registers = []
#All simulation elements below:
self.simArrival = simArrival
self.simBurst = simBurst
self.simWait = 0
self.simTurnAround = 0
self.simStartTime = 0
self.simEndTime = 0
self.simInitDefinedBurst = simBurst
def complete(self, CPUStep):
"""
Called when a process has finished.
Updates CPU times, and sim times.
"""
self.endTime = datetime.now()
        self.calculateAge()
self.waitTime = self.age - self.runTime
self.processSimTimes(CPUStep)
    def calculateAge(self):
"""Called to calculate the "real time" age an object lived."""
diff = self.endTime - self.startTime
self.age = diff.total_seconds() * 1000
def inProcessing(self, CPUStep):
"""Update times when a PCB is admitted into CPU."""
self.runTime = datetime.now()
self.simStartTime = CPUStep
def outProcessing(self, CPUStep):
"""Update times when a PCB is leaving the CPU."""
diff = datetime.now() - self.runTime
self.runTime = diff.total_seconds() * 1000
self.simEndTime = CPUStep
def processSimTimes(self, CPUStep):
"""Calculate simulation times."""
self.simTurnAround = (CPUStep - self.simArrival)
self.simWait = self.simTurnAround - self.simInitDefinedBurst
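    # Worked example (numbers are assumptions): a process with simArrival=4 and
    # simInitDefinedBurst=6 that completes at CPUStep=15 gets simTurnAround=15-4=11
    # and simWait=11-6=5.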
def printSimTimes(self):
"""Print the simulation times."""
print("Finished PID:", self.pid)
print("Arrived:", self.simArrival)
print("Wait Time:", self.simWait)
print("Turnaround:", self.simTurnAround)
def printRawTime(self):
"""Print the raw CPU processing times."""
        arrival = self.arrival # comment out if you don't want to consider 'arrival' time, i.e., you want true arrival
        print("Arrived:", self.arrival)
        print("Runtime:", self.runTime)
        print("Wait Time:", self.waitTime)# - self.arrival)
        print("Turnaround:", self.age)# - self.arrival) |
the-stack_106_22714 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from nemo.collections.tts.data.datalayers import FastSpeech2Dataset
class TestTTSDatasets:
@pytest.mark.unit
def test_fs2_dataset(self, test_data_dir):
manifest_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/manifest.json')
mappings_file = os.path.join(test_data_dir, 'tts/mini_ljspeech/mappings.json')
ignore_file = os.path.join(test_data_dir, 'tts/mini_ljspeech/wavs_to_ignore.pkl')
# Test loading data (including supplementary data) with ignore file
ds = FastSpeech2Dataset(
manifest_filepath=manifest_path,
mappings_filepath=mappings_file,
sample_rate=22050,
ignore_file=ignore_file,
load_supplementary_values=True,
)
assert len(ds) == 4
count = 0
for _ in ds:
count += 1
assert count == 4
|
the-stack_106_22715 | import os
# After running this function, we need to manually remove the blank line at the head of each output file.
def transfer_neuroner_into_ncrfpp(input_dir, output_dir, tag='BIO'):
fin_train = open(os.path.join(input_dir, 'train.txt'), 'r')
fin_dev = open(os.path.join(input_dir, 'valid.txt'), 'r')
fin_test = open(os.path.join(input_dir, 'test.txt'), 'r')
fout_train = open(os.path.join(output_dir, 'train.txt'), 'w')
fout_dev = open(os.path.join(output_dir, 'valid.txt'), 'w')
fout_test = open(os.path.join(output_dir, 'test.txt'), 'w')
fin = [fin_train, fin_dev, fin_test]
fout = [fout_train, fout_dev, fout_test]
for fin_, fout_ in zip(fin, fout):
out_lines = []
last_label = ''
for line in fin_:
line = line.strip()
if line == '':
if tag == 'BIOES':
if len(out_lines) > 0 and last_label == 'B':
last_out_line = out_lines[-1]
position = last_out_line.rfind('B-')
last_out_line = last_out_line[:position] + 'S' + last_out_line[position + 1:]
out_lines[-1] = last_out_line
elif len(out_lines) > 0 and last_label == 'I':
last_out_line = out_lines[-1]
position = last_out_line.rfind('I-')
last_out_line = last_out_line[:position] + 'E' + last_out_line[position + 1:]
out_lines[-1] = last_out_line
out_lines.append('\n')
last_label = ''
continue
if line.find('-DOCSTART-') != -1:
continue
columns = line.split()
out_line = ''
for idx, column in enumerate(columns):
if idx == 0:
out_line += column+' '
elif idx == 1:
#out_line += '[POS]'+column+" "
pass
elif idx == 2:
pass
else:
if tag == 'BIOES':
if column[0]=='B':
out_line += column+'\n'
elif column[0]=='I':
out_line += column+'\n'
elif column[0] == 'O':
if len(out_lines) > 0 and last_label=='B':
last_out_line = out_lines[-1]
position = last_out_line.rfind('B-')
last_out_line = last_out_line[:position]+'S'+last_out_line[position+1:]
out_lines[-1] = last_out_line
elif len(out_lines) > 0 and last_label=='I':
last_out_line = out_lines[-1]
position = last_out_line.rfind('I-')
last_out_line = last_out_line[:position] + 'E' + last_out_line[position + 1:]
out_lines[-1] = last_out_line
out_line += column+'\n'
else:
out_line += column+'\n'
out_lines.append(out_line)
last_label = column[0]
for out_line in out_lines:
fout_.write(out_line)
fin_train.close()
fin_dev.close()
fin_test.close()
fout_train.close()
fout_dev.close()
fout_test.close()
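# Illustrative conversion done above (tokens are made up): with tag='BIOES', the BIO
# sequence "New B-LOC / York I-LOC / is O" becomes "New B-LOC / York E-LOC / is O",
# and a single-token entity "Paris B-LOC" followed by O becomes "Paris S-LOC".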
if __name__ == '__main__':
transfer_neuroner_into_ncrfpp('/Users/feili/project/NeuroNER/data/conll2003/en',
'/Users/feili/project/NCRFpp_0914/NCRFpp/conll03', tag='BIOES')
pass |